Sync contents

This commit is contained in:
Daniel Xu 2025-12-02 04:15:53 +00:00
parent bd639dea8b
commit 5e51e31043
5 changed files with 110 additions and 25 deletions

View file

@ -1,4 +1,4 @@
{ {
"last_synced_sha": "52e233ae8c999937881c32b6b15606de6b391789", "last_synced_sha": "4431cf66bde9b717e16d5c9af17c5083e183ac09",
"last_sync_time": "2025-12-02T02:40:27.052745" "last_sync_time": "2025-12-02T04:15:53.354450"
} }

View file

@ -147,6 +147,46 @@ async def create_training_client_from_state_async(
Async version of create_training_client_from_state. Async version of create_training_client_from_state.
#### `create_training_client_from_state_with_optimizer`
```python
def create_training_client_from_state_with_optimizer(
path: str,
user_metadata: dict[str, str] | None = None) -> TrainingClient
```
Create a TrainingClient from saved model weights and optimizer state.
This is similar to create_training_client_from_state but also restores
optimizer state (e.g., Adam momentum), which is useful for resuming
training exactly where it left off.
Args:
- `path`: Tinker path to saved weights (e.g., "tinker://run-id/weights/checkpoint-001")
- `user_metadata`: Optional metadata to attach to the new training run
Returns:
- `TrainingClient` loaded with the specified weights and optimizer state
Example:
```python
# Resume training from a checkpoint with optimizer state
training_client = service_client.create_training_client_from_state_with_optimizer(
"tinker://run-id/weights/checkpoint-001"
)
# Continue training with restored optimizer momentum
```
#### `create_training_client_from_state_with_optimizer_async`
```python
async def create_training_client_from_state_with_optimizer_async(
path: str,
user_metadata: dict[str, str] | None = None) -> TrainingClient
```
Async version of create_training_client_from_state_with_optimizer.
#### `create_sampling_client` #### `create_sampling_client`
```python ```python

View file

@ -237,21 +237,16 @@ class ImageAssetPointerChunk(StrictBase)
Image format Image format
#### `height`
Image height in pixels
#### `location` #### `location`
Path or URL to the image asset Path or URL to the image asset
#### `tokens` #### `expected_tokens`
Number of tokens this image represents Expected number of tokens this image represents.
This is only advisory: the tinker backend will compute the number of tokens
#### `width` from the image, and we can fail requests quickly if the token count does not
match expected_tokens.
Image width in pixels
## `CheckpointsListResponse` Objects ## `CheckpointsListResponse` Objects
@ -605,18 +600,6 @@ Image data as bytes
Image format Image format
#### `height`
Image height in pixels
#### `tokens`
Number of tokens this image represents
#### `width`
Image width in pixels
#### `expected_tokens` #### `expected_tokens`
Expected number of tokens this image represents. Expected number of tokens this image represents.

View file

@ -1,6 +1,6 @@
[project] [project]
name = "tinker" name = "tinker"
version = "0.6.0" version = "0.6.1"
description = "The official Python SDK for the tinker API" description = "The official Python SDK for the tinker API"
readme = "README.md" readme = "README.md"
license = "Apache-2.0" license = "Apache-2.0"

View file

@ -275,6 +275,68 @@ class ServiceClient(TelemetryProvider):
await load_future.result_async() await load_future.result_async()
return training_client return training_client
@sync_only
@capture_exceptions(fatal=True)
def create_training_client_from_state_with_optimizer(
    self, path: str, user_metadata: dict[str, str] | None = None
) -> TrainingClient:
    """Create a TrainingClient from saved model weights and optimizer state.

    This is similar to create_training_client_from_state but also restores
    optimizer state (e.g., Adam momentum), which is useful for resuming
    training exactly where it left off.

    Args:
    - `path`: Tinker path to saved weights (e.g., "tinker://run-id/weights/checkpoint-001")
    - `user_metadata`: Optional metadata to attach to the new training run

    Returns:
    - `TrainingClient` loaded with the specified weights and optimizer state

    Example:
    ```python
    # Resume training from a checkpoint with optimizer state
    training_client = service_client.create_training_client_from_state_with_optimizer(
        "tinker://run-id/weights/checkpoint-001"
    )
    # Continue training with restored optimizer momentum
    ```
    """
    rest_client = self.create_rest_client()
    # Use weights info endpoint which allows access to models with public checkpoints
    weights_info = rest_client.get_weights_info_by_tinker_path(path).result()
    # Right now all training runs are LoRA runs. Mirror the check done in the
    # async variant so a non-LoRA checkpoint fails loudly here instead of
    # passing rank=None through to create_lora_training_client.
    assert weights_info.is_lora and weights_info.lora_rank is not None
    training_client = self.create_lora_training_client(
        base_model=weights_info.base_model,
        rank=weights_info.lora_rank,
        user_metadata=user_metadata,
    )
    # Blocks until the weights + optimizer state are fully loaded.
    training_client.load_state_with_optimizer(path).result()
    return training_client
@capture_exceptions(fatal=True)
async def create_training_client_from_state_with_optimizer_async(
    self, path: str, user_metadata: dict[str, str] | None = None
) -> TrainingClient:
    """Async version of create_training_client_from_state_with_optimizer."""
    # The weights-info endpoint also covers models with public checkpoints.
    info = await self.create_rest_client().get_weights_info_by_tinker_path(path)
    # Right now all training runs are LoRA runs.
    assert info.is_lora and info.lora_rank is not None
    client = await self.create_lora_training_client_async(
        base_model=info.base_model,
        rank=info.lora_rank,
        user_metadata=user_metadata,
    )
    # Kick off the state load, then wait for it to finish before handing
    # the client back to the caller.
    load_future = await client.load_state_with_optimizer_async(path)
    await load_future.result_async()
    return client
@capture_exceptions(fatal=True) @capture_exceptions(fatal=True)
def create_sampling_client( def create_sampling_client(
self, self,