Commit e2f33e1

release: 0.1.0-alpha.12 (#148)
Parent: 4203271

49 files changed: 911 additions & 748 deletions

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion

@@ -1,3 +1,3 @@
 {
-  ".": "0.1.0-alpha.11"
+  ".": "0.1.0-alpha.12"
 }

.stats.yml

Lines changed: 3 additions & 3 deletions

@@ -1,4 +1,4 @@
 configured_endpoints: 30
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-48f3206278cd93af20d7d74aed1fd8e1513a04a60468505a40b0a15fbdab31a3.yml
-openapi_spec_hash: 69c1236ff3815089881984840aa4d3f6
-config_hash: f39be209cf332e8d80f34099b178970a
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-653a4aa26fdd2b335d1ead9c2ea0672cbe48a7616b76bf350a2421a8def4e08d.yml
+openapi_spec_hash: 1d5af8ab9d8c11d7f5225e19ebd1654a
+config_hash: d15dd709dd3f87b0a8b83b00b4abc881

CHANGELOG.md

Lines changed: 8 additions & 0 deletions

@@ -1,5 +1,13 @@
 # Changelog
 
+## 0.1.0-alpha.12 (2025-06-10)
+
+Full Changelog: [v0.1.0-alpha.11...v0.1.0-alpha.12](https://github.com/togethercomputer/together-py/compare/v0.1.0-alpha.11...v0.1.0-alpha.12)
+
+### Features
+
+* **api:** address diagnostic issues in audio api, correct openapi issue in images api, disambiguate a response in finetune api, enable automated testing on finetune and images ([9d72038](https://github.com/togethercomputer/together-py/commit/9d7203895723e9be3600fa970430d33b51049094))
+
 ## 0.1.0-alpha.11 (2025-06-03)
 
 Full Changelog: [v0.1.0-alpha.10...v0.1.0-alpha.11](https://github.com/togethercomputer/together-py/compare/v0.1.0-alpha.10...v0.1.0-alpha.11)

api.md

Lines changed: 12 additions & 3 deletions

@@ -85,12 +85,20 @@ Types:
 
 ```python
 from together.types import (
+    CosineLrSchedulerArgs,
     FineTune,
     FineTuneEvent,
+    FullTrainingType,
+    LinearLrSchedulerArgs,
+    LoRaTrainingType,
+    LrScheduler,
+    TrainingMethodDpo,
+    TrainingMethodSft,
     FineTuneCreateResponse,
     FineTuneListResponse,
     FineTuneCancelResponse,
     FineTuneDownloadResponse,
+    FineTuneListEventsResponse,
     FineTuneRetrieveCheckpointsResponse,
 )
 ```

@@ -102,7 +110,7 @@ Methods:
 - <code title="get /fine-tunes">client.fine_tune.<a href="./src/together/resources/fine_tune.py">list</a>() -> <a href="./src/together/types/fine_tune_list_response.py">FineTuneListResponse</a></code>
 - <code title="post /fine-tunes/{id}/cancel">client.fine_tune.<a href="./src/together/resources/fine_tune.py">cancel</a>(id) -> <a href="./src/together/types/fine_tune_cancel_response.py">FineTuneCancelResponse</a></code>
 - <code title="get /finetune/download">client.fine_tune.<a href="./src/together/resources/fine_tune.py">download</a>(\*\*<a href="src/together/types/fine_tune_download_params.py">params</a>) -> <a href="./src/together/types/fine_tune_download_response.py">FineTuneDownloadResponse</a></code>
-- <code title="get /fine-tunes/{id}/events">client.fine_tune.<a href="./src/together/resources/fine_tune.py">list_events</a>(id) -> <a href="./src/together/types/fine_tune_event.py">FineTuneEvent</a></code>
+- <code title="get /fine-tunes/{id}/events">client.fine_tune.<a href="./src/together/resources/fine_tune.py">list_events</a>(id) -> <a href="./src/together/types/fine_tune_list_events_response.py">FineTuneListEventsResponse</a></code>
 - <code title="get /fine-tunes/{id}/checkpoints">client.fine_tune.<a href="./src/together/resources/fine_tune.py">retrieve_checkpoints</a>(id) -> <a href="./src/together/types/fine_tune_retrieve_checkpoints_response.py">FineTuneRetrieveCheckpointsResponse</a></code>
 
 # CodeInterpreter

@@ -134,7 +142,7 @@ Methods:
 Types:
 
 ```python
-from together.types import ImageFile
+from together.types import ImageDataB64, ImageDataURL, ImageFile
 ```
 
 Methods:

@@ -146,7 +154,7 @@ Methods:
 Types:
 
 ```python
-from together.types import AudioFile
+from together.types import AudioFile, AudioSpeechStreamChunk
 ```
 
 Methods:

@@ -185,6 +193,7 @@ Types:
 
 ```python
 from together.types import (
+    Autoscaling,
     EndpointCreateResponse,
     EndpointRetrieveResponse,
     EndpointUpdateResponse,
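
The `list_events` signature change above is the "disambiguate a response in finetune api" item from the changelog: the endpoint previously claimed to return a bare `FineTuneEvent` and now returns a dedicated list response type. A minimal usage sketch, assuming the client reads `TOGETHER_API_KEY` from the environment; the fine-tune id is a placeholder, not a real job:

```python
from together import Together

client = Together()  # assumes TOGETHER_API_KEY is set in the environment

# Previously typed as a bare FineTuneEvent; now FineTuneListEventsResponse
events = client.fine_tune.list_events("ft-0000-placeholder")
print(type(events).__name__)  # FineTuneListEventsResponse
```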

examples/image.py

Lines changed: 2 additions & 1 deletion

@@ -1,6 +1,7 @@
 import base64
 
 from together import Together
+from together.types.image_data_b64 import ImageDataB64
 
 client = Together(api_key="04cf1e314be9c686cd14b3881f5c4ad76505af4c93a8d3fe6ef62337114d1d51")
 

@@ -11,7 +12,7 @@
 )
 
 # Write the image to a file
-if image.data and image.data[0] and image.data[0].b64_json:
+if image.data and image.data[0] and isinstance(image.data[0], ImageDataB64):
    image_data = image.data[0].b64_json

    binary_data = base64.b64decode(image_data)
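
The guard changes because `image.data` entries are now a union of `ImageDataB64` and `ImageDataURL` (see the api.md diff above), so a bare `b64_json` truthiness check no longer narrows the type. A minimal sketch of handling both variants, assuming the same `image` response object as in the example; since this diff does not show `ImageDataURL`'s fields, the URL branch just reports the object:

```python
import base64

from together.types.image_data_b64 import ImageDataB64

for i, item in enumerate(image.data or []):
    if isinstance(item, ImageDataB64):
        # Base64 variant: decode and write the image bytes
        with open(f"image_{i}.png", "wb") as f:
            f.write(base64.b64decode(item.b64_json))
    else:
        # ImageDataURL variant: fields not shown in this diff, so just report it
        print(f"image {i} returned as a URL payload: {item}")
```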

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 [project]
 name = "together"
-version = "0.1.0-alpha.11"
+version = "0.1.0-alpha.12"
 description = "The official Python library for the together API"
 dynamic = ["readme"]
 license = "Apache-2.0"

src/together/_version.py

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "together"
-__version__ = "0.1.0-alpha.11"  # x-release-please-version
+__version__ = "0.1.0-alpha.12"  # x-release-please-version

src/together/lib/cli/api/images.py

Lines changed: 2 additions & 1 deletion

@@ -5,6 +5,7 @@
 from PIL import Image
 
 from together import Together
+from together.types.image_data_b64 import ImageDataB64
 
 
 @click.group()

@@ -63,7 +64,7 @@ def generate(
     )
 
     for i, choice in enumerate(response.data):
-        if choice.b64_json:
+        if isinstance(choice, ImageDataB64):
            with open(f"{output}/{prefix}{choice.index}.png", "wb") as f:
                f.write(base64.b64decode(choice.b64_json))

src/together/lib/resources/fine_tune.py

Lines changed: 17 additions & 21 deletions

@@ -6,18 +6,14 @@
 from ..types.fine_tune import (
     FinetuneTrainingLimits,
 )
-from ...types.fine_tune_create_params import (
-    LrScheduler,
-    TrainingType,
-    TrainingMethod,
-    FineTuneCreateParams,
-    TrainingTypeFullTrainingType,
-    TrainingTypeLoRaTrainingType,
-    TrainingMethodTrainingMethodDpo,
-    TrainingMethodTrainingMethodSft,
-    LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs,
-    LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs,
-)
+from ...types.lr_scheduler_param import LrSchedulerParam
+from ...types.fine_tune_create_params import TrainingType, TrainingMethod, FineTuneCreateParams
+from ...types.full_training_type_param import FullTrainingTypeParam
+from ...types.lo_ra_training_type_param import LoRaTrainingTypeParam
+from ...types.training_method_dpo_param import TrainingMethodDpoParam
+from ...types.training_method_sft_param import TrainingMethodSftParam
+from ...types.cosine_lr_scheduler_args_param import CosineLrSchedulerArgsParam
+from ...types.linear_lr_scheduler_args_param import LinearLrSchedulerArgsParam
 
 AVAILABLE_TRAINING_METHODS = {
     "sft",

@@ -71,7 +67,7 @@ def create_finetune_request(
     if warmup_ratio is None:
         warmup_ratio = 0.0
 
-    training_type: TrainingType = TrainingTypeFullTrainingType(type="Full")
+    training_type: TrainingType = FullTrainingTypeParam(type="Full")
     max_batch_size: int = 0
     max_batch_size_dpo: int = 0
     min_batch_size: int = 0

@@ -80,7 +76,7 @@ def create_finetune_request(
            raise ValueError(f"LoRA adapters are not supported for the selected model ({model_or_checkpoint}).")
        lora_r = lora_r if lora_r is not None else model_limits.lora_training.max_rank
        lora_alpha = lora_alpha if lora_alpha is not None else lora_r * 2
-        training_type = TrainingTypeLoRaTrainingType(
+        training_type = LoRaTrainingTypeParam(
            type="Lora",
            lora_r=lora_r,
            lora_alpha=lora_alpha,

@@ -146,29 +142,29 @@ def create_finetune_request(
     if dpo_beta is not None and training_method != "dpo":
         raise ValueError("dpo_beta is only supported for DPO training")
 
-    lr_scheduler: LrScheduler
+    lr_scheduler: LrSchedulerParam
     if lr_scheduler_type == "cosine":
         if scheduler_num_cycles <= 0.0:
             raise ValueError(f"Number of cycles should be greater than 0 (got {scheduler_num_cycles})")
 
-        lr_scheduler = LrScheduler(
+        lr_scheduler = LrSchedulerParam(
             lr_scheduler_type="cosine",
-            lr_scheduler_args=LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs(
+            lr_scheduler_args=CosineLrSchedulerArgsParam(
                 min_lr_ratio=min_lr_ratio,  # type: ignore
                 num_cycles=scheduler_num_cycles,
             ),
         )
     else:
-        lr_scheduler = LrScheduler(
+        lr_scheduler = LrSchedulerParam(
             lr_scheduler_type="linear",
-            lr_scheduler_args=LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs(min_lr_ratio=min_lr_ratio),  # type: ignore
+            lr_scheduler_args=LinearLrSchedulerArgsParam(min_lr_ratio=min_lr_ratio),  # type: ignore
         )
 
     training_method_cls: TrainingMethod
     if training_method == "sft":
-        training_method_cls = TrainingMethodTrainingMethodSft(method="sft", train_on_inputs=train_on_inputs)  # type: ignore
+        training_method_cls = TrainingMethodSftParam(method="sft", train_on_inputs=train_on_inputs)  # type: ignore
     elif training_method == "dpo":
-        training_method_cls = TrainingMethodTrainingMethodDpo(method="dpo", dpo_beta=dpo_beta)  # type: ignore
+        training_method_cls = TrainingMethodDpoParam(method="dpo", dpo_beta=dpo_beta)  # type: ignore
 
     finetune_request = FineTuneCreateParams(
         model=model,  # type: ignore
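
The import rewrite above replaces the auto-generated nested aliases (e.g. `LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs`) with flat, individually importable `*Param` types. A minimal sketch of how the renamed types compose, using only the fields and call shapes that appear in this diff; the literal values are illustrative assumptions:

```python
from together.types.lr_scheduler_param import LrSchedulerParam
from together.types.lo_ra_training_type_param import LoRaTrainingTypeParam
from together.types.cosine_lr_scheduler_args_param import CosineLrSchedulerArgsParam

# LoRA training config, built the same way create_finetune_request does
training_type = LoRaTrainingTypeParam(type="Lora", lora_r=16, lora_alpha=32)

# Cosine schedule with the same argument shape the diff shows
lr_scheduler = LrSchedulerParam(
    lr_scheduler_type="cosine",
    lr_scheduler_args=CosineLrSchedulerArgsParam(min_lr_ratio=0.0, num_cycles=1.0),
)
```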
