2 changes: 1 addition & 1 deletion pyproject.toml
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"

[tool.poetry]
name = "together"
version = "1.5.17"
version = "1.5.18"
authors = ["Together AI <[email protected]>"]
description = "Python client for Together's Cloud Platform!"
readme = "README.md"
18 changes: 17 additions & 1 deletion src/together/cli/api/finetune.py
@@ -200,6 +200,18 @@ def fine_tuning(ctx: click.Context) -> None:
"The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}. "
"The step value is optional, without it the final checkpoint will be used.",
)
@click.option(
"--hf-api-token",
type=str,
default=None,
help="HF API token to use to upload a checkpoint to a private repo",
Review comment (Member), suggested change:
-help="HF API token to use to upload a checkpoint to a private repo",
+help="HF API token to use for uploading a checkpoint to a private repo",
)
@click.option(
"--hf-repo-to-upload",
type=str,
default=None,
help="HF repo to upload the fine-tuned model to",
)
def create(
ctx: click.Context,
training_file: str,
@@ -234,6 +246,8 @@ def create(
rpo_alpha: float | None,
simpo_gamma: float | None,
from_checkpoint: str,
hf_api_token: str | None,
hf_repo_to_upload: str | None,
) -> None:
"""Start fine-tuning"""
client: Together = ctx.obj
@@ -270,6 +284,8 @@ def create(
rpo_alpha=rpo_alpha,
simpo_gamma=simpo_gamma,
from_checkpoint=from_checkpoint,
hf_api_token=hf_api_token,
hf_repo_to_upload=hf_repo_to_upload,
)

if model is None and from_checkpoint is None:
@@ -280,7 +296,7 @@
model_name = from_checkpoint.split(":")[0]

model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
model=model_name
model=model_name,
)

if lora:
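The two new options follow the standard Click pattern used elsewhere in this file: each --flag becomes a keyword argument of the create callback and is forwarded to the fine-tuning request. Below is a minimal, self-contained sketch of that pattern; the standalone command and its echo output are illustrative only, not part of the real CLI.

from __future__ import annotations

import click


@click.command()
@click.option(
    "--hf-api-token",
    type=str,
    default=None,
    help="HF API token to use for uploading a checkpoint to a private repo",
)
@click.option(
    "--hf-repo-to-upload",
    type=str,
    default=None,
    help="HF repo to upload the fine-tuned model to",
)
def create(hf_api_token: str | None, hf_repo_to_upload: str | None) -> None:
    # Click maps --hf-api-token to hf_api_token and --hf-repo-to-upload to hf_repo_to_upload;
    # the real command passes both straight through to the job-creation call.
    click.echo(f"token provided: {hf_api_token is not None}, repo: {hf_repo_to_upload}")


if __name__ == "__main__":
    create()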
16 changes: 16 additions & 0 deletions src/together/resources/finetune.py
@@ -76,6 +76,8 @@ def create_finetune_request(
rpo_alpha: float | None = None,
simpo_gamma: float | None = None,
from_checkpoint: str | None = None,
hf_api_token: str | None = None,
hf_repo_to_upload: str | None = None,
) -> FinetuneRequest:
if model is not None and from_checkpoint is not None:
raise ValueError(
@@ -262,6 +264,8 @@ def create_finetune_request(
wandb_name=wandb_name,
training_method=training_method_cls,
from_checkpoint=from_checkpoint,
hf_api_token=hf_api_token,
hf_repo_to_upload=hf_repo_to_upload,
)

return finetune_request
@@ -341,6 +345,8 @@ def create(
rpo_alpha: float | None = None,
simpo_gamma: float | None = None,
from_checkpoint: str | None = None,
hf_api_token: str | None = None,
hf_repo_to_upload: str | None = None,
) -> FinetuneResponse:
"""
Method to initiate a fine-tuning job
@@ -397,6 +403,8 @@ def create(
from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
The step value is optional, without it the final checkpoint will be used.
hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
hf_repo_to_upload (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.

Returns:
FinetuneResponse: Object containing information about fine-tuning job.
@@ -450,6 +458,8 @@ def create(
rpo_alpha=rpo_alpha,
simpo_gamma=simpo_gamma,
from_checkpoint=from_checkpoint,
hf_api_token=hf_api_token,
hf_repo_to_upload=hf_repo_to_upload,
)

if verbose:
@@ -762,6 +772,8 @@ async def create(
rpo_alpha: float | None = None,
simpo_gamma: float | None = None,
from_checkpoint: str | None = None,
hf_api_token: str | None = None,
hf_repo_to_upload: str | None = None,
) -> FinetuneResponse:
"""
Async method to initiate a fine-tuning job
@@ -818,6 +830,8 @@ async def create(
from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
The step value is optional, without it the final checkpoint will be used.
hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
hf_repo_to_upload (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.

Returns:
FinetuneResponse: Object containing information about fine-tuning job.
@@ -871,6 +885,8 @@ async def create(
rpo_alpha=rpo_alpha,
simpo_gamma=simpo_gamma,
from_checkpoint=from_checkpoint,
hf_api_token=hf_api_token,
hf_repo_to_upload=hf_repo_to_upload,
)

if verbose:
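With these parameters threaded through create_finetune_request and both create methods, a job can request that the resulting checkpoint be pushed to a Hugging Face repo. A hedged sketch of the synchronous call follows; the training-file ID, base model, token, and repo name are placeholders, not values from this PR.

from together import Together

client = Together()  # picks up TOGETHER_API_KEY from the environment

job = client.fine_tuning.create(
    training_file="file-0123456789abcdef",            # placeholder: ID of an uploaded training file
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder base model
    hf_api_token="hf_xxx",                             # placeholder: token with write access to the repo
    hf_repo_to_upload="my-org/my-finetuned-model",     # placeholder: target HF repo
)
print(job.id)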
3 changes: 3 additions & 0 deletions src/together/types/finetune.py
@@ -212,6 +212,9 @@ class FinetuneRequest(BaseModel):
)
# from step
from_checkpoint: str | None = None
# hf related fields
hf_api_token: str | None = None
hf_repo_to_upload: str | None = None


class FinetuneResponse(BaseModel):
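Since hf_api_token and hf_repo_to_upload are now fields on FinetuneRequest, they should travel in the job-creation payload alongside the existing fields for both clients. The async path mirrors the synchronous one; a sketch assuming the usual AsyncTogether entry point, again with placeholder values.

import asyncio

from together import AsyncTogether


async def main() -> None:
    client = AsyncTogether()  # picks up TOGETHER_API_KEY from the environment
    job = await client.fine_tuning.create(
        training_file="file-0123456789abcdef",         # placeholder: uploaded training file ID
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder base model
        hf_api_token="hf_xxx",                          # placeholder token
        hf_repo_to_upload="my-org/my-finetuned-model",  # placeholder private repo
    )
    print(job.id)


asyncio.run(main())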