diff --git a/pyproject.toml b/pyproject.toml
index 17373042..afd13384 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
 
 [tool.poetry]
 name = "together"
-version = "1.5.17"
+version = "1.5.18"
 authors = ["Together AI "]
 description = "Python client for Together's Cloud Platform!"
 readme = "README.md"
diff --git a/src/together/cli/api/finetune.py b/src/together/cli/api/finetune.py
index 172acdd3..eaff63b7 100644
--- a/src/together/cli/api/finetune.py
+++ b/src/together/cli/api/finetune.py
@@ -200,6 +200,18 @@ def fine_tuning(ctx: click.Context) -> None:
     "The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}. "
     "The step value is optional, without it the final checkpoint will be used.",
 )
+@click.option(
+    "--hf-api-token",
+    type=str,
+    default=None,
+    help="HF API token to use for uploading a checkpoint to a private repo",
+)
+@click.option(
+    "--hf-output-repo-name",
+    type=str,
+    default=None,
+    help="HF repo to upload the fine-tuned model to",
+)
 def create(
     ctx: click.Context,
     training_file: str,
@@ -234,6 +246,8 @@ def create(
     rpo_alpha: float | None,
     simpo_gamma: float | None,
     from_checkpoint: str,
+    hf_api_token: str | None,
+    hf_output_repo_name: str | None,
 ) -> None:
     """Start fine-tuning"""
     client: Together = ctx.obj
@@ -270,6 +284,8 @@ def create(
         rpo_alpha=rpo_alpha,
         simpo_gamma=simpo_gamma,
         from_checkpoint=from_checkpoint,
+        hf_api_token=hf_api_token,
+        hf_output_repo_name=hf_output_repo_name,
     )
 
     if model is None and from_checkpoint is None:
@@ -280,7 +296,7 @@
         model_name = from_checkpoint.split(":")[0]
 
     model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
-        model=model_name
+        model=model_name,
     )
 
     if lora:
diff --git a/src/together/resources/finetune.py b/src/together/resources/finetune.py
index 27baf2d2..b69c2a3f 100644
--- a/src/together/resources/finetune.py
+++ b/src/together/resources/finetune.py
@@ -76,6 +76,8 @@ def create_finetune_request(
     rpo_alpha: float | None = None,
     simpo_gamma: float | None = None,
     from_checkpoint: str | None = None,
+    hf_api_token: str | None = None,
+    hf_output_repo_name: str | None = None,
 ) -> FinetuneRequest:
     if model is not None and from_checkpoint is not None:
         raise ValueError(
@@ -262,6 +264,8 @@ def create_finetune_request(
         wandb_name=wandb_name,
         training_method=training_method_cls,
         from_checkpoint=from_checkpoint,
+        hf_api_token=hf_api_token,
+        hf_output_repo_name=hf_output_repo_name,
     )
 
     return finetune_request
@@ -341,6 +345,8 @@ def create(
         rpo_alpha: float | None = None,
         simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
+        hf_api_token: str | None = None,
+        hf_output_repo_name: str | None = None,
     ) -> FinetuneResponse:
         """
         Method to initiate a fine-tuning job
@@ -397,6 +403,8 @@ def create(
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
+            hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
+            hf_output_repo_name (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.
 
         Returns:
             FinetuneResponse: Object containing information about fine-tuning job.
@@ -450,6 +458,8 @@ def create(
             rpo_alpha=rpo_alpha,
             simpo_gamma=simpo_gamma,
             from_checkpoint=from_checkpoint,
+            hf_api_token=hf_api_token,
+            hf_output_repo_name=hf_output_repo_name,
         )
 
         if verbose:
@@ -762,6 +772,8 @@ async def create(
         rpo_alpha: float | None = None,
         simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
+        hf_api_token: str | None = None,
+        hf_output_repo_name: str | None = None,
     ) -> FinetuneResponse:
         """
         Async method to initiate a fine-tuning job
@@ -818,6 +830,8 @@ async def create(
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
+            hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
+            hf_output_repo_name (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.
 
         Returns:
             FinetuneResponse: Object containing information about fine-tuning job.
@@ -871,6 +885,8 @@ async def create(
             rpo_alpha=rpo_alpha,
             simpo_gamma=simpo_gamma,
             from_checkpoint=from_checkpoint,
+            hf_api_token=hf_api_token,
+            hf_output_repo_name=hf_output_repo_name,
     )
 
         if verbose:
diff --git a/src/together/types/finetune.py b/src/together/types/finetune.py
index e8c388f9..ee160eec 100644
--- a/src/together/types/finetune.py
+++ b/src/together/types/finetune.py
@@ -212,6 +212,9 @@ class FinetuneRequest(BaseModel):
     )
     # from step
     from_checkpoint: str | None = None
+    # hf related fields
+    hf_api_token: str | None = None
+    hf_output_repo_name: str | None = None
 
 
 class FinetuneResponse(BaseModel):
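
Usage sketch (not part of the diff): the snippet below shows how the two new parameters might be passed through the Python client, assuming a valid TOGETHER_API_KEY in the environment; the training-file ID, base model name, HF token, and repo name are placeholders.

# Minimal sketch: start a fine-tuning job whose resulting model is uploaded
# to a Hugging Face repo. All concrete values below are hypothetical.
from together import Together

client = Together()  # reads TOGETHER_API_KEY from the environment

response = client.fine_tuning.create(
    training_file="file-abc123",  # hypothetical ID of an uploaded training file
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # hypothetical base model
    hf_api_token="hf_xxx",  # new: HF token, required for private-repo uploads
    hf_output_repo_name="my-org/my-finetuned-model",  # new: destination HF repo
)
print(response.id)

The same values can be passed on the command line via the new --hf-api-token and --hf-output-repo-name flags on `together fine-tuning create`.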