diff --git a/pyproject.toml b/pyproject.toml
index ed948da4..3911eb95 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
 [tool.poetry]
 name = "together"
-version = "1.3.5"
+version = "1.3.6"
 authors = [
     "Together AI "
 ]
diff --git a/src/together/cli/api/finetune.py b/src/together/cli/api/finetune.py
index 36fba827..ff1a09b3 100644
--- a/src/together/cli/api/finetune.py
+++ b/src/together/cli/api/finetune.py
@@ -108,6 +108,9 @@ def fine_tuning(ctx: click.Context) -> None:
     "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
 )
 @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
+@click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL")
+@click.option("--wandb-project-name", type=str, default=None, help="Wandb project name")
+@click.option("--wandb-name", type=str, default=None, help="Wandb run name")
 @click.option(
     "--confirm",
     "-y",
@@ -144,6 +147,9 @@ def create(
     lora_trainable_modules: str,
     suffix: str,
     wandb_api_key: str,
+    wandb_base_url: str,
+    wandb_project_name: str,
+    wandb_name: str,
     confirm: bool,
     train_on_inputs: bool | Literal["auto"],
 ) -> None:
@@ -170,6 +176,9 @@ def create(
         lora_trainable_modules=lora_trainable_modules,
         suffix=suffix,
         wandb_api_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
diff --git a/src/together/resources/finetune.py b/src/together/resources/finetune.py
index ceb7bf0b..368a2506 100644
--- a/src/together/resources/finetune.py
+++ b/src/together/resources/finetune.py
@@ -48,6 +48,9 @@ def createFinetuneRequest(
     lora_trainable_modules: str | None = "all-linear",
     suffix: str | None = None,
     wandb_api_key: str | None = None,
+    wandb_base_url: str | None = None,
+    wandb_project_name: str | None = None,
+    wandb_name: str | None = None,
     train_on_inputs: bool | Literal["auto"] = "auto",
 ) -> FinetuneRequest:
     if batch_size == "max":
@@ -118,6 +121,9 @@ def createFinetuneRequest(
         training_type=training_type,
         suffix=suffix,
         wandb_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
@@ -150,6 +156,9 @@ def create(
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -182,6 +191,12 @@ def create(
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -225,6 +240,9 @@ def create(
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
@@ -479,6 +497,9 @@ async def create(
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -511,6 +532,12 @@ async def create(
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -554,6 +581,9 @@ async def create(
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
diff --git a/src/together/types/finetune.py b/src/together/types/finetune.py
index 7a638859..05bc8c42 100644
--- a/src/together/types/finetune.py
+++ b/src/together/types/finetune.py
@@ -168,7 +168,15 @@ class FinetuneRequest(BaseModel):
     suffix: str | None = None
     # weights & biases api key
     wandb_key: str | None = None
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
+    wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
+    # training type
     training_type: FullTrainingType | LoRATrainingType | None = None
+    # train on inputs
     train_on_inputs: StrictBool | Literal["auto"] = "auto"
@@ -236,8 +244,12 @@ class FinetuneResponse(BaseModel):
     evals_completed: int | None = None
     # place in job queue (decrementing counter)
     queue_depth: int | None = None
-    # weights & biases project name
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
     wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
     # weights & biases job url
     wandb_url: str | None = None
     # training file metadata
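
Usage sketch (not part of the patch): one way the new Weights & Biases options added here might be passed through the Python client. The training file ID, model name, and W&B values below are illustrative placeholders, and the example assumes Together() reads TOGETHER_API_KEY from the environment.

from together import Together

client = Together()  # assumes TOGETHER_API_KEY is set in the environment

# All literal values below are placeholders, not part of this change.
job = client.fine_tuning.create(
    training_file="file-xxxxxxxx",  # placeholder ID of an uploaded training file
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder base model
    wandb_api_key="...",                            # existing option
    wandb_base_url="https://wandb.my-org.example",  # new: custom / self-hosted W&B server
    wandb_project_name="together-finetunes",        # new: W&B project to log the run under
    wandb_name="llama-3.1-8b-run-1",                # new: W&B run name
)
print(job.id)  # ID of the submitted fine-tuning job

On the CLI side, the same values are exposed through the new --wandb-base-url, --wandb-project-name, and --wandb-name flags added above, alongside the existing --wandb-api-key.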