Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"

[tool.poetry]
name = "together"
version = "1.3.5"
version = "1.3.6"
authors = [
"Together AI <[email protected]>"
]
Expand Down
9 changes: 9 additions & 0 deletions src/together/cli/api/finetune.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,9 @@ def fine_tuning(ctx: click.Context) -> None:
"--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
)
@click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
@click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL")
@click.option("--wandb-project-name", type=str, default=None, help="Wandb project name")
@click.option("--wandb-name", type=str, default=None, help="Wandb run name")
@click.option(
"--confirm",
"-y",
Expand Down Expand Up @@ -144,6 +147,9 @@ def create(
lora_trainable_modules: str,
suffix: str,
wandb_api_key: str,
wandb_base_url: str,
wandb_project_name: str,
wandb_name: str,
confirm: bool,
train_on_inputs: bool | Literal["auto"],
) -> None:
Expand All @@ -170,6 +176,9 @@ def create(
lora_trainable_modules=lora_trainable_modules,
suffix=suffix,
wandb_api_key=wandb_api_key,
wandb_base_url=wandb_base_url,
wandb_project_name=wandb_project_name,
wandb_name=wandb_name,
train_on_inputs=train_on_inputs,
)

Expand Down
30 changes: 30 additions & 0 deletions src/together/resources/finetune.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,9 @@ def createFinetuneRequest(
lora_trainable_modules: str | None = "all-linear",
suffix: str | None = None,
wandb_api_key: str | None = None,
wandb_base_url: str | None = None,
wandb_project_name: str | None = None,
wandb_name: str | None = None,
train_on_inputs: bool | Literal["auto"] = "auto",
) -> FinetuneRequest:
if batch_size == "max":
Expand Down Expand Up @@ -118,6 +121,9 @@ def createFinetuneRequest(
training_type=training_type,
suffix=suffix,
wandb_key=wandb_api_key,
wandb_base_url=wandb_base_url,
wandb_project_name=wandb_project_name,
wandb_name=wandb_name,
train_on_inputs=train_on_inputs,
)

Expand Down Expand Up @@ -150,6 +156,9 @@ def create(
lora_trainable_modules: str | None = "all-linear",
suffix: str | None = None,
wandb_api_key: str | None = None,
wandb_base_url: str | None = None,
wandb_project_name: str | None = None,
wandb_name: str | None = None,
verbose: bool = False,
model_limits: FinetuneTrainingLimits | None = None,
train_on_inputs: bool | Literal["auto"] = "auto",
Expand Down Expand Up @@ -182,6 +191,12 @@ def create(
Defaults to None.
wandb_api_key (str, optional): API key for Weights & Biases integration.
Defaults to None.
wandb_base_url (str, optional): Base URL for Weights & Biases integration.
Defaults to None.
wandb_project_name (str, optional): Project name for Weights & Biases integration.
Defaults to None.
wandb_name (str, optional): Run name for Weights & Biases integration.
Defaults to None.
verbose (bool, optional): whether to print the job parameters before submitting a request.
Defaults to False.
model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters of the model in fine-tuning.
Expand Down Expand Up @@ -225,6 +240,9 @@ def create(
lora_trainable_modules=lora_trainable_modules,
suffix=suffix,
wandb_api_key=wandb_api_key,
wandb_base_url=wandb_base_url,
wandb_project_name=wandb_project_name,
wandb_name=wandb_name,
train_on_inputs=train_on_inputs,
)

Expand Down Expand Up @@ -479,6 +497,9 @@ async def create(
lora_trainable_modules: str | None = "all-linear",
suffix: str | None = None,
wandb_api_key: str | None = None,
wandb_base_url: str | None = None,
wandb_project_name: str | None = None,
wandb_name: str | None = None,
verbose: bool = False,
model_limits: FinetuneTrainingLimits | None = None,
train_on_inputs: bool | Literal["auto"] = "auto",
Expand Down Expand Up @@ -511,6 +532,12 @@ async def create(
Defaults to None.
wandb_api_key (str, optional): API key for Weights & Biases integration.
Defaults to None.
wandb_base_url (str, optional): Base URL for Weights & Biases integration.
Defaults to None.
wandb_project_name (str, optional): Project name for Weights & Biases integration.
Defaults to None.
wandb_name (str, optional): Run name for Weights & Biases integration.
Defaults to None.
verbose (bool, optional): whether to print the job parameters before submitting a request.
Defaults to False.
model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters of the model in fine-tuning.
Expand Down Expand Up @@ -554,6 +581,9 @@ async def create(
lora_trainable_modules=lora_trainable_modules,
suffix=suffix,
wandb_api_key=wandb_api_key,
wandb_base_url=wandb_base_url,
wandb_project_name=wandb_project_name,
wandb_name=wandb_name,
train_on_inputs=train_on_inputs,
)

Expand Down
14 changes: 13 additions & 1 deletion src/together/types/finetune.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,15 @@ class FinetuneRequest(BaseModel):
suffix: str | None = None
# weights & biases api key
wandb_key: str | None = None
# weights & biases base url
wandb_base_url: str | None = None
# wandb project name
wandb_project_name: str | None = None
# wandb run name
wandb_name: str | None = None
# training type
training_type: FullTrainingType | LoRATrainingType | None = None
# train on inputs
train_on_inputs: StrictBool | Literal["auto"] = "auto"


Expand Down Expand Up @@ -236,8 +244,12 @@ class FinetuneResponse(BaseModel):
evals_completed: int | None = None
# place in job queue (decrementing counter)
queue_depth: int | None = None
# weights & biases project name
# weights & biases base url
wandb_base_url: str | None = None
# wandb project name
wandb_project_name: str | None = None
# wandb run name
wandb_name: str | None = None
# weights & biases job url
wandb_url: str | None = None
# training file metadata
Expand Down
Loading