2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
{
".": "0.34.0"
".": "0.35.0"
}
4 changes: 2 additions & 2 deletions .stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 45
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-817bdc0e9a5082575f07386056968f56af20cbc40cbbc716ab4b8c4ec9220b53.yml
openapi_spec_hash: 30b3f6d251dfd02bca8ffa3f755e7574
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-5bf4a0ec441254dfa07138a49ee44a8b84696db879c4d4df73ba836ba7a51d62.yml
openapi_spec_hash: f602dfd1aaac78f149c1dbe352b7b7e0
config_hash: 9749f2f8998aa6b15452b2187ff675b9
21 changes: 21 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,26 @@
# Changelog

## 0.35.0 (2026-01-06)

Full Changelog: [v0.34.0...v0.35.0](https://github.com/togethercomputer/together-typescript/compare/v0.34.0...v0.35.0)

### Features

* Add compliance and chat_template_kwargs to chat completions spec ([137e09b](https://github.com/togethercomputer/together-typescript/commit/137e09b3f4d4bbf459d6c1e5ebbbcee0df60da51))
* Support VLM finetuning ([4a0271f](https://github.com/togethercomputer/together-typescript/commit/4a0271f78cecc05e137c6977238917fdf2bae381))
* VLM Support update ([f595d10](https://github.com/togethercomputer/together-typescript/commit/f595d105a9bade8b4ff62d3bd18e008e739d910e))


### Chores

* break long lines in snippets into multiline ([6b244ee](https://github.com/togethercomputer/together-typescript/commit/6b244eecf98a507718759113d7d1372463a4230f))
* **internal:** codegen related update ([ebd27f6](https://github.com/togethercomputer/together-typescript/commit/ebd27f699339a11370d08b33efc0ae8db1641b80))


### Documentation

* add more examples ([99b172f](https://github.com/togethercomputer/together-typescript/commit/99b172f6c74ed1fd50342b22da4494e200a4a5fc))

## 0.34.0 (2025-12-16)

Full Changelog: [v0.33.0...v0.34.0](https://github.com/togethercomputer/together-typescript/compare/v0.33.0...v0.34.0)
2 changes: 1 addition & 1 deletion LICENSE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2025 Together
Copyright 2026 Together

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
2 changes: 1 addition & 1 deletion README.md
@@ -44,7 +44,7 @@ import Together from 'together-ai';
const client = new Together();

const stream = await client.chat.completions.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
messages: [{ role: 'user', content: 'Say this is a test!' }],
model: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
stream: true,
});
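For context (not part of this diff): the `stream` created in the README snippet above is an async iterable. A minimal sketch of consuming it, assuming the standard chunk shape returned when `stream: true` is set, looks like:

```ts
// Illustrative sketch only — not part of this diff.
import Together from 'together-ai';

const client = new Together();

const stream = await client.chat.completions.create({
  messages: [{ role: 'user', content: 'Say this is a test!' }],
  model: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
  stream: true,
});

for await (const chunk of stream) {
  // Each chunk carries an incremental text delta for the first choice.
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
}
```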
2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
{
"name": "together-ai",
"version": "0.34.0",
"version": "0.35.0",
"description": "The official TypeScript library for the Together API",
"author": "Together <[email protected]>",
"types": "dist/index.d.ts",
4 changes: 4 additions & 0 deletions src/resources/chat/completions.ts
@@ -328,6 +328,10 @@ export interface CompletionCreateParamsBase {
| 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo'
| (string & {});

chat_template_kwargs?: unknown;

compliance?: 'hipaa';

/**
* Defines the behavior of the API when max_tokens exceeds the maximum context
* length of the model. When set to 'error', API will return 400 with appropriate
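For illustration (not part of this diff): a minimal sketch of a request using the two new optional fields. `chat_template_kwargs` is typed as `unknown`, so the `enable_thinking` key below is a hypothetical example, and the comments describe semantics inferred from the field names.

```ts
// Sketch of the new `compliance` and `chat_template_kwargs` fields.
import Together from 'together-ai';

const client = new Together();

const completion = await client.chat.completions.create({
  model: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
  messages: [{ role: 'user', content: 'Summarize the request in one sentence.' }],
  // 'hipaa' is the only value the spec allows for this field.
  compliance: 'hipaa',
  // Arbitrary key/values forwarded to the model's chat template
  // (the key shown here is hypothetical).
  chat_template_kwargs: { enable_thinking: false },
});

console.log(completion.choices[0]?.message?.content);
```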
20 changes: 20 additions & 0 deletions src/resources/fine-tuning.ts
@@ -247,6 +247,8 @@ export interface FinetuneResponse {

model_output_path?: string;

multimodal_params?: FinetuneResponse.MultimodalParams;

n_checkpoints?: number;

n_epochs?: number;
@@ -319,6 +321,14 @@ export namespace FinetuneResponse {
}
}

export interface MultimodalParams {
/**
* Whether to train the vision encoder of the model. Only available for multimodal
* models.
*/
train_vision?: boolean;
}

/**
* Progress information for a fine-tuning job
*/
@@ -1255,6 +1265,8 @@ export interface FineTuningCreateParams {
*/
max_grad_norm?: number;

multimodal_params?: FineTuningCreateParams.MultimodalParams;

/**
* Number of intermediate model versions saved during training for evaluation
*/
@@ -1360,6 +1372,14 @@ export namespace FineTuningCreateParams {
}
}

export interface MultimodalParams {
/**
* Whether to train the vision encoder of the model. Only available for multimodal
* models.
*/
train_vision?: boolean;
}

export interface TrainingMethodSft {
method: 'sft';

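For illustration (not part of this diff): a minimal sketch of a fine-tuning request that opts into the new `multimodal_params` field. The model name and training file ID are placeholders, and per the doc comment above, `train_vision` is only honored for multimodal models.

```ts
// Sketch of a VLM fine-tuning request using the new `multimodal_params` field.
import Together from 'together-ai';

const client = new Together();

const job = await client.fineTuning.create({
  model: 'example-org/example-vision-model', // placeholder: a vision-capable base model
  training_file: 'file-1234567890abcdef',    // placeholder: an uploaded training file ID
  n_epochs: 1,
  // New in this release: also train the model's vision encoder.
  multimodal_params: { train_vision: true },
});

console.log(job);
```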
2 changes: 1 addition & 1 deletion src/version.ts
@@ -1 +1 @@
export const VERSION = '0.34.0'; // x-release-please-version
export const VERSION = '0.35.0'; // x-release-please-version
10 changes: 9 additions & 1 deletion tests/api-resources/chat/completions.test.ts
@@ -24,8 +24,16 @@ describe('resource completions', () => {

test('create: required and optional params', async () => {
const response = await client.chat.completions.create({
messages: [{ content: 'content', role: 'system', name: 'name' }],
messages: [
{
content: 'content',
role: 'system',
name: 'name',
},
],
model: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
chat_template_kwargs: {},
compliance: 'hipaa',
context_length_exceeded_behavior: 'truncate',
echo: true,
frequency_penalty: 0,
@@ -28,7 +28,13 @@ describe('resource codeInterpreter', () => {
const response = await client.codeInterpreter.execute({
code: "print('Hello, world!')",
language: 'python',
files: [{ content: 'content', encoding: 'string', name: 'name' }],
files: [
{
content: 'content',
encoding: 'string',
name: 'name',
},
],
session_id: 'ses_abcDEF123',
});
});
6 changes: 5 additions & 1 deletion tests/api-resources/endpoints.test.ts
@@ -74,7 +74,11 @@ describe('resource endpoints', () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
client.endpoints.list(
{ mine: true, type: 'dedicated', usage_type: 'on-demand' },
{
mine: true,
type: 'dedicated',
usage_type: 'on-demand',
},
{ path: '/_stainless_unknown_path' },
),
).rejects.toThrow(Together.NotFoundError);
6 changes: 5 additions & 1 deletion tests/api-resources/evals.test.ts
@@ -76,7 +76,11 @@ describe('resource evals', () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
client.evals.list(
{ limit: 0, status: 'status', userId: 'userId' },
{
limit: 0,
status: 'status',
userId: 'userId',
},
{ path: '/_stainless_unknown_path' },
),
).rejects.toThrow(Together.NotFoundError);
6 changes: 5 additions & 1 deletion tests/api-resources/fine-tuning.test.ts
@@ -30,8 +30,12 @@ describe('resource fineTuning', () => {
hf_model_revision: 'hf_model_revision',
hf_output_repo_name: 'hf_output_repo_name',
learning_rate: 0,
lr_scheduler: { lr_scheduler_type: 'linear', lr_scheduler_args: { min_lr_ratio: 0 } },
lr_scheduler: {
lr_scheduler_type: 'linear',
lr_scheduler_args: { min_lr_ratio: 0 },
},
max_grad_norm: 0,
multimodal_params: { train_vision: true },
n_checkpoints: 0,
n_epochs: 0,
n_evals: 0,
60 changes: 50 additions & 10 deletions tests/index.test.ts
@@ -87,7 +87,11 @@ describe('instantiate client', () => {
error: jest.fn(),
};

const client = new Together({ logger: logger, logLevel: 'debug', apiKey: 'My API Key' });
const client = new Together({
logger: logger,
logLevel: 'debug',
apiKey: 'My API Key',
});

await forceAPIResponseForClient(client);
expect(debugMock).toHaveBeenCalled();
@@ -107,7 +111,11 @@ describe('instantiate client', () => {
error: jest.fn(),
};

const client = new Together({ logger: logger, logLevel: 'info', apiKey: 'My API Key' });
const client = new Together({
logger: logger,
logLevel: 'info',
apiKey: 'My API Key',
});

await forceAPIResponseForClient(client);
expect(debugMock).not.toHaveBeenCalled();
@@ -157,7 +165,11 @@ describe('instantiate client', () => {
};

process.env['TOGETHER_LOG'] = 'debug';
const client = new Together({ logger: logger, logLevel: 'off', apiKey: 'My API Key' });
const client = new Together({
logger: logger,
logLevel: 'off',
apiKey: 'My API Key',
});

await forceAPIResponseForClient(client);
expect(debugMock).not.toHaveBeenCalled();
@@ -173,7 +185,11 @@ describe('instantiate client', () => {
};

process.env['TOGETHER_LOG'] = 'not a log level';
const client = new Together({ logger: logger, logLevel: 'debug', apiKey: 'My API Key' });
const client = new Together({
logger: logger,
logLevel: 'debug',
apiKey: 'My API Key',
});
expect(client.logLevel).toBe('debug');
expect(warnMock).not.toHaveBeenCalled();
});
@@ -349,7 +365,11 @@ describe('instantiate client', () => {

describe('withOptions', () => {
test('creates a new client with overridden options', async () => {
const client = new Together({ baseURL: 'http://localhost:5000/', maxRetries: 3, apiKey: 'My API Key' });
const client = new Together({
baseURL: 'http://localhost:5000/',
maxRetries: 3,
apiKey: 'My API Key',
});

const newClient = client.withOptions({
maxRetries: 5,
@@ -389,7 +409,11 @@ describe('instantiate client', () => {
});

test('respects runtime property changes when creating new client', () => {
const client = new Together({ baseURL: 'http://localhost:5000/', timeout: 1000, apiKey: 'My API Key' });
const client = new Together({
baseURL: 'http://localhost:5000/',
timeout: 1000,
apiKey: 'My API Key',
});

// Modify the client properties directly after creation
client.baseURL = 'http://localhost:6000/';
@@ -535,7 +559,11 @@ describe('retries', () => {
return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } });
};

const client = new Together({ apiKey: 'My API Key', timeout: 10, fetch: testFetch });
const client = new Together({
apiKey: 'My API Key',
timeout: 10,
fetch: testFetch,
});

expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 });
expect(count).toEqual(2);
@@ -565,7 +593,11 @@ describe('retries', () => {
return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } });
};

const client = new Together({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 });
const client = new Together({
apiKey: 'My API Key',
fetch: testFetch,
maxRetries: 4,
});

expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 });

@@ -589,7 +621,11 @@ describe('retries', () => {
capturedRequest = init;
return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } });
};
const client = new Together({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 });
const client = new Together({
apiKey: 'My API Key',
fetch: testFetch,
maxRetries: 4,
});

expect(
await client.request({
@@ -651,7 +687,11 @@ describe('retries', () => {
capturedRequest = init;
return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } });
};
const client = new Together({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 });
const client = new Together({
apiKey: 'My API Key',
fetch: testFetch,
maxRetries: 4,
});

expect(
await client.request({