Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,9 @@ ollama.generate(request)
- `logprobs` `<boolean>`: (Optional) Return log probabilities for tokens. Requires model support.
- `top_logprobs` `<number>`: (Optional) Number of top log probabilities to return per token when `logprobs` is enabled.
- `keep_alive` `<string | number>`: (Optional) How long to keep the model loaded. A number (seconds) or a string with a duration unit suffix ("300ms", "1.5h", "2h45m", etc.)
- `width` `<number>`: (Optional, Experimental) Width of the generated image in pixels. For image generation models only.
- `height` `<number>`: (Optional, Experimental) Height of the generated image in pixels. For image generation models only.
- `steps` `<number>`: (Optional, Experimental) Number of diffusion steps. For image generation models only.
- `options` `<Options>`: (Optional) Options to configure the runtime.
- Returns: `<GenerateResponse>`

Expand Down
7 changes: 7 additions & 0 deletions examples/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,10 @@ To run the examples run:
```sh
npx tsx <folder-name>/<file-name>.ts
```

### Image Generation (Experimental)

> **Note:** Image generation is experimental and currently only available on macOS.

- [image-generation/image-generation.ts](image-generation/image-generation.ts)
- [image-generation/image-generation-stream.ts](image-generation/image-generation-stream.ts) - Streamed progress
29 changes: 29 additions & 0 deletions examples/image-generation/image-generation.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
// Image generation is experimental and currently only available on macOS

import ollama from 'ollama'
// Prefer the 'node:' specifier for built-in modules (modern Node idiom).
import { writeFileSync } from 'node:fs'

/**
 * Generates an image from a text prompt with a streamed request.
 * Intermediate chunks carry step progress (`completed`/`total`);
 * the final chunk carries the base64-encoded image, which is written
 * to `output.png` in the current working directory.
 */
async function main() {
  const prompt = 'a sunset over mountains'
  console.log(`Prompt: ${prompt}`)

  // stream: true yields an async iterator of progress updates followed
  // by a final response containing the image payload.
  const response = await ollama.generate({
    model: 'x/z-image-turbo',
    prompt,
    stream: true,
  })

  for await (const part of response) {
    if (part.image) {
      // Final response contains the image as base64 — decode and persist it.
      const imageBuffer = Buffer.from(part.image, 'base64')
      writeFileSync('output.png', imageBuffer)
      console.log('\nImage saved to output.png')
    } else if (part.total) {
      // Progress update — '\r' overwrites the previous progress line in place.
      process.stdout.write(`\rProgress: ${part.completed}/${part.total}`)
    }
  }
}

main().catch(console.error)
12 changes: 11 additions & 1 deletion src/interfaces.ts
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,11 @@
logprobs?: boolean
top_logprobs?: number

// Experimental image generation parameters
width?: number
height?: number
steps?: number

options?: Partial<Options>
}

Expand All @@ -77,7 +82,7 @@
function: {
name: string;
arguments: {
[key: string]: any;

Check warning on line 85 in src/interfaces.ts

View workflow job for this annotation

GitHub Actions / test

Unexpected any. Specify a different type
};
};
}
Expand All @@ -90,15 +95,15 @@
type?: string;
parameters?: {
type?: string;
$defs?: any;

Check warning on line 98 in src/interfaces.ts

View workflow job for this annotation

GitHub Actions / test

Unexpected any. Specify a different type
items?: any;

Check warning on line 99 in src/interfaces.ts

View workflow job for this annotation

GitHub Actions / test

Unexpected any. Specify a different type
required?: string[];
properties?: {
[key: string]: {
type?: string | string[];
items?: any;

Check warning on line 104 in src/interfaces.ts

View workflow job for this annotation

GitHub Actions / test

Unexpected any. Specify a different type
description?: string;
enum?: any[];

Check warning on line 106 in src/interfaces.ts

View workflow job for this annotation

GitHub Actions / test

Unexpected any. Specify a different type
};
};
};
Expand Down Expand Up @@ -191,7 +196,7 @@
export interface GenerateResponse {
model: string
created_at: Date
response: string
response?: string
thinking?: string
done: boolean
done_reason: string
Expand All @@ -203,6 +208,11 @@
eval_count: number
eval_duration: number
logprobs?: Logprob[]

// Image generation response fields
image?: string // Base64-encoded generated image data
completed?: number // Number of completed steps (for streaming progress)
total?: number // Total number of steps (for streaming progress)
}

export interface ChatResponse {
Expand Down Expand Up @@ -268,9 +278,9 @@
details: ModelDetails
messages: Message[]
modified_at: Date
model_info: Map<string, any>,

Check warning on line 281 in src/interfaces.ts

View workflow job for this annotation

GitHub Actions / test

Unexpected any. Specify a different type
capabilities: string[],
projector_info?: Map<string, any>

Check warning on line 283 in src/interfaces.ts

View workflow job for this annotation

GitHub Actions / test

Unexpected any. Specify a different type
}

export interface VersionResponse {
Expand Down
86 changes: 86 additions & 0 deletions test/browser.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -56,3 +56,89 @@ describe('Ollama logprob request fields', () => {
)
})
})

describe('Ollama image generation request fields', () => {
  // Builds a minimal GenerateResponse with zeroed timing fields,
  // then applies per-test overrides on top.
  const buildResponse = (overrides: Partial<GenerateResponse>): GenerateResponse => ({
    model: 'dummy-image',
    created_at: new Date(),
    done: true,
    done_reason: 'stop',
    context: [],
    total_duration: 0,
    load_duration: 0,
    prompt_eval_count: 0,
    prompt_eval_duration: 0,
    eval_count: 0,
    eval_duration: 0,
    ...overrides,
  })

  it('forwards image generation parameters in generate requests', async () => {
    const ollamaClient = new Ollama()
    const requestSpy = vi
      .spyOn(ollamaClient as any, 'processStreamableRequest')
      .mockResolvedValue({} as GenerateResponse)

    await ollamaClient.generate({
      model: 'dummy-image',
      prompt: 'a sunset over mountains',
      width: 1024,
      height: 768,
      steps: 20,
    })

    // The experimental width/height/steps fields must reach the transport layer intact.
    expect(requestSpy).toHaveBeenCalledWith(
      'generate',
      expect.objectContaining({
        model: 'dummy-image',
        prompt: 'a sunset over mountains',
        width: 1024,
        height: 768,
        steps: 20,
      }),
    )
  })

  it('handles image generation response with image field', async () => {
    const fakeResponse = buildResponse({
      total_duration: 1000,
      load_duration: 100,
      prompt_eval_count: 10,
      prompt_eval_duration: 50,
      image: 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==',
    })

    const ollamaClient = new Ollama()
    vi.spyOn(ollamaClient as any, 'processStreamableRequest').mockResolvedValue(fakeResponse)

    const response = await ollamaClient.generate({
      model: 'dummy-image',
      prompt: 'a sunset',
    })

    // A completed image response surfaces the base64 payload to the caller.
    expect(response.image).toBeDefined()
    expect(response.done).toBe(true)
  })

  it('handles streaming progress fields for image generation', async () => {
    const fakeResponse = buildResponse({
      done: false,
      done_reason: '',
      completed: 5,
      total: 20,
    })

    const ollamaClient = new Ollama()
    vi.spyOn(ollamaClient as any, 'processStreamableRequest').mockResolvedValue(fakeResponse)

    const response = await ollamaClient.generate({
      model: 'dummy-image',
      prompt: 'a sunset',
    })

    // Intermediate chunks expose step progress without being marked done.
    expect(response.completed).toBe(5)
    expect(response.total).toBe(20)
    expect(response.done).toBe(false)
  })
})
Loading