
Commit fe91357

examples: update to use gemma3 (#543)

Parent: d7978cb

13 files changed, 29 insertions(+), 31 deletions(-)
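This commit swaps the model used throughout the README and examples from llama3.2 (and llama3.2-vision in the multimodal example) to gemma3. As a hedged sketch of preparing a machine to run the updated examples, the snippet below pulls gemma3 if it is missing; it assumes a local Ollama server on the default port and ollama-python 0.4+, where `list()` returns a typed response with a `.models` attribute.

```python
import ollama

# Pull gemma3 once if it is not already available locally. Attribute access
# (.models, .model) assumes ollama-python >= 0.4; older releases return dicts.
have_gemma3 = any(
  (m.model or '').startswith('gemma3') for m in ollama.list().models
)
if not have_gemma3:
  ollama.pull('gemma3')

print('gemma3 is ready')
```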

README.md
Lines changed: 16 additions & 16 deletions

````diff
@@ -5,7 +5,7 @@ The Ollama Python library provides the easiest way to integrate Python 3.8+ proj
 ## Prerequisites
 
 - [Ollama](https://ollama.com/download) should be installed and running
-- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull llama3.2`
+- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull gemma3`
 - See [Ollama.com](https://ollama.com/search) for more information on the models available.
 
 ## Install
@@ -20,7 +20,7 @@ pip install ollama
 from ollama import chat
 from ollama import ChatResponse
 
-response: ChatResponse = chat(model='llama3.2', messages=[
+response: ChatResponse = chat(model='gemma3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -41,7 +41,7 @@ Response streaming can be enabled by setting `stream=True`.
 from ollama import chat
 
 stream = chat(
-    model='llama3.2',
+    model='gemma3',
     messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
     stream=True,
 )
@@ -61,7 +61,7 @@ client = Client(
   host='http://localhost:11434',
   headers={'x-some-header': 'some-value'}
 )
-response = client.chat(model='llama3.2', messages=[
+response = client.chat(model='gemma3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -79,7 +79,7 @@ from ollama import AsyncClient
 
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama3.2', messages=[message])
+  response = await AsyncClient().chat(model='gemma3', messages=[message])
 
 asyncio.run(chat())
 ```
@@ -92,7 +92,7 @@ from ollama import AsyncClient
 
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama3.2', messages=[message], stream=True):
+  async for part in await AsyncClient().chat(model='gemma3', messages=[message], stream=True):
     print(part['message']['content'], end='', flush=True)
 
 asyncio.run(chat())
@@ -105,13 +105,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
 ### Chat
 
 ```python
-ollama.chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='gemma3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
 ```
 
 ### Generate
 
 ```python
-ollama.generate(model='llama3.2', prompt='Why is the sky blue?')
+ollama.generate(model='gemma3', prompt='Why is the sky blue?')
 ```
 
 ### List
@@ -123,49 +123,49 @@ ollama.list()
 ### Show
 
 ```python
-ollama.show('llama3.2')
+ollama.show('gemma3')
 ```
 
 ### Create
 
 ```python
-ollama.create(model='example', from_='llama3.2', system="You are Mario from Super Mario Bros.")
+ollama.create(model='example', from_='gemma3', system="You are Mario from Super Mario Bros.")
 ```
 
 ### Copy
 
 ```python
-ollama.copy('llama3.2', 'user/llama3.2')
+ollama.copy('gemma3', 'user/gemma3')
 ```
 
 ### Delete
 
 ```python
-ollama.delete('llama3.2')
+ollama.delete('gemma3')
 ```
 
 ### Pull
 
 ```python
-ollama.pull('llama3.2')
+ollama.pull('gemma3')
 ```
 
 ### Push
 
 ```python
-ollama.push('user/llama3.2')
+ollama.push('user/gemma3')
 ```
 
 ### Embed
 
 ```python
-ollama.embed(model='llama3.2', input='The sky is blue because of rayleigh scattering')
+ollama.embed(model='gemma3', input='The sky is blue because of rayleigh scattering')
 ```
 
 ### Embed (batch)
 
 ```python
-ollama.embed(model='llama3.2', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll'])
+ollama.embed(model='gemma3', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll'])
 ```
 
 ### Ps
````
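Taken together, the README now standardizes on gemma3 throughout. A short sketch of the basic pattern after this change; it assumes ollama-python 0.4+, where responses are subscriptable pydantic models, so both access styles shown in the README's typed and untyped snippets work.

```python
from ollama import chat, ChatResponse

response: ChatResponse = chat(
  model='gemma3',
  messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
)

# Both lines print the same text: attribute access and dict-style access
# are interchangeable on the library's response types (0.4+).
print(response.message.content)
print(response['message']['content'])
```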

examples/async-chat.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -12,7 +12,7 @@ async def main():
   ]
 
   client = AsyncClient()
-  response = await client.chat('llama3.2', messages=messages)
+  response = await client.chat('gemma3', messages=messages)
   print(response['message']['content'])
 
 
```
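For context, a runnable reconstruction of the updated example; the lines not shown in the hunk (the imports and the body of the message list) are assumptions based on the visible context.

```python
import asyncio

from ollama import AsyncClient


async def main():
  # The message list is assumed; the hunk only shows its closing bracket.
  messages = [
    {'role': 'user', 'content': 'Why is the sky blue?'},
  ]

  client = AsyncClient()
  response = await client.chat('gemma3', messages=messages)
  print(response['message']['content'])


asyncio.run(main())
```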

examples/async-generate.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -5,7 +5,7 @@
 
 async def main():
   client = ollama.AsyncClient()
-  response = await client.generate('llama3.2', 'Why is the sky blue?')
+  response = await client.generate('gemma3', 'Why is the sky blue?')
   print(response['response'])
 
 
```
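The async generate example gets the same one-line swap. A hedged reconstruction of the full file, with the import lines assumed from the hunk's use of `ollama.AsyncClient`:

```python
import asyncio

import ollama


async def main():
  client = ollama.AsyncClient()
  response = await client.generate('gemma3', 'Why is the sky blue?')
  print(response['response'])


asyncio.run(main())
```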

examples/chat-stream.py
Lines changed: 1 addition & 3 deletions

```diff
@@ -7,7 +7,5 @@
   },
 ]
 
-for part in chat('llama3.2', messages=messages, stream=True):
+for part in chat('gemma3', messages=messages, stream=True):
   print(part['message']['content'], end='', flush=True)
-
-print()
```
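Besides the model swap, this hunk drops the trailing `print()` that used to emit a final newline after the stream finished. A sketch of the updated streaming loop, with the message list assumed from the hunk's context; re-add a final `print()` if you want the old trailing newline back.

```python
from ollama import chat

messages = [
  {'role': 'user', 'content': 'Why is the sky blue?'},
]

# Each streamed part carries a fragment of the reply; flush keeps output live.
for part in chat('gemma3', messages=messages, stream=True):
  print(part['message']['content'], end='', flush=True)
```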

examples/chat-with-history.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -22,7 +22,7 @@
 while True:
   user_input = input('Chat with history: ')
   response = chat(
-    'llama3.2',
+    'gemma3',
     messages=[*messages, {'role': 'user', 'content': user_input}],
   )
 
```
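To show why only the model name needs to change here, a hedged sketch of the loop this hunk sits in: each turn sends the accumulated history plus the new user message, then appends both sides of the exchange. The seed messages and the append step are assumptions based on the example's visible structure.

```python
from ollama import chat

messages = [
  {'role': 'user', 'content': 'Why is the sky blue?'},
  {'role': 'assistant', 'content': 'Because of Rayleigh scattering.'},
]

while True:
  user_input = input('Chat with history: ')
  response = chat(
    'gemma3',
    messages=[*messages, {'role': 'user', 'content': user_input}],
  )
  # Keep both sides of the turn so the next request carries full context.
  messages += [
    {'role': 'user', 'content': user_input},
    {'role': 'assistant', 'content': response['message']['content']},
  ]
  print(response['message']['content'] + '\n')
```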

examples/chat.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -7,5 +7,5 @@
   },
 ]
 
-response = chat('llama3.2', messages=messages)
+response = chat('gemma3', messages=messages)
 print(response['message']['content'])
```

examples/create.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -3,7 +3,7 @@
 client = Client()
 response = client.create(
   model='my-assistant',
-  from_='llama3.2',
+  from_='gemma3',
   system='You are mario from Super Mario Bros.',
   stream=False,
 )
```
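Since `from_` now points at gemma3, the derived `my-assistant` model is built on gemma3 weights. A hedged usage sketch; the follow-up chat call is an assumption for illustration, not part of the example file.

```python
from ollama import Client

client = Client()
client.create(
  model='my-assistant',
  from_='gemma3',
  system='You are mario from Super Mario Bros.',
  stream=False,
)

# Hypothetical follow-up: the derived model behaves like any local model.
response = client.chat('my-assistant', messages=[
  {'role': 'user', 'content': 'Who are you?'},
])
print(response['message']['content'])
```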

examples/generate-stream.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -1,4 +1,4 @@
 from ollama import generate
 
-for part in generate('llama3.2', 'Why is the sky blue?', stream=True):
+for part in generate('gemma3', 'Why is the sky blue?', stream=True):
   print(part['response'], end='', flush=True)
```

examples/generate.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -1,4 +1,4 @@
 from ollama import generate
 
-response = generate('llama3.2', 'Why is the sky blue?')
+response = generate('gemma3', 'Why is the sky blue?')
 print(response['response'])
```

examples/multimodal-chat.py
Lines changed: 1 addition & 1 deletion

```diff
@@ -11,7 +11,7 @@
 # img = Path(path).read_bytes()
 
 response = chat(
-  model='llama3.2-vision',
+  model='gemma3',
   messages=[
     {
       'role': 'user',
```
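This is the one substantive model change in the commit: the multimodal example previously needed the dedicated llama3.2-vision model, while gemma3 accepts images natively. A hedged sketch of the updated call; the image path and prompt are placeholders, and passing a `pathlib.Path` in `images` assumes ollama-python 0.4+, which also accepts a path string or raw bytes.

```python
from pathlib import Path

from ollama import chat

# Placeholder image path; swap in any local file.
path = 'image.png'

response = chat(
  model='gemma3',
  messages=[
    {
      'role': 'user',
      'content': 'What is in this image? Be concise.',
      'images': [Path(path)],
    }
  ],
)
print(response['message']['content'])
```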
