Skip to content

Commit e5c4799 — "Update README.md"

Changes all demo code to use llama3 instead of llama2.

1 parent: e403d74

File tree

1 file changed: +14 −14 lines

README.md

Lines changed: 14 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -12,7 +12,7 @@ pip install ollama
1212

1313
```python
1414
import ollama
-response = ollama.chat(model='llama2', messages=[
+response = ollama.chat(model='llama3', messages=[
1616
{
1717
'role': 'user',
1818
'content': 'Why is the sky blue?',
@@ -29,7 +29,7 @@ Response streaming can be enabled by setting `stream=True`, modifying function c
2929
import ollama
3030

3131
stream = ollama.chat(
-model='llama2',
+model='llama3',
3333
messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
3434
stream=True,
3535
)
@@ -45,13 +45,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
4545
### Chat
4646

4747
```python
-ollama.chat(model='llama2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='llama3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
4949
```
5050

5151
### Generate
5252

5353
```python
-ollama.generate(model='llama2', prompt='Why is the sky blue?')
+ollama.generate(model='llama3', prompt='Why is the sky blue?')
5555
```
5656

5757
### List
@@ -63,14 +63,14 @@ ollama.list()
6363
### Show
6464

6565
```python
-ollama.show('llama2')
+ollama.show('llama3')
6767
```
6868

6969
### Create
7070

7171
```python
7272
modelfile='''
-FROM llama2
+FROM llama3
7474
SYSTEM You are mario from super mario bros.
7575
'''
7676

@@ -80,31 +80,31 @@ ollama.create(model='example', modelfile=modelfile)
8080
### Copy
8181

8282
```python
-ollama.copy('llama2', 'user/llama2')
+ollama.copy('llama3', 'user/llama3')
8484
```
8585

8686
### Delete
8787

8888
```python
-ollama.delete('llama2')
+ollama.delete('llama3')
9090
```
9191

9292
### Pull
9393

9494
```python
-ollama.pull('llama2')
+ollama.pull('llama3')
9696
```
9797

9898
### Push
9999

100100
```python
-ollama.push('user/llama2')
+ollama.push('user/llama3')
102102
```
103103

104104
### Embeddings
105105

106106
```python
-ollama.embeddings(model='llama2', prompt='The sky is blue because of rayleigh scattering')
+ollama.embeddings(model='llama3', prompt='The sky is blue because of rayleigh scattering')
108108
```
109109

110110
## Custom client
@@ -117,7 +117,7 @@ A custom client can be created with the following fields:
117117
```python
118118
from ollama import Client
119119
client = Client(host='http://localhost:11434')
-response = client.chat(model='llama2', messages=[
+response = client.chat(model='llama3', messages=[
121121
{
122122
'role': 'user',
123123
'content': 'Why is the sky blue?',
@@ -133,7 +133,7 @@ from ollama import AsyncClient
133133

134134
async def chat():
135135
message = {'role': 'user', 'content': 'Why is the sky blue?'}
-response = await AsyncClient().chat(model='llama2', messages=[message])
+response = await AsyncClient().chat(model='llama3', messages=[message])
137137

138138
asyncio.run(chat())
139139
```
@@ -146,7 +146,7 @@ from ollama import AsyncClient
146146

147147
async def chat():
148148
message = {'role': 'user', 'content': 'Why is the sky blue?'}
-async for part in await AsyncClient().chat(model='llama2', messages=[message], stream=True):
+async for part in await AsyncClient().chat(model='llama3', messages=[message], stream=True):
150150
print(part['message']['content'], end='', flush=True)
151151

152152
asyncio.run(chat())

0 commit comments

Comments (0)