@@ -12,7 +12,7 @@ pip install ollama

```python
import ollama
-response = ollama.chat(model='llama3', messages=[
+response = ollama.chat(model='llama3.1', messages=[
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
@@ -29,7 +29,7 @@ Response streaming can be enabled by setting `stream=True`, modifying function c
import ollama

stream = ollama.chat(
-    model='llama3',
+    model='llama3.1',
    messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
    stream=True,
)
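
For context beyond the hunk: `stream` is a Python generator of partial responses. A minimal sketch of consuming it, assuming each part carries the same `['message']['content']` shape the async streaming example later in this diff iterates over:

```python
# Print each partial chunk as it arrives from the generator.
for chunk in stream:
  print(chunk['message']['content'], end='', flush=True)
```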
@@ -45,13 +45,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
### Chat

```python
-ollama.chat(model='llama3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='llama3.1', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
```
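
For reference, the chat call's return value nests the reply under `['message']['content']`, the same shape the streaming loops in this diff consume:

```python
import ollama

response = ollama.chat(model='llama3.1', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
# Same nesting the streaming examples rely on: message -> content.
print(response['message']['content'])
```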

### Generate

```python
-ollama.generate(model='llama3', prompt='Why is the sky blue?')
+ollama.generate(model='llama3.1', prompt='Why is the sky blue?')
```
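
`generate` takes a bare prompt rather than a message list. A sketch of reading the result, assuming the return value mirrors the REST generate endpoint and carries the completion under a `response` field (an assumption, flagged in the comment):

```python
import ollama

result = ollama.generate(model='llama3.1', prompt='Why is the sky blue?')
# 'response' is assumed from the REST API's generate response shape.
print(result['response'])
```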

### List

@@ -63,14 +63,14 @@ ollama.list()

### Show

```python
-ollama.show('llama3')
+ollama.show('llama3.1')
```

### Create

```python
modelfile='''
-FROM llama3
+FROM llama3.1
SYSTEM You are mario from super mario bros.
'''

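The next hunk header shows the `ollama.create(model='example', modelfile=modelfile)` call that closes this block. Once created, the model is addressed by the name passed as `model`; a minimal sketch:

```python
import ollama

# 'example' is the name given at create time, not the base model's name.
response = ollama.chat(model='example', messages=[{'role': 'user', 'content': 'Who are you?'}])
```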
@@ -80,31 +80,31 @@ ollama.create(model='example', modelfile=modelfile)
### Copy

```python
-ollama.copy('llama3', 'user/llama3')
+ollama.copy('llama3.1', 'user/llama3.1')
```

### Delete

```python
-ollama.delete('llama3')
+ollama.delete('llama3.1')
```

### Pull

```python
-ollama.pull('llama3')
+ollama.pull('llama3.1')
```

### Push

```python
-ollama.push('user/llama3')
+ollama.push('user/llama3.1')
```

### Embeddings

```python
-ollama.embeddings(model='llama3', prompt='The sky is blue because of rayleigh scattering')
+ollama.embeddings(model='llama3.1', prompt='The sky is blue because of rayleigh scattering')
```
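
A sketch of using the result, assuming it mirrors the REST embeddings endpoint and returns the vector under an `embedding` key (an assumption, flagged in the comment):

```python
import ollama

resp = ollama.embeddings(model='llama3.1', prompt='The sky is blue because of rayleigh scattering')
vector = resp['embedding']  # key name assumed from the REST embeddings response
print(len(vector))          # dimensionality depends on the model
```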

### Ps
@@ -123,7 +123,7 @@ A custom client can be created with the following fields:
```python
from ollama import Client
client = Client(host='http://localhost:11434')
-response = client.chat(model='llama3', messages=[
+response = client.chat(model='llama3.1', messages=[
  {
    'role': 'user',
    'content': 'Why is the sky blue?',
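
The fields list mentioned in this hunk's header is truncated. Beyond `host`, a hedged sketch assuming extra keyword arguments are forwarded to the underlying HTTP client; the `timeout` below is that assumption, not an API documented in this diff:

```python
from ollama import Client

# `host` appears in the diff; `timeout` is an assumed pass-through
# to the underlying HTTP client and should be verified.
client = Client(host='http://localhost:11434', timeout=30)
```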
@@ -139,7 +139,7 @@ from ollama import AsyncClient

async def chat():
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama3', messages=[message])
+  response = await AsyncClient().chat(model='llama3.1', messages=[message])

asyncio.run(chat())
```
@@ -152,7 +152,7 @@ from ollama import AsyncClient

async def chat():
  message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama3', messages=[message], stream=True):
+  async for part in await AsyncClient().chat(model='llama3.1', messages=[message], stream=True):
    print(part['message']['content'], end='', flush=True)

asyncio.run(chat())