Skip to content

Commit 373a2e3

Browse files
authored
Merge pull request #17 from jmorganca/mxyng/response-error
update response error field to match json response
2 parents 467fd83 + 27f0dfe commit 373a2e3

File tree

1 file changed

+23
-23
lines changed

1 file changed

+23
-23
lines changed

ollama/_types.py

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -11,31 +11,31 @@
1111

1212
class BaseGenerateResponse(TypedDict):
1313
model: str
14-
"Model used to generate response."
14+
'Model used to generate response.'
1515

1616
created_at: str
17-
"Time when the request was created."
17+
'Time when the request was created.'
1818

1919
done: bool
20-
"True if response is complete, otherwise False. Useful for streaming to detect the final response."
20+
'True if response is complete, otherwise False. Useful for streaming to detect the final response.'
2121

2222
total_duration: int
23-
"Total duration in nanoseconds."
23+
'Total duration in nanoseconds.'
2424

2525
load_duration: int
26-
"Load duration in nanoseconds."
26+
'Load duration in nanoseconds.'
2727

2828
prompt_eval_count: int
29-
"Number of tokens evaluated in the prompt."
29+
'Number of tokens evaluated in the prompt.'
3030

3131
prompt_eval_duration: int
32-
"Duration of evaluating the prompt in nanoseconds."
32+
'Duration of evaluating the prompt in nanoseconds.'
3333

3434
eval_count: int
35-
"Number of tokens evaluated in inference."
35+
'Number of tokens evaluated in inference.'
3636

3737
eval_duration: int
38-
"Duration of evaluating inference in nanoseconds."
38+
'Duration of evaluating inference in nanoseconds.'
3939

4040

4141
class GenerateResponse(BaseGenerateResponse):
@@ -44,10 +44,10 @@ class GenerateResponse(BaseGenerateResponse):
4444
"""
4545

4646
response: str
47-
"Response content. When streaming, this contains a fragment of the response."
47+
'Response content. When streaming, this contains a fragment of the response.'
4848

4949
context: Sequence[int]
50-
"Tokenized history up to the point of the response."
50+
'Tokenized history up to the point of the response.'
5151

5252

5353
class Message(TypedDict):
@@ -59,7 +59,7 @@ class Message(TypedDict):
5959
"Assumed role of the message. Response messages always have role 'assistant'."
6060

6161
content: str
62-
"Content of the message. Response messages contain message fragments when streaming."
62+
'Content of the message. Response messages contain message fragments when streaming.'
6363

6464
images: NotRequired[Sequence[Any]]
6565
"""
@@ -80,7 +80,7 @@ class ChatResponse(BaseGenerateResponse):
8080
"""
8181

8282
message: Message
83-
"Response message."
83+
'Response message.'
8484

8585

8686
class ProgressResponse(TypedDict):
@@ -134,28 +134,28 @@ class RequestError(Exception):
134134
Common class for request errors.
135135
"""
136136

137-
def __init__(self, content: str):
138-
super().__init__(content)
139-
self.content = content
140-
"Reason for the error."
137+
def __init__(self, error: str):
138+
super().__init__(error)
139+
self.error = error
140+
'Reason for the error.'
141141

142142

143143
class ResponseError(Exception):
144144
"""
145145
Common class for response errors.
146146
"""
147147

148-
def __init__(self, content: str, status_code: int = -1):
148+
def __init__(self, error: str, status_code: int = -1):
149149
try:
150150
# try to parse content as JSON and extract 'error'
151151
# fallback to raw content if JSON parsing fails
152-
content = json.loads(content).get('error', content)
152+
error = json.loads(error).get('error', error)
153153
except json.JSONDecodeError:
154154
...
155155

156-
super().__init__(content)
157-
self.content = content
158-
"Reason for the error."
156+
super().__init__(error)
157+
self.error = error
158+
'Reason for the error.'
159159

160160
self.status_code = status_code
161-
"HTTP status code of the response."
161+
'HTTP status code of the response.'

0 commit comments

Comments
 (0)