
Commit dd4626a

fix black and pylint
1 parent 3b94b75 commit dd4626a

2 files changed: +19 -24 lines changed

meilisearch/client.py

Lines changed: 5 additions & 1 deletion
@@ -24,7 +24,11 @@
 
 from meilisearch._httprequests import HttpRequests
 from meilisearch.config import Config
-from meilisearch.errors import MeilisearchApiError, MeilisearchCommunicationError, MeilisearchError
+from meilisearch.errors import (
+    MeilisearchApiError,
+    MeilisearchCommunicationError,
+    MeilisearchError,
+)  # pylint: disable=unused-import
 from meilisearch.index import Index
 from meilisearch.models.key import Key, KeysResults
 from meilisearch.models.task import Batch, BatchResults, Task, TaskInfo, TaskResults
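
The added # pylint: disable=unused-import suggests these error classes are imported into meilisearch/client.py only as re-exports, so callers that already import them from the client module keep working. A minimal sketch of such a consumer under that assumption (the helper below is hypothetical, not part of this commit):

# Hypothetical downstream code: the re-export above lets the error types be
# imported via meilisearch.client instead of meilisearch.errors.
from meilisearch.client import MeilisearchApiError, MeilisearchCommunicationError


def is_meilisearch_failure(exc: Exception) -> bool:
    # Both error types remain reachable through the client module.
    return isinstance(exc, (MeilisearchApiError, MeilisearchCommunicationError))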

tests/client/test_chat_completions.py

Lines changed: 14 additions & 23 deletions
@@ -37,22 +37,18 @@ def test_create_chat_completion_basic_stream(client):
     dummy_lines = [
         b'data: {"id":"chatcmpl-1","object":"chat.completion.chunk","choices":[{"delta":{"content":"Hello"}}]}',
         b'data: {"id":"chatcmpl-1","object":"chat.completion.chunk","choices":[{"delta":{"content":" world"}}]}',
-        b'data: [DONE]'
+        b"data: [DONE]",
     ]
     mock_resp = MockStreamingResponse(dummy_lines)
 
-    with patch.object(client.http, 'post_stream', return_value=mock_resp) as mock_post:
+    with patch.object(client.http, "post_stream", return_value=mock_resp) as mock_post:
         messages = [{"role": "user", "content": "Hi"}]
         chunks = list(client.create_chat_completion("my-assistant", messages=messages))
 
         # Verify the HTTP call was made correctly
         mock_post.assert_called_once_with(
             "chats/my-assistant/chat/completions",
-            body={
-                "model": "gpt-3.5-turbo",
-                "messages": messages,
-                "stream": True
-            }
+            body={"model": "gpt-3.5-turbo", "messages": messages, "stream": True},
         )
 
         # Verify the chunks are parsed correctly
@@ -77,7 +73,7 @@ def test_create_chat_completion_json_decode_error(client):
     ]
     mock_resp = MockStreamingResponse(dummy_lines)
 
-    with patch.object(client.http, 'post_stream', return_value=mock_resp):
+    with patch.object(client.http, "post_stream", return_value=mock_resp):
         messages = [{"role": "user", "content": "Test"}]
 
         with pytest.raises(MeilisearchCommunicationError, match="Failed to parse chat chunk"):
@@ -86,8 +82,10 @@ def test_create_chat_completion_json_decode_error(client):
 
 def test_create_chat_completion_http_error_propagated(client):
     """Test that HTTP errors from post_stream are properly propagated."""
-    with patch.object(client.http, 'post_stream') as mock_post:
-        error_response = MockStreamingResponse([], ok=False, status_code=400, text='{"message": "API Error"}')
+    with patch.object(client.http, "post_stream") as mock_post:
+        error_response = MockStreamingResponse(
+            [], ok=False, status_code=400, text='{"message": "API Error"}'
+        )
         mock_post.side_effect = MeilisearchApiError("API Error", error_response)
         messages = [{"role": "user", "content": "Test"}]
 
@@ -100,14 +98,14 @@ def test_get_chat_workspaces(client):
     mock_response = {
         "results": [
             {"uid": "workspace1", "name": "My Workspace", "model": "gpt-3.5-turbo"},
-            {"uid": "workspace2", "name": "Another Workspace", "model": "gpt-4"}
+            {"uid": "workspace2", "name": "Another Workspace", "model": "gpt-4"},
         ],
         "offset": 0,
         "limit": 20,
-        "total": 2
+        "total": 2,
     }
 
-    with patch.object(client.http, 'get', return_value=mock_response) as mock_get:
+    with patch.object(client.http, "get", return_value=mock_response) as mock_get:
         result = client.get_chat_workspaces()
 
         # Verify the HTTP call was made correctly
@@ -119,18 +117,11 @@ def test_get_chat_workspaces(client):
 
 def test_update_chat_workspace_settings(client):
     """Test basic update_chat_workspace_settings functionality."""
-    mock_response = {
-        "model": "gpt-4-turbo",
-        "temperature": 0.8,
-        "max_tokens": 1500
-    }
+    mock_response = {"model": "gpt-4-turbo", "temperature": 0.8, "max_tokens": 1500}
 
-    settings_update = {
-        "temperature": 0.8,
-        "max_tokens": 1500
-    }
+    settings_update = {"temperature": 0.8, "max_tokens": 1500}
 
-    with patch.object(client.http, 'patch', return_value=mock_response) as mock_patch:
+    with patch.object(client.http, "patch", return_value=mock_response) as mock_patch:
         result = client.update_chat_workspace_settings("my-workspace", settings_update)
 
         # Verify the HTTP call was made correctly
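
These tests construct a MockStreamingResponse helper that is defined elsewhere in the suite. Inferring only from how it is called here (MockStreamingResponse(dummy_lines) and MockStreamingResponse([], ok=False, status_code=400, text=...)), a minimal sketch might look like the following; the iter_lines method and the default argument values are assumptions, not code from the repository:

class MockStreamingResponse:
    """Minimal stand-in for a streaming requests.Response (sketch only)."""

    def __init__(self, lines, ok=True, status_code=200, text=""):
        self._lines = lines            # canned SSE byte lines to replay
        self.ok = ok                   # mirrors requests.Response.ok
        self.status_code = status_code
        self.text = text               # raw body used when building API errors

    def iter_lines(self):
        # Assumed iteration hook: yield the canned lines just as a real
        # streaming response would.
        yield from self._lines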
