Skip to content

Commit 042609d

Browse files
authored
Remove asynctest dependency. (#63)
* Remove asynctest, fixes: b/278080256 * black .
1 parent 98ac640 commit 042609d

File tree

2 files changed

+47
-63
lines changed

2 files changed

+47
-63
lines changed

setup.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,6 @@ def get_version():
4747
extras_require = {
4848
"dev": [
4949
"absl-py",
50-
"asynctest",
5150
"black",
5251
"nose2",
5352
"pandas",

tests/test_discuss_async.py

Lines changed: 47 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -16,81 +16,66 @@
1616
import sys
1717
import unittest
1818

19-
if sys.version_info < (3, 11):
20-
import asynctest
21-
from asynctest import mock as async_mock
22-
2319
import google.ai.generativelanguage_v1beta3 as glm
2420

2521
from google.generativeai import discuss
2622
from absl.testing import absltest
2723
from absl.testing import parameterized
2824

29-
bases = (parameterized.TestCase,)
30-
31-
if sys.version_info < (3, 11):
32-
bases = bases + (asynctest.TestCase,)
33-
34-
unittest.skipIf(
35-
sys.version_info >= (3, 11), "asynctest is not suported on python 3.11+"
36-
)
37-
3825

39-
class AsyncTests(*bases):
40-
if sys.version_info < (3, 11):
26+
class AsyncTests(parameterized.TestCase, unittest.IsolatedAsyncioTestCase):
27+
async def test_chat_async(self):
28+
client = unittest.mock.AsyncMock()
4129

42-
async def test_chat_async(self):
43-
client = async_mock.MagicMock()
30+
observed_request = None
4431

45-
observed_request = None
46-
47-
async def fake_generate_message(
48-
request: glm.GenerateMessageRequest,
49-
) -> glm.GenerateMessageResponse:
50-
nonlocal observed_request
51-
observed_request = request
52-
return glm.GenerateMessageResponse(
53-
candidates=[
54-
glm.Message(
55-
author="1", content="Why did the chicken cross the road?"
56-
)
57-
]
58-
)
59-
60-
client.generate_message = fake_generate_message
32+
async def fake_generate_message(
33+
request: glm.GenerateMessageRequest,
34+
) -> glm.GenerateMessageResponse:
35+
nonlocal observed_request
36+
observed_request = request
37+
return glm.GenerateMessageResponse(
38+
candidates=[
39+
glm.Message(
40+
author="1", content="Why did the chicken cross the road?"
41+
)
42+
]
43+
)
6144

62-
observed_response = await discuss.chat_async(
45+
client.generate_message = fake_generate_message
46+
47+
observed_response = await discuss.chat_async(
48+
model="models/bard",
49+
context="Example Prompt",
50+
examples=[["Example from human", "Example response from AI"]],
51+
messages=["Tell me a joke"],
52+
temperature=0.75,
53+
candidate_count=1,
54+
client=client,
55+
)
56+
57+
self.assertEqual(
58+
observed_request,
59+
glm.GenerateMessageRequest(
6360
model="models/bard",
64-
context="Example Prompt",
65-
examples=[["Example from human", "Example response from AI"]],
66-
messages=["Tell me a joke"],
61+
prompt=glm.MessagePrompt(
62+
context="Example Prompt",
63+
examples=[
64+
glm.Example(
65+
input=glm.Message(content="Example from human"),
66+
output=glm.Message(content="Example response from AI"),
67+
)
68+
],
69+
messages=[glm.Message(author="0", content="Tell me a joke")],
70+
),
6771
temperature=0.75,
6872
candidate_count=1,
69-
client=client,
70-
)
71-
72-
self.assertEqual(
73-
observed_request,
74-
glm.GenerateMessageRequest(
75-
model="models/bard",
76-
prompt=glm.MessagePrompt(
77-
context="Example Prompt",
78-
examples=[
79-
glm.Example(
80-
input=glm.Message(content="Example from human"),
81-
output=glm.Message(content="Example response from AI"),
82-
)
83-
],
84-
messages=[glm.Message(author="0", content="Tell me a joke")],
85-
),
86-
temperature=0.75,
87-
candidate_count=1,
88-
),
89-
)
90-
self.assertEqual(
91-
observed_response.candidates,
92-
[{"author": "1", "content": "Why did the chicken cross the road?"}],
93-
)
73+
),
74+
)
75+
self.assertEqual(
76+
observed_response.candidates,
77+
[{"author": "1", "content": "Why did the chicken cross the road?"}],
78+
)
9479

9580

9681
if __name__ == "__main__":

0 commit comments

Comments
 (0)