Skip to content

Commit 2ca0634

Browse files
authored
Merge pull request #2 from stainless-sdks/sam/dynamic-agent-chat-completions-domain
feat: dynamically build domain for agents.chat.completions.create()
2 parents f3629f1 + 3dbd194 commit 2ca0634

File tree

3 files changed

+40
-6
lines changed

3 files changed

+40
-6
lines changed

src/gradientai/resources/agents/chat/completions.py

Lines changed: 24 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
5050
def create(
5151
self,
5252
*,
53+
agent_domain: str,
5354
messages: Iterable[completion_create_params.Message],
5455
model: str,
5556
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -78,6 +79,8 @@ def create(
7879
Creates a model response for the given chat conversation.
7980
8081
Args:
82+
agent_domain: The agent domain to use for the request.
83+
8184
messages: A list of messages comprising the conversation so far.
8285
8386
model: Model ID used to generate the response.
@@ -163,6 +166,7 @@ def create(
163166
def create(
164167
self,
165168
*,
169+
agent_domain: str,
166170
messages: Iterable[completion_create_params.Message],
167171
model: str,
168172
stream: Literal[True],
@@ -191,6 +195,8 @@ def create(
191195
Creates a model response for the given chat conversation.
192196
193197
Args:
198+
agent_domain: The agent domain to use for the request.
199+
194200
messages: A list of messages comprising the conversation so far.
195201
196202
model: Model ID used to generate the response.
@@ -276,6 +282,7 @@ def create(
276282
def create(
277283
self,
278284
*,
285+
agent_domain: str,
279286
messages: Iterable[completion_create_params.Message],
280287
model: str,
281288
stream: bool,
@@ -304,6 +311,8 @@ def create(
304311
Creates a model response for the given chat conversation.
305312
306313
Args:
314+
agent_domain: The agent domain to use for the request.
315+
307316
messages: A list of messages comprising the conversation so far.
308317
309318
model: Model ID used to generate the response.
@@ -385,10 +394,11 @@ def create(
385394
"""
386395
...
387396

388-
@required_args(["messages", "model"], ["messages", "model", "stream"])
397+
@required_args(["agent_domain", "messages", "model"], ["agent_domain", "messages", "model", "stream"])
389398
def create(
390399
self,
391400
*,
401+
agent_domain: str,
392402
messages: Iterable[completion_create_params.Message],
393403
model: str,
394404
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -416,7 +426,7 @@ def create(
416426
return self._post(
417427
"/chat/completions?agent=true"
418428
if self._client._base_url_overridden
419-
else "https://inference.do-ai.run/v1/chat/completions?agent=true",
429+
else f"https://{agent_domain}/v1/chat/completions?agent=true",
420430
body=maybe_transform(
421431
{
422432
"messages": messages,
@@ -474,6 +484,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon
474484
async def create(
475485
self,
476486
*,
487+
agent_domain: str,
477488
messages: Iterable[completion_create_params.Message],
478489
model: str,
479490
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -502,6 +513,8 @@ async def create(
502513
Creates a model response for the given chat conversation.
503514
504515
Args:
516+
agent_domain: The agent domain to use for the request.
517+
505518
messages: A list of messages comprising the conversation so far.
506519
507520
model: Model ID used to generate the response.
@@ -587,6 +600,7 @@ async def create(
587600
async def create(
588601
self,
589602
*,
603+
agent_domain: str,
590604
messages: Iterable[completion_create_params.Message],
591605
model: str,
592606
stream: Literal[True],
@@ -615,6 +629,8 @@ async def create(
615629
Creates a model response for the given chat conversation.
616630
617631
Args:
632+
agent_domain: The agent domain to use for the request.
633+
618634
messages: A list of messages comprising the conversation so far.
619635
620636
model: Model ID used to generate the response.
@@ -700,6 +716,7 @@ async def create(
700716
async def create(
701717
self,
702718
*,
719+
agent_domain: str,
703720
messages: Iterable[completion_create_params.Message],
704721
model: str,
705722
stream: bool,
@@ -728,6 +745,8 @@ async def create(
728745
Creates a model response for the given chat conversation.
729746
730747
Args:
748+
agent_domain: The agent domain to use for the request.
749+
731750
messages: A list of messages comprising the conversation so far.
732751
733752
model: Model ID used to generate the response.
@@ -809,10 +828,11 @@ async def create(
809828
"""
810829
...
811830

812-
@required_args(["messages", "model"], ["messages", "model", "stream"])
831+
@required_args(["agent_domain", "messages", "model"], ["agent_domain", "messages", "model", "stream"])
813832
async def create(
814833
self,
815834
*,
835+
agent_domain: str,
816836
messages: Iterable[completion_create_params.Message],
817837
model: str,
818838
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -840,7 +860,7 @@ async def create(
840860
return await self._post(
841861
"/chat/completions?agent=true"
842862
if self._client._base_url_overridden
843-
else "https://inference.do-ai.run/v1/chat/completions?agent=true",
863+
else f"https://{agent_domain}/v1/chat/completions?agent=true",
844864
body=await async_maybe_transform(
845865
{
846866
"messages": messages,

src/gradientai/resources/chat/completions.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -414,7 +414,6 @@ def create(
414414
extra_body: Body | None = None,
415415
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
416416
) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
417-
418417
# This method requires an inference_key to be set via client argument or environment variable
419418
if not self._client.inference_key:
420419
raise TypeError(
@@ -848,7 +847,6 @@ async def create(
848847
extra_body: Body | None = None,
849848
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
850849
) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
851-
852850
# This method requires an inference_key to be set via client argument or environment variable
853851
if not hasattr(self._client, "inference_key") or not self._client.inference_key:
854852
raise TypeError(

tests/api_resources/agents/chat/test_completions.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ class TestCompletions:
2121
@parametrize
2222
def test_method_create_overload_1(self, client: GradientAI) -> None:
2323
completion = client.agents.chat.completions.create(
24+
agent_domain="inference.do-ai.run",
2425
messages=[
2526
{
2627
"content": "string",
@@ -35,6 +36,7 @@ def test_method_create_overload_1(self, client: GradientAI) -> None:
3536
@parametrize
3637
def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None:
3738
completion = client.agents.chat.completions.create(
39+
agent_domain="inference.do-ai.run",
3840
messages=[
3941
{
4042
"content": "string",
@@ -64,6 +66,7 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N
6466
@parametrize
6567
def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
6668
response = client.agents.chat.completions.with_raw_response.create(
69+
agent_domain="inference.do-ai.run",
6770
messages=[
6871
{
6972
"content": "string",
@@ -82,6 +85,7 @@ def test_raw_response_create_overload_1(self, client: GradientAI) -> None:
8285
@parametrize
8386
def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
8487
with client.agents.chat.completions.with_streaming_response.create(
88+
agent_domain="inference.do-ai.run",
8589
messages=[
8690
{
8791
"content": "string",
@@ -102,6 +106,7 @@ def test_streaming_response_create_overload_1(self, client: GradientAI) -> None:
102106
@parametrize
103107
def test_method_create_overload_2(self, client: GradientAI) -> None:
104108
completion_stream = client.agents.chat.completions.create(
109+
agent_domain="inference.do-ai.run",
105110
messages=[
106111
{
107112
"content": "string",
@@ -117,6 +122,7 @@ def test_method_create_overload_2(self, client: GradientAI) -> None:
117122
@parametrize
118123
def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None:
119124
completion_stream = client.agents.chat.completions.create(
125+
agent_domain="inference.do-ai.run",
120126
messages=[
121127
{
122128
"content": "string",
@@ -146,6 +152,7 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N
146152
@parametrize
147153
def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
148154
response = client.agents.chat.completions.with_raw_response.create(
155+
agent_domain="inference.do-ai.run",
149156
messages=[
150157
{
151158
"content": "string",
@@ -164,6 +171,7 @@ def test_raw_response_create_overload_2(self, client: GradientAI) -> None:
164171
@parametrize
165172
def test_streaming_response_create_overload_2(self, client: GradientAI) -> None:
166173
with client.agents.chat.completions.with_streaming_response.create(
174+
agent_domain="inference.do-ai.run",
167175
messages=[
168176
{
169177
"content": "string",
@@ -191,6 +199,7 @@ class TestAsyncCompletions:
191199
@parametrize
192200
async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None:
193201
completion = await async_client.agents.chat.completions.create(
202+
agent_domain="inference.do-ai.run",
194203
messages=[
195204
{
196205
"content": "string",
@@ -205,6 +214,7 @@ async def test_method_create_overload_1(self, async_client: AsyncGradientAI) ->
205214
@parametrize
206215
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None:
207216
completion = await async_client.agents.chat.completions.create(
217+
agent_domain="inference.do-ai.run",
208218
messages=[
209219
{
210220
"content": "string",
@@ -234,6 +244,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
234244
@parametrize
235245
async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
236246
response = await async_client.agents.chat.completions.with_raw_response.create(
247+
agent_domain="inference.do-ai.run",
237248
messages=[
238249
{
239250
"content": "string",
@@ -252,6 +263,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncGradientA
252263
@parametrize
253264
async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None:
254265
async with async_client.agents.chat.completions.with_streaming_response.create(
266+
agent_domain="inference.do-ai.run",
255267
messages=[
256268
{
257269
"content": "string",
@@ -272,6 +284,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncGra
272284
@parametrize
273285
async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None:
274286
completion_stream = await async_client.agents.chat.completions.create(
287+
agent_domain="inference.do-ai.run",
275288
messages=[
276289
{
277290
"content": "string",
@@ -287,6 +300,7 @@ async def test_method_create_overload_2(self, async_client: AsyncGradientAI) ->
287300
@parametrize
288301
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None:
289302
completion_stream = await async_client.agents.chat.completions.create(
303+
agent_domain="inference.do-ai.run",
290304
messages=[
291305
{
292306
"content": "string",
@@ -316,6 +330,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
316330
@parametrize
317331
async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
318332
response = await async_client.agents.chat.completions.with_raw_response.create(
333+
agent_domain="inference.do-ai.run",
319334
messages=[
320335
{
321336
"content": "string",
@@ -334,6 +349,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradientA
334349
@parametrize
335350
async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None:
336351
async with async_client.agents.chat.completions.with_streaming_response.create(
352+
agent_domain="inference.do-ai.run",
337353
messages=[
338354
{
339355
"content": "string",

0 commit comments

Comments (0)