Skip to content

Commit 0e5effc

Browse files
feat(api): manual updates
change order
1 parent d510ae0 commit 0e5effc

File tree

6 files changed

+79
-79
lines changed

6 files changed

+79
-79
lines changed

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 76
22
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
33
openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
4-
config_hash: 67ce33bbbf8698b50194d8da5fd009d6
4+
config_hash: 9b44ce3fd39c43f2001bc11934e6b1b0

README.md

Lines changed: 12 additions & 12 deletions
Original file line number · Diff line number · Diff line change
@@ -31,7 +31,7 @@ client = GradientAI(
3131
api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
3232
)
3333

34-
completion = client.chat.completions.create(
34+
completion = client.agents.chat.completions.create(
3535
messages=[
3636
{
3737
"role": "user",
@@ -63,7 +63,7 @@ client = AsyncGradientAI(
6363

6464

6565
async def main() -> None:
66-
completion = await client.chat.completions.create(
66+
completion = await client.agents.chat.completions.create(
6767
messages=[
6868
{
6969
"role": "user",
@@ -105,7 +105,7 @@ async def main() -> None:
105105
api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
106106
http_client=DefaultAioHttpClient(),
107107
) as client:
108-
completion = await client.chat.completions.create(
108+
completion = await client.agents.chat.completions.create(
109109
messages=[
110110
{
111111
"role": "user",
@@ -129,7 +129,7 @@ from gradientai import GradientAI
129129

130130
client = GradientAI()
131131

132-
stream = client.chat.completions.create(
132+
stream = client.agents.chat.completions.create(
133133
messages=[
134134
{
135135
"role": "user",
@@ -150,7 +150,7 @@ from gradientai import AsyncGradientAI
150150

151151
client = AsyncGradientAI()
152152

153-
stream = await client.chat.completions.create(
153+
stream = await client.agents.chat.completions.create(
154154
messages=[
155155
{
156156
"role": "user",
@@ -182,7 +182,7 @@ from gradientai import GradientAI
182182

183183
client = GradientAI()
184184

185-
completion = client.chat.completions.create(
185+
completion = client.agents.chat.completions.create(
186186
messages=[
187187
{
188188
"content": "string",
@@ -211,7 +211,7 @@ from gradientai import GradientAI
211211
client = GradientAI()
212212

213213
try:
214-
client.chat.completions.create(
214+
client.agents.chat.completions.create(
215215
messages=[
216216
{
217217
"role": "user",
@@ -262,7 +262,7 @@ client = GradientAI(
262262
)
263263

264264
# Or, configure per-request:
265-
client.with_options(max_retries=5).chat.completions.create(
265+
client.with_options(max_retries=5).agents.chat.completions.create(
266266
messages=[
267267
{
268268
"role": "user",
@@ -293,7 +293,7 @@ client = GradientAI(
293293
)
294294

295295
# Override per-request:
296-
client.with_options(timeout=5.0).chat.completions.create(
296+
client.with_options(timeout=5.0).agents.chat.completions.create(
297297
messages=[
298298
{
299299
"role": "user",
@@ -342,7 +342,7 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
342342
from gradientai import GradientAI
343343

344344
client = GradientAI()
345-
response = client.chat.completions.with_raw_response.create(
345+
response = client.agents.chat.completions.with_raw_response.create(
346346
messages=[{
347347
"role": "user",
348348
"content": "What is the capital of France?",
@@ -351,7 +351,7 @@ response = client.chat.completions.with_raw_response.create(
351351
)
352352
print(response.headers.get('X-My-Header'))
353353

354-
completion = response.parse() # get the object that `chat.completions.create()` would have returned
354+
completion = response.parse() # get the object that `agents.chat.completions.create()` would have returned
355355
print(completion.choices)
356356
```
357357

@@ -366,7 +366,7 @@ The above interface eagerly reads the full response body when you make the reque
366366
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
367367

368368
```python
369-
with client.chat.completions.with_streaming_response.create(
369+
with client.agents.chat.completions.with_streaming_response.create(
370370
messages=[
371371
{
372372
"role": "user",

api.md

Lines changed: 14 additions & 14 deletions
Original file line number · Diff line number · Diff line change
@@ -4,20 +4,6 @@
44
from gradientai.types import APILinks, APIMeta, ChatCompletionTokenLogprob
55
```
66

7-
# Chat
8-
9-
## Completions
10-
11-
Types:
12-
13-
```python
14-
from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse
15-
```
16-
17-
Methods:
18-
19-
- <code title="post /chat/completions">client.chat.completions.<a href="./src/gradientai/resources/chat/completions.py">create</a>(\*\*<a href="src/gradientai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/gradientai/types/chat/completion_create_response.py">CompletionCreateResponse</a></code>
20-
217
# Agents
228

239
Types:
@@ -267,6 +253,20 @@ Methods:
267253
- <code title="post /v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}">client.agents.routes.<a href="./src/gradientai/resources/agents/routes.py">add</a>(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*<a href="src/gradientai/types/agents/route_add_params.py">params</a>) -> <a href="./src/gradientai/types/agents/route_add_response.py">RouteAddResponse</a></code>
268254
- <code title="get /v2/gen-ai/agents/{uuid}/child_agents">client.agents.routes.<a href="./src/gradientai/resources/agents/routes.py">view</a>(uuid) -> <a href="./src/gradientai/types/agents/route_view_response.py">RouteViewResponse</a></code>
269255

256+
# Chat
257+
258+
## Completions
259+
260+
Types:
261+
262+
```python
263+
from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse
264+
```
265+
266+
Methods:
267+
268+
- <code title="post /chat/completions">client.chat.completions.<a href="./src/gradientai/resources/chat/completions.py">create</a>(\*\*<a href="src/gradientai/types/chat/completion_create_params.py">params</a>) -> <a href="./src/gradientai/types/chat/completion_create_response.py">CompletionCreateResponse</a></code>
269+
270270
# ModelProviders
271271

272272
## Anthropic

src/gradientai/_client.py

Lines changed: 36 additions & 36 deletions
Original file line number · Diff line number · Diff line change
@@ -119,18 +119,18 @@ def __init__(
119119

120120
self._default_stream_cls = Stream
121121

122-
@cached_property
123-
def chat(self) -> ChatResource:
124-
from .resources.chat import ChatResource
125-
126-
return ChatResource(self)
127-
128122
@cached_property
129123
def agents(self) -> AgentsResource:
130124
from .resources.agents import AgentsResource
131125

132126
return AgentsResource(self)
133127

128+
@cached_property
129+
def chat(self) -> ChatResource:
130+
from .resources.chat import ChatResource
131+
132+
return ChatResource(self)
133+
134134
@cached_property
135135
def model_providers(self) -> ModelProvidersResource:
136136
from .resources.model_providers import ModelProvidersResource
@@ -359,18 +359,18 @@ def __init__(
359359

360360
self._default_stream_cls = AsyncStream
361361

362-
@cached_property
363-
def chat(self) -> AsyncChatResource:
364-
from .resources.chat import AsyncChatResource
365-
366-
return AsyncChatResource(self)
367-
368362
@cached_property
369363
def agents(self) -> AsyncAgentsResource:
370364
from .resources.agents import AsyncAgentsResource
371365

372366
return AsyncAgentsResource(self)
373367

368+
@cached_property
369+
def chat(self) -> AsyncChatResource:
370+
from .resources.chat import AsyncChatResource
371+
372+
return AsyncChatResource(self)
373+
374374
@cached_property
375375
def model_providers(self) -> AsyncModelProvidersResource:
376376
from .resources.model_providers import AsyncModelProvidersResource
@@ -539,18 +539,18 @@ class GradientAIWithRawResponse:
539539
def __init__(self, client: GradientAI) -> None:
540540
self._client = client
541541

542-
@cached_property
543-
def chat(self) -> chat.ChatResourceWithRawResponse:
544-
from .resources.chat import ChatResourceWithRawResponse
545-
546-
return ChatResourceWithRawResponse(self._client.chat)
547-
548542
@cached_property
549543
def agents(self) -> agents.AgentsResourceWithRawResponse:
550544
from .resources.agents import AgentsResourceWithRawResponse
551545

552546
return AgentsResourceWithRawResponse(self._client.agents)
553547

548+
@cached_property
549+
def chat(self) -> chat.ChatResourceWithRawResponse:
550+
from .resources.chat import ChatResourceWithRawResponse
551+
552+
return ChatResourceWithRawResponse(self._client.chat)
553+
554554
@cached_property
555555
def model_providers(self) -> model_providers.ModelProvidersResourceWithRawResponse:
556556
from .resources.model_providers import ModelProvidersResourceWithRawResponse
@@ -588,18 +588,18 @@ class AsyncGradientAIWithRawResponse:
588588
def __init__(self, client: AsyncGradientAI) -> None:
589589
self._client = client
590590

591-
@cached_property
592-
def chat(self) -> chat.AsyncChatResourceWithRawResponse:
593-
from .resources.chat import AsyncChatResourceWithRawResponse
594-
595-
return AsyncChatResourceWithRawResponse(self._client.chat)
596-
597591
@cached_property
598592
def agents(self) -> agents.AsyncAgentsResourceWithRawResponse:
599593
from .resources.agents import AsyncAgentsResourceWithRawResponse
600594

601595
return AsyncAgentsResourceWithRawResponse(self._client.agents)
602596

597+
@cached_property
598+
def chat(self) -> chat.AsyncChatResourceWithRawResponse:
599+
from .resources.chat import AsyncChatResourceWithRawResponse
600+
601+
return AsyncChatResourceWithRawResponse(self._client.chat)
602+
603603
@cached_property
604604
def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithRawResponse:
605605
from .resources.model_providers import AsyncModelProvidersResourceWithRawResponse
@@ -637,18 +637,18 @@ class GradientAIWithStreamedResponse:
637637
def __init__(self, client: GradientAI) -> None:
638638
self._client = client
639639

640-
@cached_property
641-
def chat(self) -> chat.ChatResourceWithStreamingResponse:
642-
from .resources.chat import ChatResourceWithStreamingResponse
643-
644-
return ChatResourceWithStreamingResponse(self._client.chat)
645-
646640
@cached_property
647641
def agents(self) -> agents.AgentsResourceWithStreamingResponse:
648642
from .resources.agents import AgentsResourceWithStreamingResponse
649643

650644
return AgentsResourceWithStreamingResponse(self._client.agents)
651645

646+
@cached_property
647+
def chat(self) -> chat.ChatResourceWithStreamingResponse:
648+
from .resources.chat import ChatResourceWithStreamingResponse
649+
650+
return ChatResourceWithStreamingResponse(self._client.chat)
651+
652652
@cached_property
653653
def model_providers(self) -> model_providers.ModelProvidersResourceWithStreamingResponse:
654654
from .resources.model_providers import ModelProvidersResourceWithStreamingResponse
@@ -686,18 +686,18 @@ class AsyncGradientAIWithStreamedResponse:
686686
def __init__(self, client: AsyncGradientAI) -> None:
687687
self._client = client
688688

689-
@cached_property
690-
def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
691-
from .resources.chat import AsyncChatResourceWithStreamingResponse
692-
693-
return AsyncChatResourceWithStreamingResponse(self._client.chat)
694-
695689
@cached_property
696690
def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse:
697691
from .resources.agents import AsyncAgentsResourceWithStreamingResponse
698692

699693
return AsyncAgentsResourceWithStreamingResponse(self._client.agents)
700694

695+
@cached_property
696+
def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
697+
from .resources.chat import AsyncChatResourceWithStreamingResponse
698+
699+
return AsyncChatResourceWithStreamingResponse(self._client.chat)
700+
701701
@cached_property
702702
def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithStreamingResponse:
703703
from .resources.model_providers import AsyncModelProvidersResourceWithStreamingResponse

src/gradientai/resources/__init__.py

Lines changed: 6 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -58,18 +58,18 @@
5858
)
5959

6060
__all__ = [
61-
"ChatResource",
62-
"AsyncChatResource",
63-
"ChatResourceWithRawResponse",
64-
"AsyncChatResourceWithRawResponse",
65-
"ChatResourceWithStreamingResponse",
66-
"AsyncChatResourceWithStreamingResponse",
6761
"AgentsResource",
6862
"AsyncAgentsResource",
6963
"AgentsResourceWithRawResponse",
7064
"AsyncAgentsResourceWithRawResponse",
7165
"AgentsResourceWithStreamingResponse",
7266
"AsyncAgentsResourceWithStreamingResponse",
67+
"ChatResource",
68+
"AsyncChatResource",
69+
"ChatResourceWithRawResponse",
70+
"AsyncChatResourceWithRawResponse",
71+
"ChatResourceWithStreamingResponse",
72+
"AsyncChatResourceWithStreamingResponse",
7373
"ModelProvidersResource",
7474
"AsyncModelProvidersResource",
7575
"ModelProvidersResourceWithRawResponse",

0 commit comments

Comments (0)