
Commit cd26391: gen sdk GCP
Parent: 2282e4a

23 files changed, +236 −117 lines

.speakeasy/workflow.lock (5 additions, 5 deletions)

```diff
@@ -8,8 +8,8 @@ sources:
             - latest
     mistral-google-cloud-source:
         sourceNamespace: mistral-google-cloud-source
-        sourceRevisionDigest: sha256:d3e3d15303dcc1acb27b8895aa3064328bd5b8013ea635c2bce553b6e647b498
-        sourceBlobDigest: sha256:db72004ee842a27c3e77980be28a727811e0581daa7a51ad34d142302f8ba2f3
+        sourceRevisionDigest: sha256:e0fd58ce2dbba068f375d3a23d758b8678c2a68cf4fc7bc46ea7e1b37abe0647
+        sourceBlobDigest: sha256:0707d8d2566a9ef4ef286bb0abe467f8696ccf83ba73091065d7caf627a06611
         tags:
             - latest
     mistral-openapi:
@@ -29,10 +29,10 @@ targets:
     mistralai-gcp-sdk:
         source: mistral-google-cloud-source
         sourceNamespace: mistral-google-cloud-source
-        sourceRevisionDigest: sha256:d3e3d15303dcc1acb27b8895aa3064328bd5b8013ea635c2bce553b6e647b498
-        sourceBlobDigest: sha256:db72004ee842a27c3e77980be28a727811e0581daa7a51ad34d142302f8ba2f3
+        sourceRevisionDigest: sha256:e0fd58ce2dbba068f375d3a23d758b8678c2a68cf4fc7bc46ea7e1b37abe0647
+        sourceBlobDigest: sha256:0707d8d2566a9ef4ef286bb0abe467f8696ccf83ba73091065d7caf627a06611
         codeSamplesNamespace: mistral-openapi-google-cloud-code-samples
-        codeSamplesRevisionDigest: sha256:7d95ba7aa230088b9975be341ba638e51cc574b6e863bd3a0f53e9c5ee261bba
+        codeSamplesRevisionDigest: sha256:0657ec41e473356a5a0eeaca3dff137e9ff16080ec1fb50e72553245aa86ffe5
     mistralai-sdk:
         source: mistral-openapi
         sourceNamespace: mistral-openapi
```

packages/mistralai_gcp/.speakeasy/gen.lock (7 additions, 7 deletions)

```diff
@@ -1,18 +1,18 @@
 lockVersion: 2.0.0
 id: ec60f2d8-7869-45c1-918e-773d41a8cf74
 management:
-  docChecksum: e6c0a4254e61b1f171b409862f717867
+  docChecksum: d50a06ac34844141709fa2e57cc940c5
   docVersion: 0.0.2
-  speakeasyVersion: 1.440.1
-  generationVersion: 2.460.1
+  speakeasyVersion: 1.451.1
+  generationVersion: 2.470.1
   releaseVersion: 1.2.3
   configChecksum: 3fc99d7ec7ee057a323b593ebf8fdb8c
   published: true
 features:
   python:
     additionalDependencies: 1.0.0
     constsAndDefaults: 1.0.5
-    core: 5.6.5
+    core: 5.6.8
     defaultEnabledRetries: 0.2.0
     enumUnions: 0.1.0
     envVarSecurityUsage: 0.3.2
@@ -29,11 +29,10 @@ features:
     responseFormat: 1.0.1
     retries: 3.0.2
     sdkHooks: 1.0.0
-    serverEvents: 1.0.4
+    serverEvents: 1.0.7
    serverEventsSentinels: 0.1.0
     serverIDs: 3.0.0
-    tests: 1.6.0
-    unions: 3.0.3
+    unions: 3.0.4
 generatedFiles:
   - .gitattributes
   - .python-version
@@ -187,3 +186,4 @@ examples:
       "200":
         application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []}
       "422": {}
+generatedTests: {}
```

packages/mistralai_gcp/pyproject.toml (2 additions, 2 deletions)

```diff
@@ -22,12 +22,12 @@ google-auth = "2.27.0"
 httpx = "^0.27.0"
 jsonpath-python = "^1.0.6"
 pydantic = "~2.9.2"
-python-dateutil = "2.8.2"
+python-dateutil = "^2.8.2"
 requests = "^2.32.3"
 typing-inspect = "^0.9.0"
 
 [tool.poetry.group.dev.dependencies]
-mypy = "==1.10.1"
+mypy = "==1.13.0"
 pylint = "==3.2.3"
 pytest = "^8.2.2"
 pytest-asyncio = "^0.23.7"
```
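Two dependency changes here: `python-dateutil` is loosened from the exact pin `2.8.2` to the caret range `^2.8.2` (any 2.x release at or above 2.8.2 under Poetry's caret semantics), which should reduce resolver conflicts for downstream users, and the dev-only `mypy` pin is bumped from 1.10.1 to 1.13.0.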

packages/mistralai_gcp/src/mistralai_gcp/chat.py (5 additions, 5 deletions)

```diff
@@ -5,7 +5,7 @@
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
-from typing import Any, AsyncGenerator, Generator, List, Optional, Union
+from typing import Any, List, Optional, Union
 
 
 class Chat(BaseSDK):
@@ -40,7 +40,7 @@ def stream(
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[Generator[models.CompletionEvent, None, None]]:
+    ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
         r"""Stream chat completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -132,7 +132,7 @@ def stream(
 
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events(
+            return eventstreaming.EventStream(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
@@ -185,7 +185,7 @@ async def stream_async(
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]:
+    ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
         r"""Stream chat completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -277,7 +277,7 @@ async def stream_async(
 
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events_async(
+            return eventstreaming.EventStreamAsync(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
```
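Note the API shape change: `stream` and `stream_async` now return `EventStream` / `EventStreamAsync` wrappers rather than bare generators. Iteration works as before, but a wrapper object can also own the underlying HTTP response. A minimal, hypothetical usage sketch follows; the `MistralGoogleCloud` client name and the wrapper's context-manager support are assumptions based on the SDK's conventions, not shown in this diff.

```python
# Hypothetical sketch: assumes mistralai_gcp exposes a MistralGoogleCloud
# client and that EventStream supports the context-manager protocol.
from mistralai_gcp import MistralGoogleCloud

client = MistralGoogleCloud(region="europe-west4", project_id="my-gcp-project")

res = client.chat.stream(
    model="mistral-large-2407",
    messages=[{"role": "user", "content": "Say hello."}],
)
if res is not None:
    with res:  # close the underlying HTTP response even if we stop early
        for event in res:  # each event is a models.CompletionEvent
            print(event.data)
```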

packages/mistralai_gcp/src/mistralai_gcp/fim.py (5 additions, 5 deletions)

```diff
@@ -5,7 +5,7 @@
 from mistralai_gcp._hooks import HookContext
 from mistralai_gcp.types import Nullable, OptionalNullable, UNSET
 from mistralai_gcp.utils import eventstreaming
-from typing import Any, AsyncGenerator, Generator, Optional, Union
+from typing import Any, Optional, Union
 
 
 class Fim(BaseSDK):
@@ -32,7 +32,7 @@ def stream(
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[Generator[models.CompletionEvent, None, None]]:
+    ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
         r"""Stream fim completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -112,7 +112,7 @@ def stream(
 
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events(
+            return eventstreaming.EventStream(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
@@ -157,7 +157,7 @@ async def stream_async(
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
-    ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]:
+    ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
         r"""Stream fim completion
 
         Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
@@ -237,7 +237,7 @@ async def stream_async(
 
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events_async(
+            return eventstreaming.EventStreamAsync(
                 http_res,
                 lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                 sentinel="[DONE]",
```
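The same substitution applies on the async side, where `stream_events_async` gives way to an `EventStreamAsync` wrapper. A hypothetical async sketch under the same assumptions as the chat example above (client name, `async with` / `async for` support); the model name and FIM parameters are illustrative:

```python
# Hypothetical async sketch; client setup and arguments are illustrative.
import asyncio

from mistralai_gcp import MistralGoogleCloud

async def main() -> None:
    client = MistralGoogleCloud(region="europe-west4", project_id="my-gcp-project")
    res = await client.fim.stream_async(
        model="codestral-2405",
        prompt="def fibonacci(n: int) -> int:",
    )
    if res is not None:
        async with res:  # release the HTTP response deterministically
            async for event in res:  # models.CompletionEvent items
                print(event.data)

asyncio.run(main())
```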

packages/mistralai_gcp/src/mistralai_gcp/httpclient.py (6 additions, 0 deletions)

```diff
@@ -41,6 +41,9 @@ def build_request(
     ) -> httpx.Request:
         pass
 
+    def close(self) -> None:
+        pass
+
 
 @runtime_checkable
 class AsyncHttpClient(Protocol):
@@ -76,3 +79,6 @@ def build_request(
         extensions: Optional[httpx._types.RequestExtensions] = None,
     ) -> httpx.Request:
         pass
+
+    async def aclose(self) -> None:
+        pass
```
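Both protocols gain a lifecycle hook: `close()` on `HttpClient` and `aclose()` on `AsyncHttpClient`. Stock `httpx.Client` / `httpx.AsyncClient` instances already provide these methods, so existing callers keep working; a custom client now just needs to expose them too. A sketch of a conforming wrapper (the `LoggingHttpClient` name is invented for illustration, and the full `send` / `build_request` signatures from the generated protocol are elided behind `**kwargs`):

```python
# Illustrative only: a custom sync client that satisfies the widened
# HttpClient protocol by delegating to httpx.Client.
import httpx

class LoggingHttpClient:
    def __init__(self) -> None:
        self._client = httpx.Client()

    def send(self, request: httpx.Request, **kwargs) -> httpx.Response:
        print(f"-> {request.method} {request.url}")
        return self._client.send(request, **kwargs)

    def build_request(self, method: str, url: str, **kwargs) -> httpx.Request:
        return self._client.build_request(method, url, **kwargs)

    def close(self) -> None:
        # The new protocol method: release pooled connections.
        self._client.close()
```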

packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py (7 additions, 3 deletions)

```diff
@@ -12,13 +12,17 @@
 )
 from pydantic import model_serializer
 from typing import List, Literal, Optional, Union
-from typing_extensions import NotRequired, TypedDict
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
 
 
-AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]]
+AssistantMessageContentTypedDict = TypeAliasType(
+    "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
+)
 
 
-AssistantMessageContent = Union[str, List[ContentChunk]]
+AssistantMessageContent = TypeAliasType(
+    "AssistantMessageContent", Union[str, List[ContentChunk]]
+)
 
 
 AssistantMessageRole = Literal["assistant"]
```
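The pattern repeated throughout these model files is mechanical: every bare `Union` alias is wrapped in `typing_extensions.TypeAliasType`, the backport of PEP 695's explicit alias form. Unlike a plain assignment, the alias keeps its own name at runtime, which tools such as pydantic can surface in error messages and schemas, and it supports forward (including self-referential) references. A standalone illustration (the `Json` alias below is an example, not a generated SDK type):

```python
# Standalone TypeAliasType illustration; "Json" is not an SDK type.
from typing import Dict, List, Union

from typing_extensions import TypeAliasType

# A named alias that may refer to itself via forward references.
Json = TypeAliasType(
    "Json",
    Union[str, int, float, bool, None, List["Json"], Dict[str, "Json"]],
)

print(Json.__name__)   # "Json": the alias keeps its name at runtime
print(Json.__value__)  # the underlying Union type
```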

packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py (23 additions, 11 deletions)

```diff
@@ -19,23 +19,30 @@
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
 from typing import List, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypedDict
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
-ChatCompletionRequestStopTypedDict = Union[str, List[str]]
+ChatCompletionRequestStopTypedDict = TypeAliasType(
+    "ChatCompletionRequestStopTypedDict", Union[str, List[str]]
+)
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
 
 
-ChatCompletionRequestStop = Union[str, List[str]]
+ChatCompletionRequestStop = TypeAliasType(
+    "ChatCompletionRequestStop", Union[str, List[str]]
+)
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
 
 
-ChatCompletionRequestMessagesTypedDict = Union[
-    SystemMessageTypedDict,
-    UserMessageTypedDict,
-    AssistantMessageTypedDict,
-    ToolMessageTypedDict,
-]
+ChatCompletionRequestMessagesTypedDict = TypeAliasType(
+    "ChatCompletionRequestMessagesTypedDict",
+    Union[
+        SystemMessageTypedDict,
+        UserMessageTypedDict,
+        AssistantMessageTypedDict,
+        ToolMessageTypedDict,
+    ],
+)
 
 
 ChatCompletionRequestMessages = Annotated[
@@ -49,10 +56,15 @@
 ]
 
 
-ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum]
+ChatCompletionRequestToolChoiceTypedDict = TypeAliasType(
+    "ChatCompletionRequestToolChoiceTypedDict",
+    Union[ToolChoiceTypedDict, ToolChoiceEnum],
+)
 
 
-ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]
+ChatCompletionRequestToolChoice = TypeAliasType(
+    "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
+)
 
 
 class ChatCompletionRequestTypedDict(TypedDict):
```

packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py (19 additions, 13 deletions)

```diff
@@ -19,23 +19,26 @@
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag, model_serializer
 from typing import List, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypedDict
+from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
-StopTypedDict = Union[str, List[str]]
+StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]])
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
 
 
-Stop = Union[str, List[str]]
+Stop = TypeAliasType("Stop", Union[str, List[str]])
 r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
 
 
-MessagesTypedDict = Union[
-    SystemMessageTypedDict,
-    UserMessageTypedDict,
-    AssistantMessageTypedDict,
-    ToolMessageTypedDict,
-]
+MessagesTypedDict = TypeAliasType(
+    "MessagesTypedDict",
+    Union[
+        SystemMessageTypedDict,
+        UserMessageTypedDict,
+        AssistantMessageTypedDict,
+        ToolMessageTypedDict,
+    ],
+)
 
 
 Messages = Annotated[
@@ -49,12 +52,15 @@
 ]
 
 
-ChatCompletionStreamRequestToolChoiceTypedDict = Union[
-    ToolChoiceTypedDict, ToolChoiceEnum
-]
+ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType(
+    "ChatCompletionStreamRequestToolChoiceTypedDict",
+    Union[ToolChoiceTypedDict, ToolChoiceEnum],
+)
 
 
-ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]
+ChatCompletionStreamRequestToolChoice = TypeAliasType(
+    "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum]
+)
 
 
 class ChatCompletionStreamRequestTypedDict(TypedDict):
```

packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py (4 additions, 2 deletions)

```diff
@@ -6,10 +6,12 @@
 from mistralai_gcp.utils import get_discriminator
 from pydantic import Discriminator, Tag
 from typing import Union
-from typing_extensions import Annotated
+from typing_extensions import Annotated, TypeAliasType
 
 
-ContentChunkTypedDict = Union[TextChunkTypedDict, ReferenceChunkTypedDict]
+ContentChunkTypedDict = TypeAliasType(
+    "ContentChunkTypedDict", Union[TextChunkTypedDict, ReferenceChunkTypedDict]
+)
 
 
 ContentChunk = Annotated[
```