Skip to content

Commit 240d768

Browse files
Merge 'integration_2025-01-23_700905551106' into 'master'
merge branch integration_2025-01-23_700905551106 into master See merge request: !498
2 parents fe51ca6 + ad0142e commit 240d768

File tree

49 files changed

+4089
-245
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

49 files changed

+4089
-245
lines changed

meta.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
{
2-
"lasted": "1.0.121",
3-
"meta_commit": "fafa370010121d733a30f1c6f5951f35ba5cc8c2"
2+
"lasted": "1.0.122",
3+
"meta_commit": "ca17b9482f09bd2c1ee2e21388b2de97d7497802"
44
}

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
from setuptools import setup, find_packages # noqa: H301
44

55
NAME = "volcengine-python-sdk"
6-
VERSION = "1.0.121"
6+
VERSION = "1.0.122"
77
# To install the library, run the following
88
#
99
# python setup.py install

volcenginesdkark/models/get_api_key_request.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -124,13 +124,6 @@ def resource_type(self, resource_type):
124124
"""
125125
if self._configuration.client_side_validation and resource_type is None:
126126
raise ValueError("Invalid value for `resource_type`, must not be `None`") # noqa: E501
127-
allowed_values = ["endpoint", "bot"] # noqa: E501
128-
if (self._configuration.client_side_validation and
129-
resource_type not in allowed_values):
130-
raise ValueError(
131-
"Invalid value for `resource_type` ({0}), must be one of {1}" # noqa: E501
132-
.format(resource_type, allowed_values)
133-
)
134127

135128
self._resource_type = resource_type
136129

volcenginesdkarkruntime/_client.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ class Ark(SyncAPIClient):
4040
embeddings: resources.Embeddings
4141
tokenization: resources.Tokenization
4242
context: resources.Context
43+
multimodal_embeddings: resources.MultimodalEmbeddings
4344
content_generation: resources.ContentGeneration
4445
batch_chat: resources.BatchChat
4546
model_breaker_map: dict[str, ModelBreaker]
@@ -102,6 +103,7 @@ def __init__(
102103
self.embeddings = resources.Embeddings(self)
103104
self.tokenization = resources.Tokenization(self)
104105
self.context = resources.Context(self)
106+
self.multimodal_embeddings = resources.MultimodalEmbeddings(self)
105107
self.content_generation = resources.ContentGeneration(self)
106108
self.batch_chat = resources.BatchChat(self)
107109
self.model_breaker_map = defaultdict(ModelBreaker)
@@ -146,6 +148,7 @@ class AsyncArk(AsyncAPIClient):
146148
embeddings: resources.AsyncEmbeddings
147149
tokenization: resources.AsyncTokenization
148150
context: resources.AsyncContext
151+
multimodal_embeddings: resources.AsyncMultimodalEmbeddings
149152
content_generation: resources.AsyncContentGeneration
150153
batch_chat: resources.AsyncBatchChat
151154
model_breaker_map: dict[str, ModelBreaker]
@@ -207,6 +210,7 @@ def __init__(
207210
self.embeddings = resources.AsyncEmbeddings(self)
208211
self.tokenization = resources.AsyncTokenization(self)
209212
self.context = resources.AsyncContext(self)
213+
self.multimodal_embeddings = resources.AsyncMultimodalEmbeddings(self)
210214
self.content_generation = resources.AsyncContentGeneration(self)
211215
self.batch_chat = resources.AsyncBatchChat(self)
212216
self.model_breaker_map = defaultdict(ModelBreaker)

volcenginesdkarkruntime/resources/__init__.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from .classification import Classification, AsyncClassification
55
from .bot import BotChat, AsyncBotChat
66
from .context import Context, AsyncContext
7+
from .multimodal_embeddings import MultimodalEmbeddings, AsyncMultimodalEmbeddings
78
from .content_generation import ContentGeneration, AsyncContentGeneration
89
from .batch_chat import BatchChat, AsyncBatchChat
910

@@ -18,8 +19,11 @@
1819
"AsyncTokenization",
1920
"Context",
2021
"AsyncContext",
22+
"MultimodalEmbeddings",
23+
"AsyncMultimodalEmbeddings",
24+
"AsyncContext",
2125
"ContentGeneration",
22-
"AsyncContentGeneration"
26+
"AsyncContentGeneration",
2327
"BatchChat",
2428
"AsyncBatchChat"
2529
]

volcenginesdkarkruntime/resources/bot/completions.py

Lines changed: 56 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -44,33 +44,33 @@ def with_streaming_response(self) -> CompletionsWithStreamingResponse:
4444

4545
@with_sts_token
4646
def create(
47-
self,
48-
*,
49-
messages: Iterable[ChatCompletionMessageParam],
50-
model: str,
51-
frequency_penalty: Optional[float] | None = None,
52-
function_call: completion_create_params.FunctionCall | None = None,
53-
logit_bias: Optional[Dict[str, int]] | None = None,
54-
logprobs: Optional[bool] | None = None,
55-
max_tokens: Optional[int] | None = None,
56-
presence_penalty: Optional[float] | None = None,
57-
stop: Union[Optional[str], List[str]] | None = None,
58-
stream: Optional[Literal[False]] | Literal[True] = False,
59-
stream_options: Optional[ChatCompletionStreamOptionsParam] | None = None,
60-
temperature: Optional[float] | None = None,
61-
tools: Iterable[ChatCompletionToolParam] | None = None,
62-
top_logprobs: Optional[int] | None = None,
63-
top_p: Optional[float] | None = None,
64-
repetition_penalty: Optional[float] | None = None,
65-
n: Optional[int] | None = None,
66-
tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
67-
response_format: completion_create_params.ResponseFormat | None = None,
68-
user: str | None = None,
69-
metadata: Dict[str, object] | None = None,
70-
extra_headers: Headers | None = None,
71-
extra_query: Query | None = None,
72-
extra_body: Body | None = None,
73-
timeout: float | httpx.Timeout | None = None,
47+
self,
48+
*,
49+
messages: Iterable[ChatCompletionMessageParam],
50+
model: str,
51+
frequency_penalty: Optional[float] | None = None,
52+
function_call: completion_create_params.FunctionCall | None = None,
53+
logit_bias: Optional[Dict[str, int]] | None = None,
54+
logprobs: Optional[bool] | None = None,
55+
max_tokens: Optional[int] | None = None,
56+
presence_penalty: Optional[float] | None = None,
57+
stop: Union[Optional[str], List[str]] | None = None,
58+
stream: Optional[Literal[False]] | Literal[True] = False,
59+
stream_options: Optional[ChatCompletionStreamOptionsParam] | None = None,
60+
temperature: Optional[float] | None = None,
61+
tools: Iterable[ChatCompletionToolParam] | None = None,
62+
top_logprobs: Optional[int] | None = None,
63+
top_p: Optional[float] | None = None,
64+
repetition_penalty: Optional[float] | None = None,
65+
n: Optional[int] | None = None,
66+
tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
67+
response_format: completion_create_params.ResponseFormat | None = None,
68+
user: str | None = None,
69+
metadata: Dict[str, object] | None = None,
70+
extra_headers: Headers | None = None,
71+
extra_query: Query | None = None,
72+
extra_body: Body | None = None,
73+
timeout: float | httpx.Timeout | None = None,
7474
) -> BotChatCompletion | Stream[BotChatCompletionChunk]:
7575
return self._post(
7676
"/bots/chat/completions",
@@ -95,7 +95,7 @@ def create(
9595
"n": n,
9696
"tool_choice": tool_choice,
9797
"response_format": response_format,
98-
"metadata": metadata
98+
"metadata": metadata,
9999
},
100100
options=make_request_options(
101101
extra_headers=extra_headers,
@@ -120,33 +120,33 @@ def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
120120

121121
@async_with_sts_token
122122
async def create(
123-
self,
124-
*,
125-
messages: Iterable[ChatCompletionMessageParam],
126-
model: str,
127-
frequency_penalty: Optional[float] | None = None,
128-
function_call: completion_create_params.FunctionCall | None = None,
129-
logit_bias: Optional[Dict[str, int]] | None = None,
130-
logprobs: Optional[bool] | None = None,
131-
max_tokens: Optional[int] | None = None,
132-
presence_penalty: Optional[float] | None = None,
133-
stop: Union[Optional[str], List[str]] | None = None,
134-
stream: Optional[Literal[False]] | Literal[True] = False,
135-
stream_options: Optional[ChatCompletionStreamOptionsParam] | None = None,
136-
temperature: Optional[float] | None = None,
137-
tools: Iterable[ChatCompletionToolParam] | None = None,
138-
top_logprobs: Optional[int] | None = None,
139-
top_p: Optional[float] | None = None,
140-
repetition_penalty: Optional[float] | None = None,
141-
n: Optional[int] | None = None,
142-
tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
143-
response_format: completion_create_params.ResponseFormat | None = None,
144-
user: str | None = None,
145-
metadata: Dict[str, object] | None = None,
146-
extra_headers: Headers | None = None,
147-
extra_query: Query | None = None,
148-
extra_body: Body | None = None,
149-
timeout: float | httpx.Timeout | None = None,
123+
self,
124+
*,
125+
messages: Iterable[ChatCompletionMessageParam],
126+
model: str,
127+
frequency_penalty: Optional[float] | None = None,
128+
function_call: completion_create_params.FunctionCall | None = None,
129+
logit_bias: Optional[Dict[str, int]] | None = None,
130+
logprobs: Optional[bool] | None = None,
131+
max_tokens: Optional[int] | None = None,
132+
presence_penalty: Optional[float] | None = None,
133+
stop: Union[Optional[str], List[str]] | None = None,
134+
stream: Optional[Literal[False]] | Literal[True] = False,
135+
stream_options: Optional[ChatCompletionStreamOptionsParam] | None = None,
136+
temperature: Optional[float] | None = None,
137+
tools: Iterable[ChatCompletionToolParam] | None = None,
138+
top_logprobs: Optional[int] | None = None,
139+
top_p: Optional[float] | None = None,
140+
repetition_penalty: Optional[float] | None = None,
141+
n: Optional[int] | None = None,
142+
tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
143+
response_format: completion_create_params.ResponseFormat | None = None,
144+
user: str | None = None,
145+
metadata: Dict[str, object] | None = None,
146+
extra_headers: Headers | None = None,
147+
extra_query: Query | None = None,
148+
extra_body: Body | None = None,
149+
timeout: float | httpx.Timeout | None = None,
150150
) -> BotChatCompletion | AsyncStream[BotChatCompletionChunk]:
151151
return await self._post(
152152
"/bots/chat/completions",
@@ -171,7 +171,7 @@ async def create(
171171
"n": n,
172172
"tool_choice": tool_choice,
173173
"response_format": response_format,
174-
"metadata": metadata
174+
"metadata": metadata,
175175
},
176176
options=make_request_options(
177177
extra_headers=extra_headers,

0 commit comments

Comments (0)