
Commit 95bab24

Merge 'integration_2024-08-08_369120306946' into 'master'
merge branch integration_2024-08-08_369120306946 into master
See merge request: !361

2 parents: 5a6662d + ec3ed6a

File tree

141 files changed (+24893 additions, -81 deletions)


meta.json

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
 {
-    "lasted": "1.0.94",
-    "meta_commit": "92cfd1814f057ab80f1ec26a43284b1038a2771f"
+    "lasted": "1.0.95",
+    "meta_commit": "b75db8c6be327ebe7c1c1a161814a0d99c097ba1"
 }

setup.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 from setuptools import setup, find_packages  # noqa: H301
 
 NAME = "volcengine-python-sdk"
-VERSION = "1.0.94"
+VERSION = "1.0.95"
 # To install the library, run the following
 #
 # python setup.py install

volcenginesdkarkruntime/_client.py

Lines changed: 4 additions & 2 deletions
@@ -34,6 +34,7 @@ class Ark(SyncAPIClient):
     chat: resources.Chat
     bot_chat: resources.BotChat
     embeddings: resources.Embeddings
+    tokenization: resources.Tokenization
 
     def __init__(
         self,
@@ -88,7 +89,7 @@ def __init__(
         self.chat = resources.Chat(self)
         self.bot_chat = resources.BotChat(self)
         self.embeddings = resources.Embeddings(self)
-        # self.tokenization = resources.Tokenization(self)
+        self.tokenization = resources.Tokenization(self)
         # self.classification = resources.Classification(self)
 
     def _get_endpoint_sts_token(self, endpoint_id: str):
@@ -115,6 +116,7 @@ class AsyncArk(AsyncAPIClient):
     chat: resources.AsyncChat
     bot_chat: resources.AsyncBotChat
     embeddings: resources.AsyncEmbeddings
+    tokenization: resources.AsyncTokenization
 
     def __init__(
         self,
@@ -168,7 +170,7 @@ def __init__(
         self.chat = resources.AsyncChat(self)
        self.bot_chat = resources.AsyncBotChat(self)
         self.embeddings = resources.AsyncEmbeddings(self)
-        # self.tokenization = resources.AsyncTokenization(self)
+        self.tokenization = resources.AsyncTokenization(self)
         # self.classification = resources.AsyncClassification(self)
 
     def _get_endpoint_sts_token(self, endpoint_id: str):
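
With the Tokenization resource now enabled on both clients, a minimal sketch of reaching the new attribute (the api_key keyword and value are assumed placeholders; other authentication options are not shown in this diff):

from volcenginesdkarkruntime import Ark, AsyncArk

client = Ark(api_key="YOUR_ARK_API_KEY")
# tokenization is now constructed by default, alongside chat, bot_chat and embeddings
print(type(client.tokenization))        # resources.Tokenization

async_client = AsyncArk(api_key="YOUR_ARK_API_KEY")
print(type(async_client.tokenization))  # resources.AsyncTokenization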

volcenginesdkarkruntime/_constants.py

Lines changed: 2 additions & 2 deletions
@@ -2,15 +2,15 @@
 
 import httpx
 
-VERSION = ""
+VERSION = "1.0.0"
 BASE_URL = "https://ark.cn-beijing.volces.com/api/v3"
 
 RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response"
 CLIENT_REQUEST_HEADER = "X-Client-Request-Id"
 SERVER_REQUEST_HEADER = "X-Request-Id"
 
 # default timeout is 1 minutes
-DEFAULT_TIMEOUT = httpx.Timeout(timeout=60.0, connect=60.0)
+DEFAULT_TIMEOUT = httpx.Timeout(timeout=600.0, connect=60.0)
 DEFAULT_MAX_RETRIES = 2
 DEFAULT_CONNECTION_LIMITS = httpx.Limits(
     max_connections=1000, max_keepalive_connections=100
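
The default read timeout therefore rises from 60 seconds to 600 seconds, while the connect timeout stays at 60 seconds. A minimal sketch of overriding it per client, assuming the Ark constructor exposes a timeout argument backed by these constants (not shown in this diff):

import httpx
from volcenginesdkarkruntime import Ark

# Assumption: the client accepts an httpx.Timeout via `timeout=`.
# Keep the new 10-minute read timeout but use a tighter connect timeout.
client = Ark(
    api_key="YOUR_ARK_API_KEY",
    timeout=httpx.Timeout(timeout=600.0, connect=10.0),
)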

volcenginesdkarkruntime/_utils/_utils.py

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ async def wrapper(*args, **kwargs):
 
 def _insert_sts_token(args, kwargs):
     assert len(args) > 0
-    assert "model" in kwargs
+    assert "model" in kwargs, "you need to support model"
 
     ark_client = args[0]._client
     model = kwargs.get("model", "")
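
The assertion requires callers of any resource method routed through this STS-token helper to pass model as a keyword argument. A sketch of the expected call shape, assuming chat completions go through this helper (the endpoint ID is a hypothetical placeholder):

# Omitting model= would now raise: AssertionError: you need to support model
completion = client.chat.completions.create(
    model="<your-endpoint-id>",  # hypothetical placeholder
    messages=[{"role": "user", "content": "hello"}],
)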

volcenginesdkarkruntime/resources/chat/completions.py

Lines changed: 17 additions & 4 deletions
@@ -25,6 +25,7 @@
     completion_create_params,
     ChatCompletionStreamOptionsParam,
     ChatCompletionToolParam,
+    ChatCompletionToolChoiceOptionParam
 )
 
 __all__ = ["Completions", "AsyncCompletions"]
@@ -47,7 +48,6 @@ def create(
         model: str,
         frequency_penalty: Optional[float] | None = None,
         function_call: completion_create_params.FunctionCall | None = None,
-        functions: Iterable[completion_create_params.Function] | None = None,
         logit_bias: Optional[Dict[str, int]] | None = None,
         logprobs: Optional[bool] | None = None,
         max_tokens: Optional[int] | None = None,
@@ -59,6 +59,10 @@ def create(
         tools: Iterable[ChatCompletionToolParam] | None = None,
         top_logprobs: Optional[int] | None = None,
         top_p: Optional[float] | None = None,
+        repetition_penalty: Optional[float] | None = None,
+        n: Optional[int] | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        response_format: completion_create_params.ResponseFormat | None = None,
         user: str | None = None,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
@@ -72,7 +76,6 @@ def create(
                 "model": model,
                 "frequency_penalty": frequency_penalty,
                 "function_call": function_call,
-                "functions": functions,
                 "logit_bias": logit_bias,
                 "logprobs": logprobs,
                 "max_tokens": max_tokens,
@@ -85,6 +88,10 @@ def create(
                 "top_logprobs": top_logprobs,
                 "top_p": top_p,
                 "user": user,
+                "repetition_penalty": repetition_penalty,
+                "n": n,
+                "tool_choice": tool_choice,
+                "response_format": response_format,
             },
             options=make_request_options(
                 extra_headers=extra_headers,
@@ -115,7 +122,6 @@ async def create(
         model: str,
         frequency_penalty: Optional[float] | None = None,
         function_call: completion_create_params.FunctionCall | None = None,
-        functions: Iterable[completion_create_params.Function] | None = None,
         logit_bias: Optional[Dict[str, int]] | None = None,
         logprobs: Optional[bool] | None = None,
         max_tokens: Optional[int] | None = None,
@@ -128,6 +134,10 @@ async def create(
         top_logprobs: Optional[int] | None = None,
         top_p: Optional[float] | None = None,
         user: str | None = None,
+        repetition_penalty: Optional[float] | None = None,
+        n: Optional[int] | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        response_format: completion_create_params.ResponseFormat | None = None,
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
@@ -140,7 +150,6 @@ async def create(
                 "model": model,
                 "frequency_penalty": frequency_penalty,
                 "function_call": function_call,
-                "functions": functions,
                 "logit_bias": logit_bias,
                 "logprobs": logprobs,
                 "max_tokens": max_tokens,
@@ -153,6 +162,10 @@ async def create(
                 "top_logprobs": top_logprobs,
                 "top_p": top_p,
                 "user": user,
+                "repetition_penalty": repetition_penalty,
+                "n": n,
+                "tool_choice": tool_choice,
+                "response_format": response_format,
             },
             options=make_request_options(
                 extra_headers=extra_headers,
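
A minimal sketch of the newly exposed chat-completion parameters (the endpoint ID and tool definition are hypothetical placeholders, and the numeric values are illustrative only):

from volcenginesdkarkruntime import Ark

client = Ark(api_key="YOUR_ARK_API_KEY")

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool
        "description": "Get the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

completion = client.chat.completions.create(
    model="<your-endpoint-id>",  # hypothetical placeholder
    messages=[{"role": "user", "content": "What's the weather in Beijing?"}],
    tools=tools,
    tool_choice="required",      # now also accepts "required"; see the type change below
    n=1,                         # number of choices to generate
    repetition_penalty=1.05,     # illustrative value
)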

volcenginesdkarkruntime/resources/tokenization.py

Lines changed: 4 additions & 8 deletions
@@ -27,9 +27,8 @@ def with_raw_response(self) -> TokenizationWithRawResponse:
     def create(
         self,
         *,
-        input: Union[str, List[str]],
+        text: Union[str, List[str]],
         model: str,
-        encoding_format: Literal["float", "base64"] = "float",
         user: str | None = None,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -41,10 +40,9 @@ def create(
         return self._post(
             "/tokenization",
             body={
-                "input": input,
+                "text": text,
                 "model": model,
                 "user": user,
-                "encoding_format": encoding_format,
             },
             options=make_request_options(
                 extra_headers=extra_headers,
@@ -65,9 +63,8 @@ def with_raw_response(self) -> AsyncTokenizationWithRawResponse:
     async def create(
         self,
         *,
-        input: Union[str, List[str]],
+        text: Union[str, List[str]],
         model: str,
-        encoding_format: Literal["float", "base64"] = "float",
         user: str | None = None,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -79,10 +76,9 @@ async def create(
         return await self._post(
             "/tokenization",
             body={
-                "input": input,
+                "text": text,
                 "model": model,
                 "user": user,
-                "encoding_format": encoding_format,
             },
             options=make_request_options(
                 extra_headers=extra_headers,
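
A minimal usage sketch of the renamed parameter (the endpoint ID is a hypothetical placeholder; the response shape is not part of this diff, so only the call is shown):

# `text` replaces the former `input` argument, and `encoding_format` is no longer accepted;
# a single string or a list of strings can be passed.
resp = client.tokenization.create(
    model="<your-endpoint-id>",  # hypothetical placeholder
    text=["hello world", "你好，世界"],
)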

volcenginesdkarkruntime/types/chat/chat_completion_tool_choice_option_param.py

Lines changed: 1 addition & 1 deletion
@@ -8,5 +8,5 @@
 __all__ = ["ChatCompletionToolChoiceOptionParam"]
 
 ChatCompletionToolChoiceOptionParam = Union[
-    Literal["none", "auto"], ChatCompletionNamedToolChoiceParam
+    Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam
 ]

volcenginesdkarkruntime/types/chat/completion_create_params.py

Lines changed: 7 additions & 0 deletions
@@ -209,6 +209,13 @@ class ResponseFormat(TypedDict, total=False):
     type: Literal["text", "json_object"]
     """Must be one of `text` or `json_object`."""
 
+    schema: Optional[Dict[str, object]]
+    """If the request only specifies type=`json_object` and no schema is specified, refer to the openai behavior,
+    the model outputs an arbitrary json object (depending on the user's instruction in the user prompt/system prompt)
+
+    Even if the schema is specified, still need to specify the expected json format in user prompt/system prompt
+    """
+
 
 class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
     stream: Optional[Literal[False]]
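
A minimal sketch of the new schema field alongside type="json_object" (the schema and prompt are illustrative; as the docstring notes, the expected JSON shape still has to be spelled out in the prompt):

completion = client.chat.completions.create(
    model="<your-endpoint-id>",  # hypothetical placeholder
    messages=[{
        "role": "user",
        "content": "Return a JSON object with keys `city` (string) and `temperature_c` (number) for Beijing.",
    }],
    response_format={
        "type": "json_object",
        # Optional schema hint; the prompt above still describes the format.
        "schema": {
            "type": "object",
            "properties": {
                "city": {"type": "string"},
                "temperature_c": {"type": "number"},
            },
        },
    },
)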

volcenginesdkarkruntime/types/create_embedding_response.py

Lines changed: 6 additions & 0 deletions
@@ -16,6 +16,12 @@ class Usage(BaseModel):
 
 
 class CreateEmbeddingResponse(BaseModel):
+    id: str
+    """A unique identifier for the embeddings."""
+
+    created: int
+    """The Unix timestamp (in seconds) of when the embeddings was created."""
+
     data: List[Embedding]
     """The list of embeddings generated by the model."""
 
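
A minimal sketch of reading the newly added response fields, assuming the embeddings resource keeps its existing create signature with an input argument (the endpoint ID is a hypothetical placeholder):

resp = client.embeddings.create(
    model="<your-endpoint-id>",  # hypothetical placeholder
    input=["first sentence", "second sentence"],
)
print(resp.id)         # unique identifier for this embeddings call
print(resp.created)    # Unix timestamp in seconds
print(len(resp.data))  # one Embedding per input item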
