Skip to content

Commit bbc110c

Browse files
committed
revert version bug
1 parent ac2caf4 commit bbc110c

File tree

3 files changed

+348
-2
lines changed

3 files changed

+348
-2
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "veadk-python"
3-
version = "0.2.6"
3+
version = "0.2.5"
44
description = "Volcengine agent development kit, integrations with Volcengine cloud services."
55
readme = "README.md"
66
requires-python = ">=3.10"

veadk/models/ark_model.py

Lines changed: 346 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,346 @@
1+
import json
2+
from typing import AsyncGenerator, Dict, List, Optional, Union
3+
4+
from google.adk.models import BaseLlm
5+
from google.adk.models.llm_request import LlmRequest
6+
from google.adk.models.llm_response import LlmResponse
7+
from google.genai import types
8+
from typing_extensions import override
9+
from volcenginesdkarkruntime import Ark
10+
from volcenginesdkarkruntime.types.chat import (
11+
ChatCompletion,
12+
ChatCompletionMessage,
13+
ChatCompletionMessageParam,
14+
ChatCompletionMessageToolCallParam,
15+
ChatCompletionSystemMessageParam,
16+
ChatCompletionToolMessageParam,
17+
ChatCompletionUserMessageParam,
18+
)
19+
from volcenginesdkarkruntime.types.chat.chat_completion_content_part_param import (
20+
ChatCompletionContentPartParam,
21+
ChatCompletionContentPartTextParam,
22+
)
23+
24+
from veadk.utils.misc import safe_json_serialize
25+
26+
27+
def _get_content(
    parts: list[types.Part],
) -> ChatCompletionContentPartParam:
    """Builds ARK message content from a list of genai parts.

    A lone text part is returned as a bare string; otherwise every text part
    becomes a ``ChatCompletionContentPartTextParam``. Parts without text
    (e.g. inline binary data) are silently skipped — image support is not
    implemented yet.

    Args:
        parts: The parts to convert.

    Returns:
        The ARK message content: a plain string or a list of content parts.
    """
    is_single_part = len(parts) == 1
    converted: list[ChatCompletionContentPartTextParam] = []
    for item in parts:
        if not item.text:
            # Non-text parts (images, etc.) are not supported and are dropped.
            continue
        if is_single_part:
            # A single text part is sent as a bare string, the simplest
            # content shape the ARK API accepts.
            return item.text
        converted.append(
            ChatCompletionContentPartTextParam(type="text", text=item.text)
        )
    return converted
63+
64+
65+
def _content_to_ark_message(
    content: types.Content,
) -> Union[ChatCompletionMessageParam, list[ChatCompletionMessageParam]]:
    """Converts a genai Content into one or more ARK chat messages.

    Function responses map to ``role="tool"`` messages (one per response);
    user content maps to a ``role="user"`` message; everything else becomes
    an assistant message that may carry tool calls.

    Args:
        content: The content to convert.

    Returns:
        A single ARK message param, or a list of them when the content holds
        more than one function response.
    """
    parts = content.parts or []

    # Function responses take precedence: each one becomes a "tool" message.
    tool_replies = [
        ChatCompletionToolMessageParam(
            role="tool",
            tool_call_id=part.function_response.id or "",
            content=safe_json_serialize(part.function_response.response),
        )
        for part in parts
        if part.function_response
    ]
    if tool_replies:
        return tool_replies[0] if len(tool_replies) == 1 else tool_replies

    body = _get_content(parts)

    if content.role == "user":
        return ChatCompletionUserMessageParam(role="user", content=body)

    # Assistant/model turn: gather tool calls and note whether any plain
    # (text / inline-data) content is present.
    calls: list[ChatCompletionMessageToolCallParam] = []
    has_plain_content = False
    for part in parts:
        if part.function_call:
            calls.append(
                {
                    "id": part.function_call.id or "",
                    "type": "function",
                    "function": {
                        "name": part.function_call.name,
                        "arguments": safe_json_serialize(part.function_call.args),
                    },
                }
            )
        elif part.text or part.inline_data:
            has_plain_content = True

    # Shape of ChatCompletionAssistantMessageParam. The role here is always
    # "assistant": the "user" case returned above.
    return {
        "role": "assistant",
        "content": body if has_plain_content else None,
        "tool_calls": calls or None,
    }
111+
112+
113+
def _message_to_generate_content_response(
    message: ChatCompletionMessage, is_partial: bool = False
) -> LlmResponse:
    """Converts an ARK chat-completion message to an LlmResponse.

    Args:
        message: The ARK message to convert.
        is_partial: Whether the message is a partial (streamed) chunk.

    Returns:
        The LlmResponse.
    """
    converted_parts: list[types.Part] = []

    if message.content:
        converted_parts.append(types.Part.from_text(text=message.content))

    for call in message.tool_calls or []:
        if call.type != "function":
            continue
        fn_part = types.Part.from_function_call(
            name=call.function.name,
            args=json.loads(call.function.arguments or "{}"),
        )
        # Preserve the provider-assigned call id so later tool responses can
        # be matched back to this call.
        fn_part.function_call.id = call.id
        converted_parts.append(fn_part)

    return LlmResponse(
        content=types.Content(role="model", parts=converted_parts),
        partial=is_partial,
    )
143+
144+
145+
def _model_response_to_generate_content_response(
    response: ChatCompletion,
) -> LlmResponse:
    """Converts an ARK ChatCompletion to an LlmResponse, attaching usage data.

    Args:
        response: The model response.

    Returns:
        The LlmResponse.

    Raises:
        ValueError: If the response carries no message.
    """
    first_message = response.choices[0].message if response.choices else None
    if not first_message:
        raise ValueError("No message in response")

    result = _message_to_generate_content_response(first_message)

    usage = response.usage
    if usage:
        result.usage_metadata = types.GenerateContentResponseUsageMetadata(
            prompt_token_count=usage.prompt_tokens,
            candidates_token_count=usage.completion_tokens,
            total_token_count=usage.total_tokens,
        )
    return result
172+
173+
174+
def _schema_to_dict(schema: types.Schema) -> dict:
    """
    Recursively converts a types.Schema to a pure-python dict
    with all enum values written as lower-case strings.

    Args:
        schema: The schema to convert.

    Returns:
        The dictionary representation of the schema.
    """
    # Dump without json encoding so we still get Enum members
    # (model_dump keeps Enum values as-is; exclude_none drops unset fields).
    schema_dict = schema.model_dump(exclude_none=True)

    # ---- normalise this level ------------------------------------------------
    if "type" in schema_dict:
        # schema_dict["type"] can be an Enum or a str
        t = schema_dict["type"]
        schema_dict["type"] = (t.value if isinstance(t, types.Type) else t).lower()

    # ---- recurse into `items` -----------------------------------------------
    # Prefer the live Schema object on `schema` when available; otherwise
    # rebuild one from the dumped dict so the recursion always sees a Schema.
    if "items" in schema_dict:
        schema_dict["items"] = _schema_to_dict(
            schema.items
            if isinstance(schema.items, types.Schema)
            else types.Schema.model_validate(schema_dict["items"])
        )

    # ---- recurse into `properties` ------------------------------------------
    if "properties" in schema_dict:
        new_props = {}
        for key, value in schema_dict["properties"].items():
            # value is a dict → rebuild a Schema object and recurse
            if isinstance(value, dict):
                new_props[key] = _schema_to_dict(types.Schema.model_validate(value))
            # value is already a Schema instance
            elif isinstance(value, types.Schema):
                new_props[key] = _schema_to_dict(value)
            # plain dict without nested schemas
            # NOTE(review): with pydantic model_dump, property values should
            # always be dicts, so this fallback looks unreachable — and it
            # assumes `value` supports `in` and item access; confirm before
            # relying on it.
            else:
                new_props[key] = value
                if "type" in new_props[key]:
                    new_props[key]["type"] = new_props[key]["type"].lower()
        schema_dict["properties"] = new_props

    return schema_dict
220+
221+
222+
def _function_declaration_to_tool_param(
223+
function_declaration: types.FunctionDeclaration,
224+
) -> dict:
225+
"""Converts a types.FunctionDeclaration to a openapi spec dictionary.
226+
227+
Args:
228+
function_declaration: The function declaration to convert.
229+
230+
Returns:
231+
The openapi spec dictionary representation of the function declaration.
232+
"""
233+
234+
assert function_declaration.name
235+
236+
properties = {}
237+
if function_declaration.parameters and function_declaration.parameters.properties:
238+
for key, value in function_declaration.parameters.properties.items():
239+
properties[key] = _schema_to_dict(value)
240+
241+
tool_params = {
242+
"type": "function",
243+
"function": {
244+
"name": function_declaration.name,
245+
"description": function_declaration.description or "",
246+
"parameters": {
247+
"type": "object",
248+
"properties": properties,
249+
},
250+
},
251+
}
252+
253+
if function_declaration.parameters.required:
254+
tool_params["function"]["parameters"]["required"] = (
255+
function_declaration.parameters.required
256+
)
257+
258+
return tool_params
259+
260+
261+
def _build_tools(
262+
llm_request: LlmRequest,
263+
) -> List[Dict]:
264+
"""Converts an LlmRequest to ARK inputs and extracts generation params.
265+
266+
Args:
267+
llm_request: The LlmRequest to convert.
268+
269+
Returns:
270+
The ARK inputs (message list, tool dictionary, response format and generation params).
271+
"""
272+
# 2. Convert tool declarations
273+
tools: Optional[List[Dict]] = None
274+
if (
275+
llm_request.config
276+
and llm_request.config.tools
277+
and llm_request.config.tools[0].function_declarations
278+
):
279+
tools = [
280+
_function_declaration_to_tool_param(tool)
281+
for tool in llm_request.config.tools[0].function_declarations
282+
]
283+
284+
return tools
285+
286+
287+
class ArkLLM(BaseLlm):
    """Volcengine ARK chat-completions backend for the ADK ``BaseLlm`` API."""

    def __init__(self, model_name: str, api_key: str, **kwargs):
        """Initializes the ArkLLM class.

        Args:
            model_name: The name of the ARK model (endpoint) to call.
            api_key: The Volcengine ARK API key used to build the client.
            **kwargs: Additional arguments forwarded to ``BaseLlm``.
        """
        super().__init__(model=model_name, **kwargs)

        self._ark_client = Ark(api_key=api_key)
        self._enable_responses_api = False

    async def generate_content_async(
        self, llm_request: LlmRequest, stream: bool = False
    ) -> AsyncGenerator[LlmResponse, None]:
        """Generates content asynchronously.

        Args:
            llm_request: The request to send to the ARK model.
            stream: Whether to do a streaming call. NOTE(review): streaming is
                not implemented; the flag is currently ignored and a single
                non-streamed response is yielded.

        Yields:
            LlmResponse: The model response.
        """
        messages: list[ChatCompletionMessageParam] = []
        messages.append(
            ChatCompletionSystemMessageParam(
                content=[
                    {"type": "text", "text": str(llm_request.config.system_instruction)}
                ],
                role="system",
            )
        )
        for content in llm_request.contents:
            converted = _content_to_ark_message(content)
            # _content_to_ark_message returns a list when a turn carries
            # several tool responses; flatten it instead of appending a
            # nested list into the message sequence (which the ARK API
            # would reject).
            if isinstance(converted, list):
                messages.extend(converted)
            else:
                messages.append(converted)

        tools = _build_tools(llm_request=llm_request)

        # NOTE(review): this is a synchronous SDK call inside an async
        # method, so it blocks the event loop for the duration of the
        # request — consider asyncio.to_thread or the SDK's async client.
        response: ChatCompletion = self._ark_client.chat.completions.create(
            messages=messages, model=self.model, tools=tools
        )

        yield _model_response_to_generate_content_response(response)

    @classmethod
    @override
    def supported_models(cls) -> list[str]:
        """Provides the list of supported models.

        Returns:
            A list of supported model name patterns (currently empty).
        """
        return []

veadk/version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,4 +12,4 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
VERSION = "0.2.6"
15+
VERSION = "0.2.5"

0 commit comments

Comments
 (0)