Commit 1b79e56: fix bugs
Parent: 21afe0f
6 files changed: +67 -37 lines changed


pyproject.toml

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 [project]
 name = "veadk-python"
-version = "0.2.8"
+version = "0.2.9"
 description = "Volcengine agent development kit, integrations with Volcengine cloud services."
 readme = "README.md"
 requires-python = ">=3.10"
@@ -24,7 +24,7 @@ dependencies = [
     "openai<1.100", # For fix https://github.com/BerriAI/litellm/issues/13710
     "volcengine-python-sdk>=4.0.3", # For Volcengine API
     "volcengine>=1.0.193", # For Volcengine sign
-    "agent-pilot-sdk>=0.0.9", # Prompt optimization by Volcengine AgentPilot/PromptPilot toolkits
+    "agent-pilot-sdk>=0.1.2", # Prompt optimization by Volcengine AgentPilot/PromptPilot toolkits
     "fastmcp>=2.11.3", # For running MCP
     "cookiecutter>=2.6.0", # For cloud deploy
     "omegaconf>=2.3.0", # For agent builder

veadk/cli/cli_prompt.py

Lines changed: 9 additions & 2 deletions
@@ -21,12 +21,15 @@
 )
 @click.option("--feedback", default="", help="Suggestions for prompt optimization")
 @click.option("--api-key", default="", help="API Key of PromptPilot")
+@click.option("--workspace-id", default="", help="Workspace ID of PromptPilot")
 @click.option(
     "--model-name",
     default="doubao-1.5-pro-32k-250115",
     help="Model name for prompt optimization",
 )
-def prompt(path: str, feedback: str, api_key: str, model_name: str) -> None:
+def prompt(
+    path: str, feedback: str, api_key: str, workspace_id: str, model_name: str
+) -> None:
     """Optimize agent system prompt from a local file."""
     from pathlib import Path

@@ -56,7 +59,11 @@ def prompt(path: str, feedback: str, api_key: str, model_name: str) -> None:

     if not api_key:
         api_key = settings.prompt_pilot.api_key
-    ve_prompt_pilot = VePromptPilot(api_key)
+
+    if not workspace_id:
+        raise ValueError("Please provide workspace_id for PromptPilot service.")
+
+    ve_prompt_pilot = VePromptPilot(api_key=api_key, workspace_id=workspace_id)
     ve_prompt_pilot.optimize(
         agents=agents, feedback=feedback, model_name=model_name
     )

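A minimal sketch of exercising the updated command through click's test runner. It assumes the agent file is still supplied via a --path option (that option sits outside this hunk) and uses placeholder values throughout; treat it as illustrative rather than as the project's own test suite.

    from click.testing import CliRunner

    from veadk.cli.cli_prompt import prompt

    runner = CliRunner()
    # --workspace-id is now required; leaving it empty triggers the ValueError added above.
    result = runner.invoke(
        prompt,
        [
            "--path", "agent_config.yaml",            # placeholder path (assumed option)
            "--feedback", "Make the prompt shorter",  # optional optimization hint
            "--api-key", "YOUR_PROMPT_PILOT_API_KEY",
            "--workspace-id", "YOUR_WORKSPACE_ID",
        ],
    )
    print(result.output)
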
veadk/integrations/ve_prompt_pilot/ve_prompt_pilot.py

Lines changed: 9 additions & 3 deletions
@@ -16,7 +16,6 @@

 import agent_pilot as ap
 from agent_pilot.models import TaskType
-
 from veadk import Agent
 from veadk.prompts import prompt_optimization
 from veadk.utils.logger import get_logger
@@ -26,9 +25,15 @@

 class VePromptPilot:
     def __init__(
-        self, api_key: str, path: str = "", task_id: str | None = None
+        self,
+        api_key: str,
+        workspace_id: str,
+        path: str = "",
+        task_id: str | None = None,
     ) -> None:
         self.api_key = api_key
+        self.workspace_id = workspace_id
+
         self.path = path

     def optimize(
@@ -57,12 +62,13 @@ def optimize(
         usage = None
         for chunk in ap.generate_prompt_stream(
             task_description=task_description,
-            current_prompt=agent.instruction,
+            current_prompt=str(agent.instruction),
             model_name=model_name,
             task_type=TaskType.DIALOG,
             temperature=1.0,
             top_p=0.7,
             api_key=self.api_key,
+            workspace_id=self.workspace_id,
         ): # stream chunks of optimized prompt
             # Process each chunk as it arrives
             optimized_prompt += chunk.data.content if chunk.data else ""

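Programmatic use now mirrors the tutorial notebook change further down: workspace_id is passed alongside api_key. A minimal sketch, assuming the environment variable names from the tutorial and an already constructed agent:

    import os

    from veadk import Agent
    from veadk.integrations.ve_prompt_pilot.ve_prompt_pilot import VePromptPilot

    agent = Agent()  # assumed: an agent whose instruction should be optimized

    # workspace_id is now a required constructor argument next to api_key
    prompt_pilot = VePromptPilot(
        api_key=os.getenv("PROMPT_PILOT_API_KEY"),
        workspace_id=os.getenv("PROMPT_PILOT_WORKSPACE_ID"),
    )
    refined_prompt = prompt_pilot.optimize(agents=[agent])
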
veadk/integrations/ve_tls/ve_tls.py

Lines changed: 3 additions & 2 deletions
@@ -28,8 +28,9 @@ def __init__(
         region: str = "cn-beijing",
     ):
         try:
-            from veadk.integrations.ve_tls.utils import ve_tls_request
             from volcengine.tls.TLSService import TLSService
+
+            from veadk.integrations.ve_tls.utils import ve_tls_request
         except ImportError:
             raise ImportError(
                 "Please install volcengine SDK before init VeTLS: pip install volcengine"
@@ -164,7 +165,7 @@ def create_tracing_instance(self, log_project_id: str, trace_instance_name: str)
             request_headers={"TraceTag": "veadk"},
         )

-        if res["ErrorCode"] == "TraceInsAlreadyExist":
+        if "ErrorCode" in res and res["ErrorCode"] == "TopicAlreadyExist":
             logger.debug(
                 f"Tracing instance '{trace_instance_name}' already exists. Check its ID."
             )

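The guarded lookup above avoids a KeyError when the create call succeeds and the response carries no ErrorCode at all. A small illustrative sketch using plain dicts (the key names other than ErrorCode are hypothetical; the real response comes from the TLS service call):

    def already_exists(res: dict) -> bool:
        # Safe even when the response has no "ErrorCode" key (successful create)
        return "ErrorCode" in res and res["ErrorCode"] == "TopicAlreadyExist"

    print(already_exists({"TraceInstanceID": "trace-123"}))    # False (hypothetical success payload)
    print(already_exists({"ErrorCode": "TopicAlreadyExist"}))  # True
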
veadk/version.py

Lines changed: 1 addition & 1 deletion
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-VERSION = "0.2.8"
+VERSION = "0.2.9"

veadk_tutorial.ipynb

Lines changed: 43 additions & 27 deletions
@@ -479,14 +479,18 @@
    ]
   },
   {
-   "metadata": {},
    "cell_type": "markdown",
-   "source": "VeADK 还支持你将短期记忆持久化存储在云端,未来的某一时刻你可以加载历史对话。"
+   "metadata": {},
+   "source": [
+    "VeADK 还支持你将短期记忆持久化存储在云端,未来的某一时刻你可以加载历史对话。"
+   ]
   },
   {
-   "metadata": {},
    "cell_type": "markdown",
-   "source": "使用 MySQL 作为短期记忆的数据库后端:"
+   "metadata": {},
+   "source": [
+    "使用 MySQL 作为短期记忆的数据库后端:"
+   ]
   },
   {
    "cell_type": "code",
@@ -562,7 +566,9 @@
   "metadata": {
    "id": "k9wKEHeYxIUT"
   },
-  "source": "如果您使用知识库、长期记忆等进阶功能,请进一步安装 veadk-python 中的扩展包:"
+  "source": [
+   "如果您使用知识库、长期记忆等进阶功能,请进一步安装 veadk-python 中的扩展包:"
+  ]
  },
  {
   "cell_type": "code",
@@ -571,7 +577,9 @@
   "id": "VZIeRU1QxHrk"
  },
  "outputs": [],
- "source": "%pip install veadk-python[extensions] --quiet"
+ "source": [
+  "%pip install veadk-python[extensions] --quiet"
+ ]
 },
 {
 "cell_type": "markdown",
@@ -601,9 +609,7 @@
    "# Embedding 配置(使用 OpenSearch 时,需要对文本进行向量化处理)\n",
    "# 设置访问火山方舟的 Embedding 模型\n",
    "os.environ[\"MODEL_EMBEDDING_NAME\"] = \"doubao-embedding-text-240715\"\n",
-   "os.environ[\"MODEL_EMBEDDING_API_BASE\"] = (\n",
-   "    \"https://ark.cn-beijing.volces.com/api/v3/\"\n",
-   ")\n",
+   "os.environ[\"MODEL_EMBEDDING_API_BASE\"] = \"https://ark.cn-beijing.volces.com/api/v3/\"\n",
    "os.environ[\"MODEL_EMBEDDING_DIM\"] = \"2560\"\n",
    "os.environ[\"MODEL_EMBEDDING_API_KEY\"] = \"\""
   ]
@@ -637,7 +643,9 @@
    "\n",
    "# 初始化一个长期记忆,采用 OpenSearch 向量化存储\n",
    "# 长期记忆是跨 Session 的\n",
-   "long_term_memory = LongTermMemory(backend=\"opensearch\", app_name=app_name, user_id=user_id)\n",
+   "long_term_memory = LongTermMemory(\n",
+   "    backend=\"opensearch\", app_name=app_name, user_id=user_id\n",
+   ")\n",
    "\n",
    "agent = Agent(long_term_memory=long_term_memory)\n",
    "\n",
@@ -783,7 +791,9 @@
   "metadata": {
    "id": "_jZauBoRztaU"
   },
-  "source": "如果您使用知识库、长期记忆等进阶功能,请进一步安装 veadk-python 中的扩展包:"
+  "source": [
+   "如果您使用知识库、长期记忆等进阶功能,请进一步安装 veadk-python 中的扩展包:"
+  ]
  },
  {
   "cell_type": "code",
@@ -792,7 +802,9 @@
   "id": "xuozqr1Hzwjz"
  },
  "outputs": [],
- "source": "%pip install veadk-python[extensions] --quiet"
+ "source": [
+  "%pip install veadk-python[extensions] --quiet"
+ ]
 },
 {
 "cell_type": "markdown",
@@ -821,9 +833,7 @@
    "\n",
    "# 设置访问火山方舟的 Embedding 模型\n",
    "os.environ[\"MODEL_EMBEDDING_NAME\"] = \"doubao-embedding-text-240715\"\n",
-   "os.environ[\"MODEL_EMBEDDING_API_BASE\"] = (\n",
-   "    \"https://ark.cn-beijing.volces.com/api/v3/\"\n",
-   ")\n",
+   "os.environ[\"MODEL_EMBEDDING_API_BASE\"] = \"https://ark.cn-beijing.volces.com/api/v3/\"\n",
    "os.environ[\"MODEL_EMBEDDING_DIM\"] = \"2560\"\n",
    "os.environ[\"MODEL_EMBEDDING_API_KEY\"] = \"\""
   ]
@@ -892,7 +902,9 @@
    "session_id = \"veadk_playground_session\"\n",
    "\n",
    "\n",
-   "knowledgebase = KnowledgeBase(backend=\"opensearch\", app_name=app_name) # 指定 opensearch 后端\n",
+   "knowledgebase = KnowledgeBase(\n",
+   "    backend=\"opensearch\", app_name=app_name\n",
+   ") # 指定 opensearch 后端\n",
    "knowledgebase.add_from_files(files=[knowledgebase_file])\n",
    "\n",
    "agent = Agent(knowledgebase=knowledgebase)\n",
@@ -1594,8 +1606,8 @@
    "</pre>\n"
   ],
   "text/plain": [
-   "✨ You're running DeepEval's latest \u001B[38;2;106;0;255mBase Evaluation \u001B[0m\u001B[1;38;2;106;0;255m[\u001B[0m\u001B[38;2;106;0;255mGEval\u001B[0m\u001B[1;38;2;106;0;255m]\u001B[0m\u001B[38;2;106;0;255m Metric\u001B[0m! \u001B[1;38;2;55;65;81m(\u001B[0m\u001B[38;2;55;65;81musing \u001B[0m\u001B[3;38;2;55;65;81mNone\u001B[0m\u001B[38;2;55;65;81m \u001B[0m\u001B[1;38;2;55;65;81m(\u001B[0m\u001B[38;2;55;65;81mLocal Model\u001B[0m\u001B[1;38;2;55;65;81m)\u001B[0m\u001B[38;2;55;65;81m, \u001B[0m\u001B[38;2;55;65;81mstrict\u001B[0m\u001B[38;2;55;65;81m=\u001B[0m\u001B[3;38;2;55;65;81mFalse\u001B[0m\u001B[38;2;55;65;81m, \u001B[0m\n",
-   "\u001B[38;2;55;65;81masync_mode\u001B[0m\u001B[38;2;55;65;81m=\u001B[0m\u001B[3;38;2;55;65;81mTrue\u001B[0m\u001B[1;38;2;55;65;81m)\u001B[0m\u001B[38;2;55;65;81m...\u001B[0m\n"
+   "✨ You're running DeepEval's latest \u001b[38;2;106;0;255mBase Evaluation \u001b[0m\u001b[1;38;2;106;0;255m[\u001b[0m\u001b[38;2;106;0;255mGEval\u001b[0m\u001b[1;38;2;106;0;255m]\u001b[0m\u001b[38;2;106;0;255m Metric\u001b[0m! \u001b[1;38;2;55;65;81m(\u001b[0m\u001b[38;2;55;65;81musing \u001b[0m\u001b[3;38;2;55;65;81mNone\u001b[0m\u001b[38;2;55;65;81m \u001b[0m\u001b[1;38;2;55;65;81m(\u001b[0m\u001b[38;2;55;65;81mLocal Model\u001b[0m\u001b[1;38;2;55;65;81m)\u001b[0m\u001b[38;2;55;65;81m, \u001b[0m\u001b[38;2;55;65;81mstrict\u001b[0m\u001b[38;2;55;65;81m=\u001b[0m\u001b[3;38;2;55;65;81mFalse\u001b[0m\u001b[38;2;55;65;81m, \u001b[0m\n",
+   "\u001b[38;2;55;65;81masync_mode\u001b[0m\u001b[38;2;55;65;81m=\u001b[0m\u001b[3;38;2;55;65;81mTrue\u001b[0m\u001b[1;38;2;55;65;81m)\u001b[0m\u001b[38;2;55;65;81m...\u001b[0m\n"
   ]
  },
  "metadata": {},
@@ -1608,7 +1620,7 @@
    "</pre>\n"
   ],
   "text/plain": [
-   "✨ You're running DeepEval's latest \u001B[38;2;106;0;255mTool Correctness Metric\u001B[0m! \u001B[1;38;2;55;65;81m(\u001B[0m\u001B[38;2;55;65;81musing \u001B[0m\u001B[3;38;2;55;65;81mNone\u001B[0m\u001B[38;2;55;65;81m, \u001B[0m\u001B[38;2;55;65;81mstrict\u001B[0m\u001B[38;2;55;65;81m=\u001B[0m\u001B[3;38;2;55;65;81mFalse\u001B[0m\u001B[38;2;55;65;81m, \u001B[0m\u001B[38;2;55;65;81masync_mode\u001B[0m\u001B[38;2;55;65;81m=\u001B[0m\u001B[3;38;2;55;65;81mTrue\u001B[0m\u001B[1;38;2;55;65;81m)\u001B[0m\u001B[38;2;55;65;81m...\u001B[0m\n"
+   "✨ You're running DeepEval's latest \u001b[38;2;106;0;255mTool Correctness Metric\u001b[0m! \u001b[1;38;2;55;65;81m(\u001b[0m\u001b[38;2;55;65;81musing \u001b[0m\u001b[3;38;2;55;65;81mNone\u001b[0m\u001b[38;2;55;65;81m, \u001b[0m\u001b[38;2;55;65;81mstrict\u001b[0m\u001b[38;2;55;65;81m=\u001b[0m\u001b[3;38;2;55;65;81mFalse\u001b[0m\u001b[38;2;55;65;81m, \u001b[0m\u001b[38;2;55;65;81masync_mode\u001b[0m\u001b[38;2;55;65;81m=\u001b[0m\u001b[3;38;2;55;65;81mTrue\u001b[0m\u001b[1;38;2;55;65;81m)\u001b[0m\u001b[38;2;55;65;81m...\u001b[0m\n"
   ]
  },
  "metadata": {},
@@ -1689,14 +1701,14 @@
   "text/plain": [
    "\n",
    "\n",
-   "\u001B[38;2;5;245;141m✓\u001B[0m Evaluation completed 🎉! \u001B[1m(\u001B[0mtime taken: \u001B[1;36m24.\u001B[0m09s | token cost: \u001B[1;36m0.0\u001B[0m USD\u001B[1m)\u001B[0m\n",
-   "» Test Results \u001B[1m(\u001B[0m\u001B[1;36m1\u001B[0m total tests\u001B[1m)\u001B[0m:\n",
-   " » Pass Rate: \u001B[1;36m100.0\u001B[0m% | Passed: \u001B[1;32m1\u001B[0m | Failed: \u001B[1;31m0\u001B[0m\n",
+   "\u001b[38;2;5;245;141m✓\u001b[0m Evaluation completed 🎉! \u001b[1m(\u001b[0mtime taken: \u001b[1;36m24.\u001b[0m09s | token cost: \u001b[1;36m0.0\u001b[0m USD\u001b[1m)\u001b[0m\n",
+   "» Test Results \u001b[1m(\u001b[0m\u001b[1;36m1\u001b[0m total tests\u001b[1m)\u001b[0m:\n",
+   " » Pass Rate: \u001b[1;36m100.0\u001b[0m% | Passed: \u001b[1;32m1\u001b[0m | Failed: \u001b[1;31m0\u001b[0m\n",
    "\n",
    " ================================================================================ \n",
    "\n",
    "» What to share evals with your team, or a place for your test cases to live? ❤️ 🏡\n",
-   " » Run \u001B[1;32m'deepeval view'\u001B[0m to analyze and save testing results on \u001B[38;2;106;0;255mConfident AI\u001B[0m.\n",
+   " » Run \u001b[1;32m'deepeval view'\u001b[0m to analyze and save testing results on \u001b[38;2;106;0;255mConfident AI\u001b[0m.\n",
    "\n",
    "\n"
   ]
@@ -1780,7 +1792,7 @@
   "outputs": [],
   "source": [
    "# 安装火山引擎提供的依赖\n",
-   "%pip install agent-pilot-sdk>=0.0.9"
+   "%pip install agent-pilot-sdk>=0.1.2"
   ]
  },
  {
@@ -1789,7 +1801,7 @@
   "id": "5IjI4lrHSZcD"
  },
  "source": [
-  "设置 KEY 来访问服务"
+  "您可以从 Prompt Pilot 产品[官方页面](https://promptpilot.volcengine.com/)获取 KEY 和 Workspace ID,在下方设置后访问服务"
  ]
 },
 {
@@ -1802,7 +1814,8 @@
  "source": [
   "import os\n",
   "\n",
-  "os.environ[\"PROMPT_PILOT_API_KEY\"] = \"\""
+  "os.environ[\"PROMPT_PILOT_API_KEY\"] = \"\"\n",
+  "os.environ[\"PROMPT_PILOT_WORKSPACE_ID\"] = \"\""
  ]
 },
 {
@@ -1871,7 +1884,10 @@
  "source": [
   "from veadk.integrations.ve_prompt_pilot.ve_prompt_pilot import VePromptPilot\n",
   "\n",
-  "prompt_pilot = VePromptPilot(api_key=os.getenv(\"PROMPT_PILOT_API_KEY\"))\n",
+  "prompt_pilot = VePromptPilot(\n",
+  "    api_key=os.getenv(\"PROMPT_PILOT_API_KEY\"),\n",
+  "    workspace_id=os.getenv(\"PROMPT_PILOT_WORKSPACE_ID\"),\n",
+  ")\n",
   "\n",
   "refined_prompt = prompt_pilot.optimize(agents=[agent])"
  ]
