Commit 2665a6e

Some code for pydantic<3,>=1.9.0 compatibility; message-handling changes (#25)

- Code compatible with pydantic<3,>=1.9.0
- Request and response parameters for message handling can be extended through configuration
- Compatibility handling for top_p:1 and temperature:0 (do_sample is rewritten to false, and top_p / temperature then take no effect)
- Image understanding: image_url values whose base64 content includes the data:image/jpeg;base64 prefix are accepted
- Removed the JWT authentication logic

1 parent 2a2943e commit 2665a6e
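The pydantic range above spans both the 1.x and 2.x APIs. One common way to keep a single code base working across that range is an import shim; the sketch below is illustrative only (the `PYDANTIC_V2` flag and the example model are assumptions, not code from this commit), although the new `tests/agent/glm3_agent.py` below uses the `pydantic.v1` namespace in the same spirit.

```python
# Illustrative compatibility shim for pydantic<3,>=1.9.0 (an assumption, not this commit's code).
# pydantic 2.x ships a `pydantic.v1` namespace; on 1.x we fall back to the top-level package,
# so the same v1-style imports work under either major version.
import pydantic

PYDANTIC_V2 = pydantic.VERSION.startswith("2.")

if PYDANTIC_V2:
    from pydantic.v1 import BaseModel, Field
else:
    from pydantic import BaseModel, Field


class ChatMessage(BaseModel):
    """Small example model that validates under either pydantic version."""
    role: str
    content: str = Field(default="")
```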

38 files changed (+2100 additions, -130 deletions)

README.md

Lines changed: 117 additions & 11 deletions
@@ -29,7 +29,14 @@ pip install zhipuai
 - The open platform [API documentation](https://open.bigmodel.cn/dev/api) and [usage guide](https://open.bigmodel.cn/dev/howuse/) contain more demo examples; please use your own ApiKey when testing the demos.

 ### Creating a Client
+The SDK supports configuring the APIKey through environment variables:
+- env

+`ZHIPUAI_API_KEY`: your APIKey
+
+`ZHIPUAI_BASE_URL`: your API base URL
+
+- The APIKey can also be passed in code
 ```python
 from zhipuai import ZhipuAI
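The environment-variable configuration added in the hunk above can be exercised as in the minimal sketch below; the key and base-URL values are placeholders, not values from this commit.

```python
# Minimal sketch of configuring the client entirely through the environment.
# The values below are placeholders; set them to your own key and endpoint.
import os

from zhipuai import ZhipuAI

os.environ["ZHIPUAI_API_KEY"] = "your-api-key"                             # placeholder
os.environ["ZHIPUAI_BASE_URL"] = "https://open.bigmodel.cn/api/paas/v4/"   # optional override

client = ZhipuAI()  # picks up ZHIPUAI_API_KEY / ZHIPUAI_BASE_URL from the environment
```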

@@ -41,19 +48,31 @@ client = ZhipuAI(
 ### Synchronous call

 ```python
-from zhipuai import ZhipuAI
-client = ZhipuAI(api_key="")  # fill in your own APIKey
+from zhipuai import ZhipuAI
+
+client = ZhipuAI()  # fill in your own APIKey
 response = client.chat.completions.create(
-    model="",  # name of the model to call
-    messages=[
-        {"role": "user", "content": "你好"},
-        {"role": "assistant", "content": "我是人工智能助手"},
-        {"role": "user", "content": "你叫什么名字"},
-        {"role": "assistant", "content": "我叫chatGLM"},
-        {"role": "user", "content": "你都可以做些什么事"}
-    ],
+    model="glm-4",  # name of the model to call
+    messages=[
+        {"role": "user", "content": "作为一名营销专家,请为我的产品创作一个吸引人的slogan"},
+        {"role": "assistant", "content": "当然,为了创作一个吸引人的slogan,请告诉我一些关于您产品的信息"},
+        {"role": "user", "content": "智谱AI开放平台"},
+        {"role": "assistant", "content": "智启未来,谱绘无限一智谱AI,让创新触手可及!"},
+        {"role": "user", "content": "创造一个更精准、吸引人的slogan"}
+    ],
+    tools=[
+        {
+            "type": "web_search",
+            "web_search": {
+                "search_query": "帮我看看清华的升学率",
+                "search_result": True,
+            }
+        }
+    ],
+    # extra parameters
+    extra_body={"temperature": 0.5, "max_tokens": 50},
 )
-print(response.choices[0].message)
+print(response)
 ```

 ### SSE call
@@ -73,6 +92,82 @@ for chunk in response:
     print(chunk.choices[0].delta)
 ```

+### Multimodal
+```python
+
+
+# Function to encode the image
+def encode_image(image_path):
+    import base64
+    with open(image_path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode('utf-8')
+
+
+def test_completions_vis():
+    client = ZhipuAI()  # fill in your own APIKey
+    base64_image = encode_image("img/MetaGLM.png")
+    response = client.chat.completions.create(
+        model="glm-4v",  # name of the model to call
+        extra_body={"temperature": 0.5, "max_tokens": 50},
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": "图里有什么"
+                    },
+
+                    # {
+                    #     "type": "image_url",
+                    #     "image_url": {
+                    #         "url": "https://img1.baidu.com/it/u=1369931113,3388870256&fm=253&app=138&size=w931&n=0&f=JPEG&fmt=auto?sec=1703696400&t=f3028c7a1dca43a080aeb8239f09cc2f"
+                    #     }
+                    # },
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/jpeg;base64,{base64_image}"
+                        }
+                    }
+                ]
+            }
+        ]
+    )
+    print(response)
+
+test_completions_vis()
+```
+
+### Character role-play
+> For the name of the model that provides this capability, please check the official website
+```python
+
+def test_completions_charglm():
+    client = ZhipuAI()  # please fill in your own APIKey
+    response = client.chat.completions.create(
+        model="charglm-3",  # name of the model to call
+        messages=[
+            {
+                "role": "user",
+                "content": "请问你在做什么"
+            }
+        ],
+        extra_body={
+            "meta": {
+                "user_info": "我是陆星辰,是一个男性,是一位知名导演,也是苏梦远的合作导演。我擅长拍摄音乐题材的电影。苏梦远对我的态度是尊敬的,并视我为良师益友。",
+                "bot_info": "苏梦远,本名苏远心,是一位当红的国内女歌手及演员。在参加选秀节目后,凭借独特的嗓音及出众的舞台魅力迅速成名,进入娱乐圈。她外表美丽动人,但真正的魅力在于她的才华和勤奋。苏梦远是音乐学院毕业的优秀生,善于创作,拥有多首热门原创歌曲。除了音乐方面的成就,她还热衷于慈善事业,积极参加公益活动,用实际行动传递正能量。在工作中,她对待工作非常敬业,拍戏时总是全身心投入角色,赢得了业内人士的赞誉和粉丝的喜爱。虽然在娱乐圈,但她始终保持低调、谦逊的态度,深得同行尊重。在表达时,苏梦远喜欢使用“我们”和“一起”,强调团队精神。",
+                "bot_name": "苏梦远",
+                "user_name": "陆星辰"
+            },
+        }
+    )
+    print(response)
+test_completions_charglm()
+```
+
+
 ### Exception handling

 When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `openai.APIConnectionError` is raised.
@@ -116,3 +211,14 @@ Error codes are as followed:
 | >=500 | `InternalServerError` |
 | N/A | `APIConnectionError` |

+
+
+### Changelog
+
+`2024-4-23`
+- Code compatible with `pydantic<3,>=1.9.0`
+- Request and response parameters for message handling can be extended through configuration
+- Compatibility for `top_p:1` and `temperature:0` (do_sample is rewritten to false, so top_p and temperature take no effect)
+- Image understanding: image_url base64 content that includes the `data:image/jpeg;base64` prefix is accepted
+- Removed the JWT authentication logic
+
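The top_p / temperature bullet above can be read as a normalization step on the request payload. The sketch below shows one plausible shape of that rule; it is an assumption about the behavior described in the changelog, not the SDK's actual code, and the function name is hypothetical.

```python
# Hypothetical illustration of the sampling-parameter compatibility described in the
# changelog: temperature == 0 or top_p == 1 is treated as "no sampling", so do_sample
# is forced to False and the two sampling parameters are dropped from the request.
def normalize_sampling_params(params: dict) -> dict:
    params = dict(params)
    if params.get("temperature") == 0 or params.get("top_p") == 1:
        params["do_sample"] = False
        params.pop("temperature", None)
        params.pop("top_p", None)
    return params


print(normalize_sampling_params({"temperature": 0, "top_p": 0.7, "max_tokens": 50}))
# -> {'max_tokens': 50, 'do_sample': False}
```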

requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 httpx>=0.23.0
-pydantic>=2.5.2
+pydantic<3,>=1.9.0
 cachetools>=4.2.2
 pyjwt~=2.8.0

tests/agent/glm3_agent.py

Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
"""
This file is a modified version, for ChatGLM3-6B, of the original glm3_agent.py file from the langchain repo.
"""

import json
import logging
import typing
from typing import Sequence, Optional, Union

import langchain_core.prompts
import langchain_core.messages
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.output_parser import StructuredChatOutputParser
from langchain.prompts.chat import ChatPromptTemplate
from langchain.output_parsers import OutputFixingParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool
from pydantic.v1 import Field

from pydantic.v1.schema import model_schema

logger = logging.getLogger(__name__)

SYSTEM_PROMPT = "Answer the following questions as best as you can. You have access to the following tools:\n{tools}"
HUMAN_MESSAGE = "Let's start! Human:{input}\n\n{agent_scratchpad}"


class StructuredGLM3ChatOutputParser(AgentOutputParser):
    """
    Output parser with retries for the structured chat agent.
    """
    base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
    output_fixing_parser: Optional[OutputFixingParser] = None

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        print(text)

        special_tokens = ["Action:", "<|observation|>"]
        first_index = min([text.find(token) if token in text else len(text) for token in special_tokens])
        text = text[:first_index]

        if "tool_call" in text:
            action_end = text.find("```")
            action = text[:action_end].strip()
            params_str_start = text.find("(") + 1
            params_str_end = text.rfind(")")
            params_str = text[params_str_start:params_str_end]

            params_pairs = [param.split("=") for param in params_str.split(",") if "=" in param]
            params = {pair[0].strip(): pair[1].strip().strip("'\"") for pair in params_pairs}

            action_json = {
                "action": action,
                "action_input": params
            }
        else:
            action_json = {
                "action": "Final Answer",
                "action_input": text
            }
        action_str = f"""
Action:
```
{json.dumps(action_json, ensure_ascii=False)}
```"""
        try:
            if self.output_fixing_parser is not None:
                parsed_obj: Union[
                    AgentAction, AgentFinish
                ] = self.output_fixing_parser.parse(action_str)
            else:
                parsed_obj = self.base_parser.parse(action_str)
            return parsed_obj
        except Exception as e:
            raise OutputParserException(f"Could not parse LLM output: {text}") from e

    @property
    def _type(self) -> str:
        return "StructuredGLM3ChatOutputParser"


def create_structured_glm3_chat_agent(
        llm: BaseLanguageModel, tools: Sequence[BaseTool]
) -> Runnable:
    tools_json = []
    for tool in tools:
        tool_schema = model_schema(tool.args_schema) if tool.args_schema else {}
        description = tool.description.split(" - ")[
            1].strip() if tool.description and " - " in tool.description else tool.description
        parameters = {k: {sub_k: sub_v for sub_k, sub_v in v.items() if sub_k != 'title'} for k, v in
                      tool_schema.get("properties", {}).items()}
        simplified_config_langchain = {
            "name": tool.name,
            "description": description,
            "parameters": parameters
        }
        tools_json.append(simplified_config_langchain)
    tools = "\n".join([str(tool) for tool in tools_json])

    prompt = ChatPromptTemplate(
        input_variables=["input", "agent_scratchpad"],
        input_types={'chat_history': typing.List[typing.Union[
            langchain_core.messages.ai.AIMessage,
            langchain_core.messages.human.HumanMessage,
            langchain_core.messages.chat.ChatMessage,
            langchain_core.messages.system.SystemMessage,
            langchain_core.messages.function.FunctionMessage,
            langchain_core.messages.tool.ToolMessage]]
        },
        messages=[
            langchain_core.prompts.SystemMessagePromptTemplate(
                prompt=langchain_core.prompts.PromptTemplate(
                    input_variables=['tools'],
                    template=SYSTEM_PROMPT)
            ),
            langchain_core.prompts.MessagesPlaceholder(
                variable_name='chat_history',
                optional=True
            ),
            langchain_core.prompts.HumanMessagePromptTemplate(
                prompt=langchain_core.prompts.PromptTemplate(
                    input_variables=['agent_scratchpad', 'input'],
                    template=HUMAN_MESSAGE
                )
            )
        ]

    ).partial(tools=tools)

    llm_with_stop = llm.bind(stop=["<|observation|>"])
    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: x["intermediate_steps"],
        )
        | prompt
        | llm_with_stop
        | StructuredGLM3ChatOutputParser()
    )
    return agent
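A hypothetical usage sketch for the agent factory above follows, mirroring the accompanying test file; the tool choice, the input string, and the assumption that `ZHIPUAI_API_KEY` is set in the environment are all illustrative, not part of this commit.

```python
# Hypothetical wiring of create_structured_glm3_chat_agent into an AgentExecutor
# (assumptions: ZHIPUAI_API_KEY is set in the environment and ShellTool is acceptable).
from langchain.agents import AgentExecutor
from langchain_community.chat_models import ChatZhipuAI
from langchain_community.tools import ShellTool

llm = ChatZhipuAI()
tools = [ShellTool()]

agent = create_structured_glm3_chat_agent(llm, tools)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
executor.invoke({"input": "List the files in the current directory"})
```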

tests/agent/test_agent.py

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_community.chat_models import ChatZhipuAI
from langchain_community.tools import ShellTool
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.prompts import ChatPromptTemplate

if __name__ == "__main__":
    tools = [ShellTool()]

    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant. Make sure to use the terminal tool for information.",
            ),
            ("placeholder", "{chat_history}"),
            ("human", "{input}"),
            ("placeholder", "{agent_scratchpad}"),
        ]
    )

    llm = ChatZhipuAI()
    # Construct the Tools agent
    agent = create_tool_calling_agent(llm, tools, prompt)

    # Create an agent executor by passing in the agent and tools
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
    agent_executor.invoke({"input": "查看本地目录"})
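Running the test above assumes an API key is available to the client. As a hedge, `ChatZhipuAI()` in langchain_community typically reads the key from the environment when no argument is given; the snippet below shows that assumed prerequisite with a placeholder value.

```python
# Assumed prerequisite for tests/agent/test_agent.py: ChatZhipuAI() with no arguments
# is expected to read the API key from the environment. The value is a placeholder.
import os

os.environ.setdefault("ZHIPUAI_API_KEY", "your-api-key")
```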
