Skip to content

Commit 208285e

Browse files
committed
add test
1 parent a8a676a commit 208285e

File tree

1 file changed

+152
-0
lines changed

1 file changed

+152
-0
lines changed
Lines changed: 152 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,152 @@
1+
import os
2+
import json
3+
import time
4+
from openai import OpenAI
5+
6+
# ==========================================
7+
# 1. 逻辑定义 (Old vs New)
8+
# ==========================================
9+
10+
def old_wrap_thinking_by_reasoning_content(delta_dict: dict, is_reasoning: bool) -> tuple[str, bool]:
    """[OLD] Behavior prior to the PR change.

    Wraps a streamed reasoning delta in ``<think>`` tags. Known flaw kept on
    purpose for the comparison below: the closing ``</think>`` tag is emitted
    only when the post-reasoning delta carries non-empty content, so a stream
    that ends reasoning with an empty content delta never closes the tag.

    Returns (text_to_emit, updated_is_reasoning_flag).
    """
    text = delta_dict.get("content") or ""
    reasoning = delta_dict.get("reasoning_content")

    if reasoning:
        # Still inside the reasoning stream: open the tag on the first delta.
        if is_reasoning:
            return reasoning, True
        return "<think>\n" + reasoning, True

    # Old-logic defect: the tag closes only if content is non-empty.
    if is_reasoning and text:
        return "\n</think>" + text, False

    return text, is_reasoning
29+
30+
def new_wrap_thinking_by_reasoning_content(delta_dict: dict, is_reasoning: bool) -> tuple[str, bool]:
    """[NEW] Behavior after the PR change.

    Wraps a streamed reasoning delta in ``<think>`` tags. Unlike the old
    logic, the closing ``</think>`` tag is always emitted as soon as the
    reasoning stream ends, even when the terminating delta has no content.

    Returns (text_to_emit, updated_is_reasoning_flag).
    """
    content = delta_dict.get("content") or ""
    reasoning_content = delta_dict.get("reasoning_content")
    output = content

    if reasoning_content:
        if not is_reasoning:
            # First reasoning delta: open the tag.
            output = "<think>\n" + reasoning_content
            is_reasoning = True
        else:
            output = reasoning_content
    elif is_reasoning:
        # Reasoning just ended. The original body re-checked
        # `if not reasoning_content:` here, which is always true in this
        # branch — dead condition removed; behavior is unchanged.
        is_reasoning = False
        output = "\n</think>" + content

    return output, is_reasoning
52+
53+
def get_reasoning_from_chunk(delta) -> str | None:
54+
val = getattr(delta, "reasoning_content", None)
55+
if val is not None: return val
56+
if hasattr(delta, "model_extra") and delta.model_extra:
57+
return delta.model_extra.get("reasoning_content")
58+
if hasattr(delta, "__dict__"):
59+
return delta.__dict__.get("reasoning_content")
60+
return None
61+
62+
def mock_weather_tool(city: str):
    """Fake weather lookup standing in for a real tool backend; returns a JSON string."""
    report = {
        "city": city,
        "weather": "Sunny",
        "temperature": "25°C",
        "humidity": "40%",
    }
    return json.dumps(report)
64+
65+
66+
def main():
    """Demo driver for comparing the OLD vs NEW reasoning-wrap logic.

    Runs a two-round tool-calling conversation against an OpenAI-compatible
    endpoint (round 1 elicits a tool call, round 2 sends the tool result),
    capturing every streamed delta. It then replays the captured deltas
    through both wrapping implementations and prints the rendered output
    side by side for human comparison.

    Requires valid credentials below; network errors abort with a message.
    """
    # Placeholder credentials — fill in before running.
    api_key = "your_api_key"
    base_url = "your_base_url"
    model = "your_model"
    client = OpenAI(api_key=api_key, base_url=base_url)

    # --- Round 1: user question that should trigger the weather tool ---
    msgs = [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "北京天气如何?"}]
    tools = [{"type": "function", "function": {"name": "get_weather", "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]}}}]

    print("\n[Request 1] ...")
    r1_deltas = []      # raw deltas captured for the replay step
    r1_reasoning = ""   # accumulated reasoning text (echoed back in round 2)
    tool_calls = []     # streamed tool-call fragments aggregated by index

    try:
        for chunk in client.chat.completions.create(model=model, messages=msgs, tools=tools, stream=True, extra_body={"thinking": {"type": "enabled"}}):
            if not chunk.choices:
                continue
            d = chunk.choices[0].delta
            r1_deltas.append(d)

            rc = get_reasoning_from_chunk(d)
            if rc:
                r1_reasoning += rc
            if d.tool_calls:
                for tc in d.tool_calls:
                    if len(tool_calls) <= tc.index:
                        # First fragment of this call: id/name may be the only fields set.
                        tool_calls.append({"id": tc.id, "function": {"name": tc.function.name, "arguments": ""}})
                    # Fix: streamed fragments may carry arguments=None
                    # (typical for the first fragment) — guard before
                    # concatenating to avoid a TypeError.
                    tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments or ""
    except Exception as e:
        print(e)
        return

    print(f"R1 Done. Tool Calls: {len(tool_calls)}")

    # --- Tool execution: echo the assistant turn, then one tool result per call ---
    msgs.append({"role": "assistant", "tool_calls": [{"id": t["id"], "type": "function", "function": t["function"]} for t in tool_calls], "reasoning_content": r1_reasoning})
    for t in tool_calls:
        msgs.append({"role": "tool", "tool_call_id": t["id"], "content": mock_weather_tool("Beijing")})

    # --- Round 2: model answers using the tool result ---
    print("\n[Request 2] ...")
    r2_deltas = []
    try:
        for chunk in client.chat.completions.create(model=model, messages=msgs, tools=tools, stream=True, extra_body={"thinking": {"type": "enabled"}}):
            if not chunk.choices:
                continue
            r2_deltas.append(chunk.choices[0].delta)
    except Exception as e:
        print(e)
        return

    print("R2 Done.")

    # --- Contrast: replay both rounds through OLD and NEW logic ---
    print("\n" + "="*80)
    print("OUTPUT VISUALIZATION")
    print("="*80)

    for label, proc_func in [("OLD_LOGIC", old_wrap_thinking_by_reasoning_content), ("NEW_LOGIC", new_wrap_thinking_by_reasoning_content)]:
        print(f"\n>>> Mode: {label} <<<")

        # Simulate Dify internals: is_reasoning resets on every invoke.

        # 1. Render round-1 output.
        is_reasoning = False
        r1_text = ""
        for d in r1_deltas:
            # Reconstruct the plain-dict shape the wrap functions expect.
            dct = {"content": d.content, "reasoning_content": get_reasoning_from_chunk(d)}
            out, is_reasoning = proc_func(dct, is_reasoning)
            r1_text += out

        # 2. Render round-2 output.
        is_reasoning = False  # reset for the new request
        r2_text = ""
        for d in r2_deltas:
            dct = {"content": d.content, "reasoning_content": get_reasoning_from_chunk(d)}
            out, is_reasoning = proc_func(dct, is_reasoning)
            r2_text += out

        # Final visual check.
        print(f"\n--- Human Readability ({label}) ---")
        print(f"AI: {r1_text}")
        print(f"[System: Tool Result used...]")
        print(f"AI: {r2_text}")
        print("\n" + "="*80)
150+
151+
# Script entry point: run the comparison demo only when executed directly.
if __name__ == "__main__":
    main()

0 commit comments

Comments
 (0)