Skip to content

Commit 724e1de

Browse files
authored
Merge branch 'main' into dependabot/pip/models/gemini/protobuf-6.33.5
2 parents 3f5475a + 303d1bb commit 724e1de

File tree

234 files changed

+5101
-1356
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

234 files changed

+5101
-1356
lines changed

.gitignore

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,3 +188,8 @@ pyrightconfig.json
188188
mypy.ini
189189
tox.ini
190190
*.difypkg
191+
models/openai_api_compatible/PR_DESCRIPTION.md
192+
193+
# local venv
194+
models/volcengine/.venv/
195+
models/volcengine/uv.lock

agent-strategies/cot_agent/manifest.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
version: 0.0.31
1+
version: 0.0.32
22
type: plugin
33
author: "langgenius"
44
name: "agent"

agent-strategies/cot_agent/output_parser/cot_output_parser.py

Lines changed: 57 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,9 @@
77
from dify_plugin.interfaces.agent import AgentScratchpadUnit
88

99
PREFIX_DELIMITERS = frozenset({"\n", " ", ""})
10+
# Tags injected by Gemini when include_thoughts=True; stripped so ReAct sees only Thought:/Action:/FinalAnswer:
11+
THINK_START = "<think>"
12+
THINK_END = "</think>"
1013

1114

1215
class ReactState(Enum):
@@ -115,11 +118,63 @@ def step(self, delta: str) -> tuple[bool, ReactChunk | None, bool, bool]:
115118
answer_matcher = PrefixMatcher(ReactState.ANSWER)
116119
thought_matcher = PrefixMatcher(ReactState.THINKING)
117120

121+
_in_think = False
122+
_think_buf = ""
123+
_think_depth = 0
118124
for response in llm_response:
119125
if response.delta.usage:
120126
usage_dict["usage"] = response.delta.usage
121-
response_content = response.delta.message.content
122-
if not isinstance(response_content, str):
127+
raw = response.delta.message.content
128+
if isinstance(raw, str):
129+
response_content = raw
130+
elif isinstance(raw, list):
131+
# Plugins (e.g. Gemini) send content as list; some items may be non-text (e.g. image)
132+
parts = [
133+
s
134+
for c in raw
135+
if isinstance(s := (getattr(c, "data", None) or getattr(c, "text", None)), str)
136+
]
137+
response_content = "".join(parts)
138+
else:
139+
continue
140+
if not response_content:
141+
continue
142+
# When include_thoughts=True, Gemini injects <think>...</think>; strip across chunks so
143+
# ReAct parser only sees Thought:/Action:/FinalAnswer: from the model reply.
144+
# Nested <think> tags are supported via a depth counter.
145+
if THINK_START in response_content or THINK_END in response_content or _in_think:
146+
buf = _think_buf + response_content
147+
_think_buf = ""
148+
out = []
149+
i = 0
150+
while i < len(buf):
151+
if _in_think:
152+
end_j = buf.find(THINK_END, i)
153+
start_j = buf.find(THINK_START, i)
154+
if end_j == -1 and start_j == -1:
155+
_think_buf = buf[i:]
156+
break
157+
if start_j != -1 and (end_j == -1 or start_j < end_j):
158+
_think_depth += 1
159+
i = start_j + len(THINK_START)
160+
else:
161+
j = end_j
162+
_think_depth -= 1
163+
if _think_depth <= 0:
164+
_in_think = False
165+
_think_depth = 0
166+
i = j + len(THINK_END)
167+
else:
168+
j = buf.find(THINK_START, i)
169+
if j == -1:
170+
out.append(buf[i:])
171+
break
172+
out.append(buf[i:j])
173+
_in_think = True
174+
_think_depth = 1
175+
i = j + len(THINK_START)
176+
response_content = "".join(out)
177+
if not response_content:
123178
continue
124179

125180
# stream

agent-strategies/cot_agent/strategies/ReAct.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,7 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
121121
run_agent_state = True
122122
llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None}
123123
final_answer = ""
124+
final_answer_already_streamed = False # True when final_answer = thought (no action), already sent as THINKING
124125
prompt_messages: list[PromptMessage] = []
125126

126127
# Init model
@@ -217,7 +218,9 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
217218
assert isinstance(react_chunk, ReactChunk)
218219
chunk_state = react_chunk.state
219220
chunk = react_chunk.content
220-
yield self.create_text_message(chunk)
221+
# Stream TEXT only for THINKING; ANSWER is sent once at end via final_answer
222+
if chunk_state != ReactState.ANSWER:
223+
yield self.create_text_message(chunk)
221224
if chunk_state == ReactState.ANSWER:
222225
final_answer += chunk
223226
elif chunk_state == ReactState.THINKING:
@@ -266,6 +269,7 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
266269
)
267270
if not scratchpad.action:
268271
final_answer = scratchpad.thought
272+
final_answer_already_streamed = True # thought was already streamed as THINKING chunks
269273
else:
270274
if scratchpad.action.action_name.lower() == "final answer":
271275
try:
@@ -419,7 +423,8 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]:
419423
)
420424
iteration_step += 1
421425

422-
# yield self.create_text_message(final_answer)
426+
if not final_answer_already_streamed and final_answer:
427+
yield self.create_text_message(final_answer)
423428

424429
# If context is a list of dict, create retriever resource message
425430
if isinstance(react_params.context, list):

datasources/github/datasources/github.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,8 +80,8 @@ def _get_pages(self, datasource_parameters: dict[str, Any]) -> DatasourceGetPage
8080
raise ValueError(
8181
f"Invalid 'affiliation' parts: {', '.join(parts - allowed_affiliations)}. Allowed: {', '.join(allowed_affiliations)}.")
8282

83-
_type = datasource_parameters.get("type", "all")
84-
if _type not in {"all", "owner", "public", "private", "member"}:
83+
_type = datasource_parameters.get("type")
84+
if _type and _type not in {"all", "owner", "public", "private", "member"}:
8585
raise ValueError(
8686
f"Invalid 'type' parameter: {_type}. Allowed values are: all, owner, public, private, member.")
8787

datasources/github/manifest.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
version: 0.4.0
1+
version: 0.4.1
22
type: plugin
33
author: langgenius
44
name: github_datasource

models/aihubmix/manifest.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
version: 0.0.16
1+
version: 0.0.17
22
type: plugin
33
author: langgenius
44
name: aihubmix

models/aihubmix/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ dependencies = [
1212
"openai~=2.3.0",
1313
"google-genai>=1.56.0",
1414
"anthropic~=0.57.1",
15-
"pillow~=11.3.0",
15+
"pillow~=12.1.1",
1616
"urllib3>=2.6.3",
1717
]
1818

models/aihubmix/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,5 +3,5 @@ httpx>=0.28.1,<0.29.dev0
33
openai~=2.3.0
44
google-genai>=1.56.0
55
anthropic~=0.57.1
6-
pillow~=11.3.0
6+
pillow~=12.1.1
77
urllib3>=2.6.3

0 commit comments

Comments (0)