Commit 77f2af6

finalise

1 parent 1da12ce commit 77f2af6

11 files changed: +315 -238 lines changed

patchwork/common/multiturn_strategy/agentic_strategy.py

Lines changed: 16 additions & 0 deletions
@@ -23,6 +23,8 @@ def __init__(self, llm_client: LlmClient, tool_set: dict[str, Tool]):
         self.llm_client = llm_client
         self.tool_set = tool_set
         self.history: list[ChatCompletionMessageParam] = []
+        self.request_tokens = 0
+        self.response_tokens = 0

     def generate_reply(self, message: str) -> str:
         self.history.append(dict(role="user", content=message))
@@ -36,6 +38,9 @@ def generate_reply(self, message: str) -> str:
         if is_prompt_safe < 0:
             raise ValueError("The subsequent prompt is not supported, due to large size.")
         response = self.llm_client.chat_completion(**input_kwargs)
+        self.response_tokens = response.usage.completion_tokens
+        self.request_tokens = response.usage.prompt_tokens
+
         choices = response.choices or []

         message_content = ""
@@ -145,6 +150,17 @@ def __is_session_completed(self) -> bool:

         return False

+    def usage(self):
+        request_tokens = 0
+        response_tokens = 0
+        for role in [self.__assistant_role, self.__user_role]:
+            request_tokens += role.request_tokens
+            response_tokens += role.response_tokens
+        return {
+            "request_tokens": request_tokens,
+            "response_tokens": response_tokens,
+        }
+
     def execute(self, limit: int | None = None) -> None:
         message = self.__render_prompt(self.__user_prompt_template)
         try:
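
Note that `generate_reply` assigns rather than accumulates, so each role's counters hold only its most recent exchange; `usage()` then sums those latest counters across the assistant and user roles. A minimal sketch of that aggregation, with `Role` as a hypothetical stand-in for the strategy's internal role objects:

# Hypothetical sketch of the aggregation performed by usage(); `Role`
# stands in for the internal role objects, whose counters are set in
# generate_reply.
class Role:
    def __init__(self, request_tokens: int = 0, response_tokens: int = 0):
        self.request_tokens = request_tokens
        self.response_tokens = response_tokens

def usage(roles: list[Role]) -> dict:
    return {
        "request_tokens": sum(r.request_tokens for r in roles),
        "response_tokens": sum(r.response_tokens for r in roles),
    }

print(usage([Role(120, 45), Role(80, 30)]))
# {'request_tokens': 200, 'response_tokens': 75}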

patchwork/common/multiturn_strategy/agentic_strategy_v2.py

Lines changed: 51 additions & 21 deletions
@@ -7,6 +7,7 @@
 from pydantic import BaseModel
 from pydantic_ai import Agent
 from pydantic_ai.models.anthropic import AnthropicModel
+from pydantic_ai.result import RunResult
 from typing_extensions import Any, Dict, Optional, Union

 from patchwork.common.client.llm.utils import example_json_to_base_model
@@ -31,14 +32,14 @@ class Config:

 class AgenticStrategyV2:
     def __init__(
-            self,
-            api_key: str,
-            template_data: dict[str, str],
-            system_prompt_template: str,
-            user_prompt_template: str,
-            agent_configs: list[AgentConfig],
-            example_json: Union[str, dict[str, Any]] = '{"output":"output text"}',
-            limit: Optional[int] = None,
+        self,
+        api_key: str,
+        template_data: dict[str, str],
+        system_prompt_template: str,
+        user_prompt_template: str,
+        agent_configs: list[AgentConfig],
+        example_json: Union[str, dict[str, Any]] = '{"output":"output text"}',
+        limit: Optional[int] = None,
     ):
         self.__limit = limit
         self.__template_data = template_data
@@ -66,6 +67,19 @@ def __init__(

             self.__agents.append(agent)

+        self.__request_tokens = 0
+        self.__response_tokens = 0
+
+    def reset(self):
+        self.__request_tokens = 0
+        self.__response_tokens = 0
+
+    def usage(self):
+        return {
+            "request_tokens": self.__request_tokens,
+            "response_tokens": self.__response_tokens,
+        }
+
     def execute(self, limit: Optional[int] = None) -> dict:
         agents_result = dict()
         loop = asyncio.new_event_loop()
@@ -75,8 +89,13 @@ def execute(self, limit: Optional[int] = None) -> dict:
             message_history = None
             agent_output = None
             for i in range(limit or self.__limit or sys.maxsize):
-                agent_output = loop.run_until_complete(agent.run(user_message, message_history=message_history))
+                agent_output: RunResult[Any] = loop.run_until_complete(
+                    agent.run(user_message, message_history=message_history)
+                )
                 message_history = agent_output.all_messages()
+                self.__request_tokens += agent_output.usage().request_tokens or 0
+                self.__response_tokens += agent_output.usage().response_tokens or 0
+
                 if getattr(agent_output.data, _COMPLETION_FLAG_ATTRIBUTE, False):
                     break
                 user_message = "Please continue"
@@ -88,27 +107,38 @@ def execute(self, limit: Optional[int] = None) -> dict:
             return dict()

         if len(agents_result) == 1:
-            final_result = loop.run_until_complete(self.__summariser.run(
-                "From the actions taken by the assistant. Please give me the result.",
-                message_history=next(v for _, v in agents_result.items()).all_messages(),
-            ))
+            final_result = loop.run_until_complete(
+                self.__summariser.run(
+                    "From the actions taken by the assistant. Please give me the result.",
+                    message_history=next(v for _, v in agents_result.items()).all_messages(),
+                )
+            )
         else:
             agent_summaries = []
             for agent_result in agents_result.values():
-                agent_summary_result = loop.run_until_complete(self.__summariser.run(
-                    "From the actions taken by the assistant. Please give me the result.",
-                    message_history=agent_result.all_messages(),
-                ))
+                agent_summary_result = loop.run_until_complete(
+                    self.__summariser.run(
+                        "From the actions taken by the assistant. Please give me the result.",
+                        message_history=agent_result.all_messages(),
+                    )
+                )
+                self.__request_tokens += agent_summary_result.usage().request_tokens or 0
+                self.__response_tokens += agent_summary_result.usage().response_tokens or 0
+
                 agent_summary = getattr(agent_summary_result.data, _MESSAGE_ATTRIBUTE, None)
                 if agent_summary is None:
                     continue

                 agent_summaries.append(agent_summary)
             agent_summary_list = "\n* " + "\n* ".join(agent_summaries)
-            final_result = loop.run_until_complete(self.__summariser.run(
-                "Please give me the result from the following summary of what the assistants have done."
-                + agent_summary_list,
-            ))
+            final_result = loop.run_until_complete(
+                self.__summariser.run(
+                    "Please give me the result from the following summary of what the assistants have done."
+                    + agent_summary_list,
+                )
+            )
+            self.__request_tokens += final_result.usage().request_tokens or 0
+            self.__response_tokens += final_result.usage().response_tokens or 0

         loop.close()
         return final_result.data.dict()
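
The bookkeeping above relies on pydantic-ai's `RunResult.usage()`, whose `request_tokens`/`response_tokens` fields can be None, hence the `or 0` guards. A standalone sketch of the same accumulation, with a tiny stub standing in for the `RunResult` objects that `agent.run()` produces:

# Sketch: accumulate token usage across pydantic-ai runs; SimpleNamespace
# stubs stand in for RunResult so the pattern runs on its own.
from types import SimpleNamespace

runs = [
    SimpleNamespace(usage=lambda: SimpleNamespace(request_tokens=100, response_tokens=40)),
    SimpleNamespace(usage=lambda: SimpleNamespace(request_tokens=None, response_tokens=25)),
]

request_tokens = 0
response_tokens = 0
for run in runs:
    usage = run.usage()
    request_tokens += usage.request_tokens or 0   # `or 0` guards against None counts
    response_tokens += usage.response_tokens or 0

print(request_tokens, response_tokens)  # 100 65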

patchwork/common/multiturn_strategy/analyze_implement.py

Lines changed: 10 additions & 0 deletions
@@ -40,6 +40,14 @@ def _reset(self):
         self._stage = STAGE.ANALYSIS
         self.run_count = 0
         self.stage_run_counts = defaultdict(int)
+        self.__request_tokens = 0
+        self.__response_tokens = 0
+
+    def usage(self):
+        return dict(
+            request_tokens=self.__request_tokens,
+            response_tokens=self.__response_tokens,
+        )

     def __run_prompt(self, messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionMessageParam]:
         input_kwargs = dict(
@@ -52,6 +60,8 @@ def __run_prompt(self, messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionMessageParam]:
         if is_prompt_safe < 0:
            raise ValueError("The subsequent prompt is not supported, due to large size.")
         response = self.llm_client.chat_completion(**input_kwargs)
+        self.__request_tokens += response.usage.prompt_tokens
+        self.__response_tokens += response.usage.completion_tokens
         new_messages = [choice.message.to_dict() for choice in response.choices]
         messages.extend(new_messages)
         return messages
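
Here the counters accumulate with `+=` across calls (unlike `agentic_strategy.py` above, which overwrites on each call), so `usage()` reports totals for the whole multi-turn run. A hedged sketch of the same pattern against an OpenAI-style client; `client` and `messages` are illustrative, not names from this repo:

# Sketch: accumulate usage over successive chat completions
# (openai>=1.0 client; OpenAI() reads OPENAI_API_KEY from the environment).
from openai import OpenAI

client = OpenAI()
messages = [{"role": "user", "content": "Say hi."}]
request_tokens = 0
response_tokens = 0
for _ in range(3):
    response = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    request_tokens += response.usage.prompt_tokens
    response_tokens += response.usage.completion_tokens
print(request_tokens, response_tokens)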

patchwork/steps/AgenticLLM/AgenticLLM.py

Lines changed: 1 addition & 0 deletions
@@ -27,4 +27,5 @@ def run(self) -> dict:
         return dict(
             conversation_history=self.agentic_strategy.history,
             tool_records=self.agentic_strategy.tool_records,
+            **self.agentic_strategy.usage(),
         )
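
With `usage()` splatted into the return value, the step's output takes roughly this shape (values illustrative):

# Illustrative shape of AgenticLLM.run()'s return value after this change.
{
    "conversation_history": [{"role": "user", "content": "..."}],
    "tool_records": [],
    "request_tokens": 1234,
    "response_tokens": 567,
}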

patchwork/steps/AgenticLLM/typed.py

Lines changed: 2 additions & 2 deletions
@@ -38,5 +38,5 @@ class AgenticLLMInputs(TypedDict, total=False):
 class AgenticLLMOutputs(TypedDict):
     conversation_history: List[Dict]
     tool_records: List[Dict]
-    # request_tokens: int
-    # response_tokens: int
+    request_tokens: int
+    response_tokens: int

patchwork/steps/AgenticLLMV2/AgenticLLMV2.py

Lines changed: 2 additions & 1 deletion
@@ -32,4 +32,5 @@ def __init__(self, inputs):
         )

     def run(self) -> dict:
-        return self.agentic_strategy.execute(limit=self.conversation_limit)
+        result = self.agentic_strategy.execute(limit=self.conversation_limit)
+        return {**result, **self.agentic_strategy.usage()}
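
One detail of the `{**result, **self.agentic_strategy.usage()}` merge: right-hand keys win on collision, so the usage counters would shadow any same-named keys in the strategy's result. A tiny sketch:

# Dict-merge semantics used in run(): later (right-hand) keys win.
result = {"output": "done", "request_tokens": 1}
usage = {"request_tokens": 10, "response_tokens": 4}
merged = {**result, **usage}
assert merged == {"output": "done", "request_tokens": 10, "response_tokens": 4}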

patchwork/steps/AgenticLLMV2/typed.py

Lines changed: 2 additions & 1 deletion
@@ -15,4 +15,5 @@ class AgenticLLMV2Inputs(TypedDict, total=False):


 class AgenticLLMV2Outputs(TypedDict):
-    pass
+    request_tokens: int
+    response_tokens: int

patchwork/steps/FixIssue/FixIssue.py

Lines changed: 1 addition & 1 deletion
@@ -178,4 +178,4 @@ def run(self):
                 # Git-specific errors (untracked files, etc) - keep empty diff
                 logger.warning(f"Could not get git diff for {file}: {str(e)}")

-        return dict(modified_files=modified_files)
+        return dict(modified_files=modified_files, **self.multiturn_llm_call.usage())

patchwork/steps/FixIssue/typed.py

Lines changed: 2 additions & 0 deletions
@@ -55,3 +55,5 @@ class ModifiedFile(TypedDict):

 class FixIssueOutputs(TypedDict):
     modified_files: List[ModifiedFile]
+    request_tokens: int
+    response_tokens: int
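
Since `FixIssueOutputs` is declared without `total=False`, type checkers now require both token keys alongside `modified_files`; a conforming value looks roughly like this (figures illustrative):

# A value conforming to FixIssueOutputs after this change (illustrative).
outputs: FixIssueOutputs = {
    "modified_files": [],
    "request_tokens": 2048,
    "response_tokens": 512,
}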
