
Commit 5671111

Merge remote-tracking branch 'origin/main' into log-analysis
2 parents 238b79c + 87706a1


8 files changed, +171 -110 lines changed


.github/workflows/test.yml

Lines changed: 1 addition & 8 deletions
```diff
@@ -1,10 +1,6 @@
 name: Tests
 
 on:
-  pull_request:
-    types:
-      - ready_for_review
-      - review_requested
   push:
     branches-ignore:
       - main
@@ -15,15 +11,12 @@ on:
       - generateunittests-*
       - generatecodeusageexample-*
       - resolveissue-*
-
       - demo*
 
 # Credits to https://blog.maximeheckel.com/posts/building-perfect-github-action-frontend-teams/#you-are-terminated
 concurrency:
   # Here the group is defined by the head_ref of the PR
   group: ${{ github.head_ref || github.ref_name }}
-  # Here we specify that we'll cancel any "in progress" workflow of the same group. Thus if we push, ammend a commit and push
-  # again the previous workflow will be cancelled, thus saving us github action build minutes and avoid any conflicts
   cancel-in-progress: true
 
 jobs:
@@ -211,7 +204,7 @@ jobs:
       run: |
         source .venv/bin/activate
         patchwork PRReview --log debug \
-          --patched_api_key=${{ secrets.PATCHED_API_KEY }} \
+          --anthropic_api_key=${{ secrets.ANTHROPIC_API_KEY }} \
          --github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
          --pr_url=https://github.com/patched-codes/patchwork/pull/${{ steps.findPr.outputs.number }} \
          --disable_telemetry
```

patchwork/common/client/llm/openai_.py

Lines changed: 8 additions & 1 deletion
```diff
@@ -14,6 +14,7 @@
 from typing_extensions import Dict, Iterable, List, Optional, Union
 
 from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
+from patchwork.logger import logger
 
 
 @functools.lru_cache
@@ -36,6 +37,7 @@ class OpenAiLlmClient(LlmClient):
         "o1-mini": 128_000,
         "gpt-4o-mini": 128_000,
         "gpt-4o": 128_000,
+        "o3-mini": 128_000,
     }
 
     def __init__(self, api_key: str, base_url=None, **kwargs):
@@ -87,7 +89,12 @@ def is_prompt_supported(
 
         model_limit = self.__get_model_limits(model)
         token_count = 0
-        encoding = tiktoken.encoding_for_model(model)
+        encoding = None
+        try:
+            encoding = tiktoken.encoding_for_model(model)
+        except Exception as e:
+            logger.error(f"Error getting encoding for model {model}: {e}, using gpt-4o as fallback")
+            encoding = tiktoken.encoding_for_model("gpt-4o")
         for message in messages:
             message_token_count = len(encoding.encode(message.get("content")))
             token_count = token_count + message_token_count
```
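
The fallback matters because `tiktoken.encoding_for_model` raises for model names it does not recognize (typically a `KeyError`), which newer names like `o3-mini` can trigger on older tiktoken versions. A minimal standalone sketch of the pattern, with a hypothetical helper name and assuming `tiktoken` is installed:

```python
# Standalone sketch (not patchwork code) of the fallback introduced above.
import tiktoken

def encoding_with_fallback(model: str):
    """Return the tiktoken encoding for `model`, else fall back to gpt-4o's."""
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        # Unrecognized model name: approximate token counts with the
        # gpt-4o encoding instead of failing the whole prompt check.
        return tiktoken.encoding_for_model("gpt-4o")

enc = encoding_with_fallback("some-unreleased-model")
print(len(enc.encode("hello world")))  # token count under the fallback encoding
```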
patchwork/patchflows/PRReview/PRReview.py

Lines changed: 111 additions & 91 deletions

```diff
@@ -1,124 +1,144 @@
-import json
 from pathlib import Path
 
 import yaml
 
 from patchwork.common.utils.progress_bar import PatchflowProgressBar
 from patchwork.common.utils.step_typing import validate_steps_with_inputs
 from patchwork.step import Step
-from patchwork.steps import (
-    LLM,
-    CallLLM,
-    CreatePRComment,
-    ExtractModelResponse,
-    PreparePR,
-    PreparePrompt,
-    ReadPRDiffs,
-)
+from patchwork.steps import CreatePRComment, ReadPRDiffs, SimplifiedLLMOnce
 
 _DEFAULT_PROMPT_JSON = Path(__file__).parent / "pr_review_prompt.json"
 _DEFAULT_INPUT_FILE = Path(__file__).parent / "defaults.yml"
 
 
-_NONE = "none"
-_SHORT = "short"
-_LONG = "long"
-_SUMMARY_LEVEL = {
-    _NONE: 0,
-    _SHORT: 1,
-    _LONG: 2,
-}
-
-
 class PRReview(Step):
     def __init__(self, inputs: dict):
         PatchflowProgressBar(self).register_steps(
-            CallLLM,
-            CreatePRComment,
-            ExtractModelResponse,
-            PreparePR,
-            PreparePrompt,
             ReadPRDiffs,
+            SimplifiedLLMOnce,
+            CreatePRComment,
         )
         final_inputs = yaml.safe_load(_DEFAULT_INPUT_FILE.read_text())
         final_inputs.update(inputs)
 
-        if "prompt_template_file" not in final_inputs.keys():
-            final_inputs["prompt_template_file"] = _DEFAULT_PROMPT_JSON
-
-        diff_summary = final_inputs.get("diff_summary", _LONG)
-        if diff_summary.lower() not in _SUMMARY_LEVEL.keys():
-            raise ValueError(f"Invalid diff_summary, accepted diff_summary values: {_SUMMARY_LEVEL.keys()}")
-        self.verbosity = _SUMMARY_LEVEL[diff_summary.lower()]
-
-        self.is_suggestion_required = bool(final_inputs.get("diff_suggestion"))
-
         validate_steps_with_inputs(
             set(final_inputs.keys()).union(
                 {
-                    "prompt_id",
-                    "prompt_values",
-                    "modified_code_files",
+                    "user_prompt",
+                    "prompt_value",
+                    "json_schema",
                     "pr_comment",
                 }
             ),
             ReadPRDiffs,
-            LLM,
-            PreparePR,
+            SimplifiedLLMOnce,
             CreatePRComment,
         )
 
         self.inputs = final_inputs
 
     def run(self) -> dict:
-        if self.verbosity == _SUMMARY_LEVEL[_NONE]:
-            return dict()
-
-        outputs = ReadPRDiffs(self.inputs).run()
-        self.inputs["prompt_values"] = outputs["diffs"]
-
-        outputs = LLM(
-            dict(
-                prompt_id="diffreview-suggestion" if self.is_suggestion_required else "diffreview",
-                model_response_format=dict(type="json_object"),
-                **self.inputs,
-            )
-        ).run()
-        self.inputs.update(outputs)
-
-        summaries = []
-        for raw_response, prompt_values in zip(self.inputs["openai_responses"], self.inputs["prompt_values"]):
-            response = json.loads(raw_response)
-            summary = {}
-            if "path" in prompt_values.keys():
-                summary["path"] = prompt_values["path"]
-            if "review" in response.keys():
-                summary["commit_message"] = response["review"]
-            if "suggestion" in response.keys():
-                summary["patch_message"] = response["suggestion"]
-            summaries.append(summary)
-
-        header = ""
-        if self.verbosity > _SUMMARY_LEVEL[_SHORT]:
-            filtered_summaries = [
-                str(summary["commit_message"]) for summary in summaries if summary.get("commit_message")
-            ]
-            self.inputs["prompt_id"] = "diffreview_summary"
-            self.inputs["prompt_values"] = [{"diffreviews": "\n".join(filtered_summaries)}]
-
-            outputs = PreparePrompt(self.inputs).run()
-            self.inputs.update(outputs)
-            outputs = CallLLM(self.inputs).run()
-            self.inputs.update(outputs)
-            header = self.inputs["openai_responses"][0]
-
-        self.inputs["pr_header"] = header
-        self.inputs["modified_code_files"] = summaries
-        outputs = PreparePR(self.inputs).run()
-        self.inputs.update(outputs)
-
-        self.inputs["pr_comment"] = self.inputs["pr_body"]
-        outputs = CreatePRComment(self.inputs).run()
-        self.inputs.update(outputs)
-
-        return self.inputs
+        pr_diffs_outputs = ReadPRDiffs(self.inputs).run()
+
+        reviews = []
+        for diffs in iter(pr_diffs_outputs["diffs"]):
+            llm1_outputs = SimplifiedLLMOnce(
+                dict(
+                    prompt_value=diffs,
+                    user_prompt="""\
+Analyze the following code diff against the provided rules:
+
+<CODE_DIFF>
+{{diff}}
+</CODE_DIFF>
+
+<RULES>
+- Do not ignore potential bugs in the code.
+- Do not overlook possible security vulnerabilities introduced by code modifications.
+- Do not deviate from the original coding standards established in the pull request.
+</RULES>
+
+For each rule, determine if there\'s a violation. Use the following chain of thought process:
+
+1. Understand the rule
+2. Examine the diff line by line
+3. Identify any potential violations
+4. Determine the specific line numbers of violations
+5. Summarize your findings
+
+Rule 1:
+1. Rule understanding: [Briefly explain the rule]
+2. Diff examination: [Describe how you\'re examining the diff]
+3. Potential violations: [List any potential violations you\'ve identified]
+4. Line numbers: [If violations exist, list the specific line numbers]
+5. Summary: [Summarize your findings for this rule]
+
+Rule 2:
+[Repeat the above structure for each rule]
+
+Now, carefully review your reasoning in the section above. Ensure that your conclusions are consistent with the analysis you\'ve done for each rule.
+
+Your review should have the following markdown format:
+
+<REVIEW_FORMAT>
+## File Changed: `{{path}}`
+
+Details: [If rule violation include brief prescriptive explanation]
+
+Affected Code Snippet:
+[Original code enclosed in a code block from the file that is affected by this violation. If no violation, write "N/A"]
+
+Start Line: [Starting Line number of the affected code. If no violation, write "N/A"]
+
+End Line: [Ending Line number of the affected code. If no violation, write "N/A"]
+
+-------------
+
+Details: [If rule violation include brief prescriptive explanation]
+
+Affected Code Snippet:
+[Original code enclosed in a code block from the file that is affected by this violation. If no violation, write "N/A"]
+
+Start Line: [Starting Line number of the affected code. If no violation, write "N/A"]
+
+End Line: [Ending Line number of the affected code. If no violation, write "N/A"]
+
+-------------
+
+... (continue for all rules)
+</REVIEW_FORMAT>
+
+Ensure that you include all rules in your response, even if there\'s no violation. The output should directly reflect the reasoning in your thinking section.
+""",
+                    json_schema={"review": "The markdown text of the reviews"},
+                    **self.inputs,
+                )
+            ).run()
+
+            llm2_outputs = SimplifiedLLMOnce(
+                dict(
+                    prompt_value=llm1_outputs,
+                    user_prompt="""\
+You are a software manager compiling code reviews from all teams. You are given a list of code reviews. You have to remove code reviews that is either not actionable or useful. Do not change the accepted reviews, return the original review for the response. Do not remove the path from the review.
+
+<code_reviews>
+{{review}}
+<code_reviews>
+
+You should return an empty response if there are no code reviews that is actionable or useful.
+""",
+                    json_schema={"review": "The reviews curated"},
+                    **self.inputs,
+                )
+            ).run()
+
+            review = llm2_outputs.get("review")
+            if review is not None and len(str(review)) > 0:
+                reviews.append(review)
+
+        if len(reviews) > 0:
+            reviews_str = "\n".join(reviews)
+        else:
+            reviews_str = "No issues found."
+
+        return CreatePRComment(dict(pr_comment=reviews_str, **self.inputs)).run()
```
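
The rewritten `run` replaces the old prompt-template pipeline with a two-pass loop per diff: a first `SimplifiedLLMOnce` call drafts a rule-based review, a second call curates out non-actionable feedback, and only non-empty curated reviews are joined into the PR comment. A minimal sketch of that control flow, where `run_llm` is a hypothetical stand-in for `SimplifiedLLMOnce(...).run()`:

```python
# Sketch of the two-pass review loop above; `run_llm` is a hypothetical
# callable standing in for SimplifiedLLMOnce(...).run().
def review_all_diffs(diffs, run_llm):
    reviews = []
    for diff in diffs:
        draft = run_llm(prompt_value=diff)     # pass 1: draft a review per diff
        curated = run_llm(prompt_value=draft)  # pass 2: drop non-actionable reviews
        review = curated.get("review")
        if review:                             # keep only non-empty curated reviews
            reviews.append(str(review))
    return "\n".join(reviews) if reviews else "No issues found."
```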

patchwork/patchflows/PRReview/defaults.yml

Lines changed: 1 addition & 4 deletions
```diff
@@ -1,7 +1,4 @@
 # PRReview Inputs
-diff_summary: long
-diff_suggestion: false
-
 
 # ReadPRDiffs Inputs
 # github_api_key: required-for-github-scm
@@ -14,7 +11,7 @@ diff_suggestion: false
 # CallLLM Inputs
 # openai_api_key: required-for-chatgpt
 # google_api_key: required-for-gemini
-# model: gpt-4o
+model: claude-3-5-sonnet-latest
 # client_base_url: https://api.openai.com/v1
 # Example HF model
 # client_base_url: https://api-inference.huggingface.co/models/codellama/CodeLlama-70b-Instruct-hf/v1
```
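
Since `PRReview.__init__` loads this file first and then applies caller inputs on top (`final_inputs.update(inputs)`), uncommenting `model: claude-3-5-sonnet-latest` only sets a default; any caller-supplied `model` still wins. A small sketch of that merge order, assuming a local copy of `defaults.yml`:

```python
# Sketch of the defaults/override merge performed in PRReview.__init__;
# the local defaults.yml path here is hypothetical.
from pathlib import Path
import yaml

final_inputs = yaml.safe_load(Path("defaults.yml").read_text())  # model: claude-3-5-sonnet-latest
final_inputs.update({"model": "gpt-4o"})  # caller-supplied inputs override defaults
print(final_inputs["model"])  # -> gpt-4o
```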
