Skip to content
This repository was archived by the owner on Mar 6, 2024. It is now read-only.

Commit b8da2c3

Browse files
authored
limit the number of files that can be reviewed by this tool (#27)
<!-- This is an auto-generated comment: release notes by openai -->
### Summary by OpenAI

Purpose: Limit the number of files that can be reviewed by this tool.

- New Feature: Added a new input option `max_files` to specify the maximum number of files that can be reviewed.
- Bug fix: Added checks to ensure that the number of files being reviewed does not exceed this limit.
- Refactor: Updated the code to improve readability and maintainability.
<!-- end of auto-generated comment: release notes by openai -->
1 parent 74aa9b1 commit b8da2c3

File tree

6 files changed

+59
-15
lines changed

6 files changed

+59
-15
lines changed

action.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,10 @@ inputs:
99
required: false
1010
description: 'Enable debug mode'
1111
default: 'false'
12+
max_files:
13+
required: false
14+
description: 'Max files to review'
15+
default: '30'
1216
temperature:
1317
required: false
1418
description: 'Temperature for GPT model'

dist/index.js

Lines changed: 20 additions & 7 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/bot.ts

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,8 @@ export class Bot {
4848
}
4949

5050
private chat_ = async (message: string, ids: Ids): Promise<[string, Ids]> => {
51+
// record timing
52+
const start = Date.now()
5153
if (!message) {
5254
return ['', {}]
5355
}
@@ -57,13 +59,15 @@ export class Bot {
5759

5860
let response: openai.ChatMessage | null = null
5961
if (this.turbo) {
60-
let opts: openai.SendMessageOptions = {}
62+
const opts: openai.SendMessageOptions = {}
6163
if (ids.parentMessageId) {
6264
opts.parentMessageId = ids.parentMessageId
6365
}
6466
response = await this.turbo.sendMessage(message, opts)
6567
try {
68+
const end = Date.now()
6669
core.info(`response: ${JSON.stringify(response)}`)
70+
core.info(`openai response time: ${end - start} ms`)
6771
} catch (e: any) {
6872
core.info(
6973
`response: ${response}, failed to stringify: ${e}, backtrace: ${e.stack}`

src/main.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ import {codeReview} from './review.js'
66
async function run(): Promise<void> {
77
const options: Options = new Options(
88
core.getBooleanInput('debug'),
9+
core.getInput('max_files'),
910
core.getBooleanInput('review_comment_lgtm'),
1011
core.getMultilineInput('path_filters'),
1112
core.getInput('system_message'),

src/options.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -141,19 +141,22 @@ export class Inputs {
141141

142142
export class Options {
143143
debug: boolean
144+
max_files: number
144145
review_comment_lgtm: boolean
145146
path_filters: PathFilter
146147
system_message: string
147148
temperature: number
148149

149150
constructor(
150151
debug: boolean,
152+
max_files = '30',
151153
review_comment_lgtm = false,
152154
path_filters: string[] | null = null,
153155
system_message = '',
154156
temperature = '0.0'
155157
) {
156158
this.debug = debug
159+
this.max_files = parseInt(max_files)
157160
this.review_comment_lgtm = review_comment_lgtm
158161
this.path_filters = new PathFilter(path_filters)
159162
this.system_message = system_message
@@ -188,7 +191,7 @@ export class PathFilter {
188191
}
189192

190193
check(path: string): boolean {
191-
let include_all = this.rules.length == 0
194+
let include_all = this.rules.length === 0
192195
let matched = false
193196
for (const [rule, exclude] of this.rules) {
194197
if (exclude) {

src/review.ts

Lines changed: 25 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,12 +14,16 @@ const context = github.context
1414
const repo = context.repo
1515

1616
const MAX_TOKENS_FOR_EXTRA_CONTENT = 2500
17+
const comment_tag =
18+
'<!-- This is an auto-generated comment: summarize by openai -->'
1719

1820
export const codeReview = async (
1921
bot: Bot,
2022
options: Options,
2123
prompts: Prompts
2224
) => {
25+
const commenter: Commenter = new Commenter()
26+
2327
if (
2428
context.eventName !== 'pull_request' &&
2529
context.eventName !== 'pull_request_target'
@@ -29,14 +33,11 @@ export const codeReview = async (
2933
)
3034
return
3135
}
32-
3336
if (!context.payload.pull_request) {
3437
core.warning(`Skipped: context.payload.pull_request is null`)
3538
return
3639
}
3740

38-
const commenter: Commenter = new Commenter()
39-
4041
const inputs: Inputs = new Inputs()
4142
inputs.title = context.payload.pull_request.title
4243
if (context.payload.pull_request.body) {
@@ -57,6 +58,22 @@ export const codeReview = async (
5758
const {files, commits} = diff.data
5859
if (!files) {
5960
core.warning(`Skipped: diff.data.files is null`)
61+
await commenter.comment(
62+
`Skipped: no files to review`,
63+
comment_tag,
64+
'replace'
65+
)
66+
return
67+
}
68+
69+
// check if we are exceeding max_files
70+
if (files.length > options.max_files) {
71+
core.warning("Skipped: too many files to review, can't handle it")
72+
await commenter.comment(
73+
`Skipped: too many files to review, can't handle it`,
74+
comment_tag,
75+
'replace'
76+
)
6077
return
6178
}
6279

@@ -145,9 +162,11 @@ export const codeReview = async (
145162
inputs.summary = summarize_final_response
146163

147164
next_summarize_ids = summarize_final_response_ids
148-
const tag =
149-
'<!-- This is an auto-generated comment: summarize by openai -->'
150-
await commenter.comment(`${summarize_final_response}`, tag, 'replace')
165+
await commenter.comment(
166+
`${summarize_final_response}`,
167+
comment_tag,
168+
'replace'
169+
)
151170
}
152171

153172
// final release notes

0 commit comments

Comments (0)