Skip to content
This repository was archived by the owner on Mar 6, 2024. It is now read-only.

Commit df2286c

Browse files
authored
add config options (#63)
<!-- This is an auto-generated comment: release notes by openai --> ### Summary by OpenAI **Release Notes** New Feature: This pull request adds new configuration options related to the OpenAI API and improves the bot's performance. Users can now configure model selection, retries, timeout, and concurrency limit for the OpenAI API. The changes also include a new function to limit concurrency and improve chat session management during reviews. <!-- end of auto-generated comment: release notes by openai -->
1 parent aa060c6 commit df2286c

File tree

6 files changed

+70
-27
lines changed

6 files changed

+70
-27
lines changed

action.yml

Lines changed: 21 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -13,10 +13,6 @@ inputs:
1313
required: false
1414
description: 'Max files to review. Less than or equal to 0 means no limit.'
1515
default: '40'
16-
temperature:
17-
required: false
18-
description: 'Temperature for GPT model'
19-
default: '0.0'
2016
review_comment_lgtm:
2117
required: false
2218
description: 'Leave comments even if the patch is LGTM'
@@ -49,6 +45,27 @@ inputs:
4945
!**/gen/**
5046
!**/_gen/**
5147
!**/vendor/**
48+
openai_model:
49+
required: false
50+
description: 'Model to use'
51+
default: 'gpt-3.5-turbo'
52+
openai_model_temperature:
53+
required: false
54+
description: 'Temperature for GPT model'
55+
default: '0.0'
56+
openai_retries:
57+
required: false
58+
description:
59+
'How many times to retry openai API in case of timeouts or errors?'
60+
default: '5'
61+
openai_timeout_ms:
62+
required: false
63+
description: 'Timeout for openai API call in millis'
64+
default: '60000'
65+
openai_concurrency_limit:
66+
required: false
67+
description: 'How many concurrent API calls to make to openai servers?'
68+
default: '4'
5269
system_message:
5370
required: false
5471
description: 'System message to be sent to OpenAI'

dist/index.js

Lines changed: 19 additions & 11 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/bot.ts

Lines changed: 4 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -24,7 +24,8 @@ export class Bot {
2424
apiKey: process.env.OPENAI_API_KEY,
2525
debug: options.debug,
2626
completionParams: {
27-
temperature: options.temperature
27+
temperature: options.openai_model_temperature,
28+
model: options.openai_model
2829
}
2930
})
3031
} else {
@@ -60,7 +61,7 @@ export class Bot {
6061

6162
if (this.api) {
6263
const opts: openai.SendMessageOptions = {
63-
timeoutMs: 60000
64+
timeoutMs: this.options.openai_timeout_ms
6465
}
6566
if (ids.parentMessageId) {
6667
opts.parentMessageId = ids.parentMessageId
@@ -69,7 +70,7 @@ export class Bot {
6970
response = await utils.retry(
7071
this.api.sendMessage.bind(this.api),
7172
[message, opts],
72-
3
73+
this.options.openai_retries
7374
)
7475
} catch (e: any) {
7576
core.info(

src/main.ts

Lines changed: 5 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -11,7 +11,11 @@ async function run(): Promise<void> {
1111
core.getBooleanInput('review_comment_lgtm'),
1212
core.getMultilineInput('path_filters'),
1313
core.getInput('system_message'),
14-
core.getInput('temperature')
14+
core.getInput('openai_model'),
15+
core.getInput('openai_model_temperature'),
16+
core.getInput('openai_retries'),
17+
core.getInput('openai_timeout_ms'),
18+
core.getInput('openai_concurrency_limit')
1519
)
1620
const prompts: Prompts = new Prompts(
1721
core.getInput('review_beginning'),

src/options.ts

Lines changed: 15 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -197,23 +197,34 @@ export class Options {
197197
review_comment_lgtm: boolean
198198
path_filters: PathFilter
199199
system_message: string
200-
temperature: number
200+
openai_model: string
201+
openai_model_temperature: number
202+
openai_retries: number
203+
openai_timeout_ms: number
204+
openai_concurrency_limit: number
201205

202206
constructor(
203207
debug: boolean,
204208
max_files = '40',
205209
review_comment_lgtm = false,
206210
path_filters: string[] | null = null,
207211
system_message = '',
208-
temperature = '0.0'
212+
openai_model = 'gpt-3.5-turbo',
213+
openai_model_temperature = '0.0',
214+
openai_retries = '3',
215+
openai_timeout_ms = '60000',
216+
openai_concurrency_limit = '4'
209217
) {
210218
this.debug = debug
211219
this.max_files = parseInt(max_files)
212220
this.review_comment_lgtm = review_comment_lgtm
213221
this.path_filters = new PathFilter(path_filters)
214222
this.system_message = system_message
215-
// convert temperature to number
216-
this.temperature = parseFloat(temperature)
223+
this.openai_model = openai_model
224+
this.openai_model_temperature = parseFloat(openai_model_temperature)
225+
this.openai_retries = parseInt(openai_retries)
226+
this.openai_timeout_ms = parseInt(openai_timeout_ms)
227+
this.openai_concurrency_limit = parseInt(openai_concurrency_limit)
217228
}
218229

219230
check_path(path: string): boolean {

src/review.ts

Lines changed: 6 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -13,8 +13,6 @@ const token = core.getInput('token')
1313
const octokit = new Octokit({auth: `token ${token}`})
1414
const context = github.context
1515
const repo = context.repo
16-
const limit = pLimit(4)
17-
1816
const MAX_TOKENS_FOR_EXTRA_CONTENT = 2500
1917

2018
export const codeReview = async (
@@ -24,6 +22,8 @@ export const codeReview = async (
2422
) => {
2523
const commenter: Commenter = new Commenter()
2624

25+
const openai_concurrency_limit = pLimit(options.openai_concurrency_limit)
26+
2727
if (
2828
context.eventName !== 'pull_request' &&
2929
context.eventName !== 'pull_request_target'
@@ -191,7 +191,9 @@ export const codeReview = async (
191191
}
192192
const summaryPromises = files_to_review.map(
193193
async ([filename, file_content, file_diff]) =>
194-
limit(async () => generateSummary(filename, file_content, file_diff))
194+
openai_concurrency_limit(async () =>
195+
generateSummary(filename, file_content, file_diff)
196+
)
195197
)
196198

197199
const summaries = (await Promise.all(summaryPromises)).filter(
@@ -253,7 +255,7 @@ Tips:
253255
// Use Promise.all to run file review processes in parallel
254256
const reviewPromises = files_to_review.map(
255257
async ([filename, file_content, file_diff, patches]) =>
256-
limit(async () => {
258+
openai_concurrency_limit(async () => {
257259
// reset chat session for each file while reviewing
258260
let next_review_ids = review_begin_ids
259261

0 commit comments

Comments (0)