Skip to content
This repository was archived by the owner on Mar 6, 2024. It is now read-only.

Commit 6f45f40

Browse files
Feature/prompt limit issue (#3) -> Increase the prompt limit and split the prompt when it exceeds the token limit
Increase the prompt limit and split the prompt when it exceeds the token limit
1 parent 44244a9 commit 6f45f40

File tree

9 files changed

+378
-48
lines changed

9 files changed

+378
-48
lines changed

.github/workflows/openai-review.yml

+1-2
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,7 @@ on:
1111
types: [created]
1212

1313
concurrency:
14-
group:
15-
${{ github.repository }}-${{ github.event.number || github.head_ref ||
14+
group: ${{ github.repository }}-${{ github.event.number || github.head_ref ||
1615
github.sha }}-${{ github.workflow }}-${{ github.event_name ==
1716
'pull_request_review_comment' && 'pr_comment' || 'pr' }}
1817
cancel-in-progress: ${{ github.event_name != 'pull_request_review_comment' }}

README.md

+18-10
Original file line numberDiff line numberDiff line change
@@ -43,14 +43,23 @@ configure the required environment variables, such as `GITHUB_TOKEN` and
4343
`OPENAI_API_KEY`. For more information on usage, examples, contributing, and
4444
FAQs, you can refer to the sections below.
4545

46-
- [Overview](#overview)
47-
- [Professional Version of CodeRabbit](#professional-version-of-coderabbit)
48-
- [Reviewer Features](#reviewer-features)
49-
- [Install instructions](#install-instructions)
50-
- [Conversation with CodeRabbit](#conversation-with-coderabbit)
51-
- [Examples](#examples)
52-
- [Contribute](#contribute)
53-
- [FAQs](#faqs)
46+
- [AI-based PR reviewer and summarizer](#ai-based-pr-reviewer-and-summarizer)
47+
- [Overview](#overview)
48+
- [Reviewer Features:](#reviewer-features)
49+
- [CodeRabbit Pro](#coderabbit-pro)
50+
- [Install instructions](#install-instructions)
51+
- [Environment variables](#environment-variables)
52+
- [Models: `gpt-4` and `gpt-3.5-turbo`](#models-gpt-4-and-gpt-35-turbo)
53+
- [Prompts \& Configuration](#prompts--configuration)
54+
- [Conversation with CodeRabbit](#conversation-with-coderabbit)
55+
- [Ignoring PRs](#ignoring-prs)
56+
- [Examples](#examples)
57+
- [Contribute](#contribute)
58+
- [Developing](#developing)
59+
- [FAQs](#faqs)
60+
- [Review pull requests from forks](#review-pull-requests-from-forks)
61+
- [Inspect the messages between OpenAI server](#inspect-the-messages-between-openai-server)
62+
- [Disclaimer](#disclaimer)
5463

5564
## CodeRabbit Pro
5665

@@ -209,12 +218,11 @@ Install the dependencies
209218
$ npm install
210219
```
211220

212-
Build the typescript and package it for distribution
213-
214221
```bash
215222
$ npm run build && npm run package
216223
```
217224

225+
218226
## FAQs
219227

220228
### Review pull requests from forks

__tests__/tokenizer.test.ts

+47
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
import {splitPrompt} from './../src/tokenizer' // Import your module with the splitPrompt function
2+
3+
describe('splitPrompt function', () => {
4+
it('should split a prompt into smaller pieces', async () => {
5+
const maxTokens = 10 // Adjust this as needed
6+
const prompt = 'This is a test prompt for splitting into smaller pieces.'
7+
8+
const result = await splitPrompt(maxTokens, prompt)
9+
10+
// Calculate the expected output based on the maxTokens value
11+
const expectedOutput = [
12+
'This is a',
13+
'test',
14+
'prompt for',
15+
'splitting',
16+
'into',
17+
'smaller',
18+
'pieces.'
19+
]
20+
21+
expect(result).toEqual(expectedOutput)
22+
})
23+
24+
it('should handle a prompt smaller than maxTokens', async () => {
25+
const maxTokens = 100 // A large value
26+
const prompt = 'A very short prompt.'
27+
28+
const result = await splitPrompt(maxTokens, prompt)
29+
30+
// The prompt is already smaller than maxTokens, so it should return an array with the entire prompt.
31+
const expectedOutput = 'A very short prompt.'
32+
33+
expect(result).toEqual(expectedOutput)
34+
})
35+
36+
it('should handle an empty prompt', async () => {
37+
const maxTokens = 10
38+
const prompt = ''
39+
40+
const result = await splitPrompt(maxTokens, prompt)
41+
42+
// An empty prompt should result in an empty array.
43+
const expectedOutput: string[] | string = ''
44+
45+
expect(result).toEqual(expectedOutput)
46+
})
47+
})

0 commit comments

Comments
 (0)