Skip to content

Commit 96225e1

Browse files
Stainless Bot (stainless-app[bot])
Stainless Bot
authored and committed
feat(api): support storing chat completions, enabling evals and model distillation in the dashboard (#1112)
Learn more at https://openai.com/devday2024
1 parent 52c0bb5 commit 96225e1

File tree

4 files changed

+46
-2
lines changed

4 files changed

+46
-2
lines changed

src/resources/chat/chat.ts

+1
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ export type ChatModel =
1616
| 'gpt-4o'
1717
| 'gpt-4o-2024-08-06'
1818
| 'gpt-4o-2024-05-13'
19+
| 'gpt-4o-realtime-preview-2024-10-01'
1920
| 'chatgpt-4o-latest'
2021
| 'gpt-4o-mini'
2122
| 'gpt-4o-mini-2024-07-18'

src/resources/chat/completions.ts

+18-2
Original file line numberDiff line numberDiff line change
@@ -727,8 +727,12 @@ export type ChatCompletionCreateParams =
727727

728728
export interface ChatCompletionCreateParamsBase {
729729
/**
730-
* A list of messages comprising the conversation so far.
731-
* [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
730+
* A list of messages comprising the conversation so far. Depending on the
731+
* [model](https://platform.openai.com/docs/models) you use, different message
732+
* types (modalities) are supported, like
733+
* [text](https://platform.openai.com/docs/guides/text-generation),
734+
* [images](https://platform.openai.com/docs/guides/vision), and
735+
* [audio](https://platform.openai.com/docs/guides/audio).
732736
*/
733737
messages: Array<ChatCompletionMessageParam>;
734738

@@ -806,6 +810,12 @@ export interface ChatCompletionCreateParamsBase {
806810
*/
807811
max_tokens?: number | null;
808812

813+
/**
814+
* Developer-defined tags and values used for filtering completions in the
815+
* [dashboard](https://platform.openai.com/completions).
816+
*/
817+
metadata?: Record<string, string> | null;
818+
809819
/**
810820
* How many chat completion choices to generate for each input message. Note that
811821
* you will be charged based on the number of generated tokens across all of the
@@ -889,6 +899,12 @@ export interface ChatCompletionCreateParamsBase {
889899
*/
890900
stop?: string | null | Array<string>;
891901

902+
/**
903+
* Whether or not to store the output of this completion request for traffic
904+
* logging in the [dashboard](https://platform.openai.com/completions).
905+
*/
906+
store?: boolean | null;
907+
892908
/**
893909
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
894910
* sent as data-only

src/resources/completions.ts

+25
Original file line numberDiff line numberDiff line change
@@ -125,18 +125,43 @@ export interface CompletionUsage {
125125
* Breakdown of tokens used in a completion.
126126
*/
127127
completion_tokens_details?: CompletionUsage.CompletionTokensDetails;
128+
129+
/**
130+
* Breakdown of tokens used in the prompt.
131+
*/
132+
prompt_tokens_details?: CompletionUsage.PromptTokensDetails;
128133
}
129134

130135
export namespace CompletionUsage {
131136
/**
132137
* Breakdown of tokens used in a completion.
133138
*/
134139
export interface CompletionTokensDetails {
140+
/**
141+
* Audio input tokens generated by the model.
142+
*/
143+
audio_tokens?: number;
144+
135145
/**
136146
* Tokens generated by the model for reasoning.
137147
*/
138148
reasoning_tokens?: number;
139149
}
150+
151+
/**
152+
* Breakdown of tokens used in the prompt.
153+
*/
154+
export interface PromptTokensDetails {
155+
/**
156+
* Audio input tokens present in the prompt.
157+
*/
158+
audio_tokens?: number;
159+
160+
/**
161+
* Cached tokens present in the prompt.
162+
*/
163+
cached_tokens?: number;
164+
}
140165
}
141166

142167
export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming;

tests/api-resources/chat/completions.test.ts

+2
Original file line numberDiff line numberDiff line change
@@ -34,13 +34,15 @@ describe('resource completions', () => {
3434
logprobs: true,
3535
max_completion_tokens: 0,
3636
max_tokens: 0,
37+
metadata: { foo: 'string' },
3738
n: 1,
3839
parallel_tool_calls: true,
3940
presence_penalty: -2,
4041
response_format: { type: 'text' },
4142
seed: -9007199254740991,
4243
service_tier: 'auto',
4344
stop: 'string',
45+
store: true,
4446
stream: false,
4547
stream_options: { include_usage: true },
4648
temperature: 1,

0 commit comments

Comments (0)