Commit 0ad2fe0

fix(api): correct some Responses types (#1391)
1 parent d031182 · commit 0ad2fe0

6 files changed (+42 -20 lines)

.stats.yml (+1 -1)

@@ -1,2 +1,2 @@
 configured_endpoints: 81
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml

src/resources/batches.ts (+4 -4)

@@ -220,11 +220,11 @@ export interface BatchCreateParams {
 
   /**
    * The endpoint to be used for all requests in the batch. Currently
-   * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-   * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-   * embedding inputs across all requests in the batch.
+   * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+   * are supported. Note that `/v1/embeddings` batches are also restricted to a
+   * maximum of 50,000 embedding inputs across all requests in the batch.
    */
-  endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';
+  endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';
 
   /**
    * The ID of an uploaded file that contains requests for the new batch.
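
A minimal sketch of what the widened union enables, assuming a configured client; the file ID below is a placeholder, not a value from this commit:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // '/v1/responses' is now accepted alongside the chat completions,
  // embeddings, and completions endpoints.
  const batch = await client.batches.create({
    completion_window: '24h',
    endpoint: '/v1/responses',
    input_file_id: 'file-abc123', // placeholder: an uploaded .jsonl file of requests
  });
  console.log(batch.id, batch.status);
}

main();
```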

src/resources/chat/completions/completions.ts (+12 -6)

@@ -377,10 +377,13 @@ export interface ChatCompletionChunk {
   /**
    * An optional field that will only be present when you set
    * `stream_options: {"include_usage": true}` in your request. When present, it
-   * contains a null value except for the last chunk which contains the token usage
-   * statistics for the entire request.
+   * contains a null value **except for the last chunk** which contains the token
+   * usage statistics for the entire request.
+   *
+   * **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+   * final usage chunk which contains the total token usage for the request.
    */
-  usage?: CompletionsAPI.CompletionUsage | null;
+  usage?: CompletionsAPI.CompletionUsage;
 }
 
 export namespace ChatCompletionChunk {
@@ -551,7 +554,7 @@ export namespace ChatCompletionContentPart {
       /**
       * The name of the file, used when passing the file to the model as a string.
       */
-      file_name?: string;
+      filename?: string;
     }
   }
 }
@@ -930,8 +933,11 @@ export interface ChatCompletionStreamOptions {
   /**
    * If set, an additional chunk will be streamed before the `data: [DONE]` message.
    * The `usage` field on this chunk shows the token usage statistics for the entire
-   * request, and the `choices` field will always be an empty array. All other chunks
-   * will also include a `usage` field, but with a null value.
+   * request, and the `choices` field will always be an empty array.
+   *
+   * All other chunks will also include a `usage` field, but with a null value.
+   * **NOTE:** If the stream is interrupted, you may not receive the final usage
+   * chunk which contains the total token usage for the request.
    */
   include_usage?: boolean;
 }
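
Because `usage` is now typed as optional rather than `CompletionUsage | null`, and the final usage chunk may never arrive on an interrupted stream, consumers should guard the field. A sketch, assuming a configured client and a placeholder model name:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const stream = await client.chat.completions.create({
    model: 'gpt-4o', // placeholder model name
    messages: [{ role: 'user', content: 'Say hello' }],
    stream: true,
    stream_options: { include_usage: true },
  });

  for await (const chunk of stream) {
    // Intermediate chunks carry a null `usage`; only the final chunk (if the
    // stream completes) carries totals. A truthiness check covers both cases.
    if (chunk.usage) {
      console.log('total tokens:', chunk.usage.total_tokens);
    }
  }
}

main();
```

Note that callers constructing file content parts must also rename `file_name` to `filename` to match the corrected type.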

src/resources/responses/responses.ts (+21 -5)

@@ -1298,11 +1298,6 @@ export interface ResponseFunctionCallArgumentsDoneEvent {
  * for more information.
  */
 export interface ResponseFunctionToolCall {
-  /**
-   * The unique ID of the function tool call.
-   */
-  id: string;
-
   /**
    * A JSON string of the arguments to pass to the function.
    */
@@ -1323,6 +1318,11 @@ export interface ResponseFunctionToolCall {
    */
   type: 'function_call';
 
+  /**
+   * The unique ID of the function tool call.
+   */
+  id?: string;
+
   /**
    * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
    * Populated when items are returned via API.
@@ -2241,6 +2241,11 @@ export interface ResponseUsage {
    */
   input_tokens: number;
 
+  /**
+   * A detailed breakdown of the input tokens.
+   */
+  input_tokens_details: ResponseUsage.InputTokensDetails;
+
   /**
    * The number of output tokens.
    */
@@ -2258,6 +2263,17 @@ export interface ResponseUsage {
 }
 
 export namespace ResponseUsage {
+  /**
+   * A detailed breakdown of the input tokens.
+   */
+  export interface InputTokensDetails {
+    /**
+     * The number of tokens that were retrieved from the cache.
+     * [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+     */
+    cached_tokens: number;
+  }
+
   /**
    * A detailed breakdown of the output tokens.
    */
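
A sketch of reading the new usage breakdown, assuming a configured client and a placeholder model name:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const response = await client.responses.create({
    model: 'gpt-4o', // placeholder model name
    input: 'Write one sentence about prompt caching.',
  });

  // `input_tokens_details` is now a required field of ResponseUsage;
  // `cached_tokens` counts input tokens served from the prompt cache.
  if (response.usage) {
    console.log('cached input tokens:', response.usage.input_tokens_details.cached_tokens);
  }
}

main();
```

Since `id` on `ResponseFunctionToolCall` is now optional, code that echoes function call items back as input should also handle a missing `id`.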

src/resources/shared.ts (+2 -2)

@@ -171,10 +171,10 @@ export interface Reasoning {
    * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
    * result in faster responses and fewer tokens used on reasoning in a response.
    */
-  effort: ReasoningEffort | null;
+  effort?: ReasoningEffort | null;
 
   /**
-   * **o-series models only**
+   * **computer_use_preview only**
    *
    * A summary of the reasoning performed by the model. This can be useful for
    * debugging and understanding the model's reasoning process. One of `concise` or
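
With `effort` now optional, a `Reasoning` config can omit it entirely. A type-level sketch, assuming the shared types are importable from `openai/resources/shared` as in this repo's layout:

```ts
import type { Reasoning } from 'openai/resources/shared';

// `effort` may now be omitted, leaving the effort level to the server default.
const defaults: Reasoning = {};

// Or set explicitly to one of the documented values.
const highEffort: Reasoning = { effort: 'high' };
```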

tests/api-resources/batches.test.ts (+2 -2)

@@ -12,7 +12,7 @@ describe('resource batches', () => {
   test('create: only required params', async () => {
     const responsePromise = client.batches.create({
       completion_window: '24h',
-      endpoint: '/v1/chat/completions',
+      endpoint: '/v1/responses',
       input_file_id: 'input_file_id',
     });
     const rawResponse = await responsePromise.asResponse();
@@ -27,7 +27,7 @@ describe('resource batches', () => {
   test('create: required and optional params', async () => {
     const response = await client.batches.create({
       completion_window: '24h',
-      endpoint: '/v1/chat/completions',
+      endpoint: '/v1/responses',
       input_file_id: 'input_file_id',
       metadata: { foo: 'string' },
     });
