From a6ace169716f9263e7dd6bc5d634a90bc0fe63e4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 20:32:32 +0000 Subject: [PATCH] chore(api): updates to supported Voice IDs --- .stats.yml | 4 ++-- src/openai/resources/audio/speech.py | 16 ++++++++----- .../resources/beta/realtime/sessions.py | 16 +++++++++---- src/openai/resources/responses/input_items.py | 13 +++++++++- src/openai/resources/responses/responses.py | 24 +++++++++---------- .../types/audio/speech_create_params.py | 11 ++++++--- .../types/beta/realtime/realtime_response.py | 9 +++++-- .../beta/realtime/response_create_event.py | 8 +++++-- .../realtime/response_create_event_param.py | 6 +++-- src/openai/types/beta/realtime/session.py | 6 ++++- .../beta/realtime/session_create_params.py | 6 +++-- .../beta/realtime/session_create_response.py | 6 ++++- .../beta/realtime/session_update_event.py | 8 +++++-- .../realtime/session_update_event_param.py | 6 +++-- .../transcription_session_create_params.py | 7 +++--- .../realtime/transcription_session_update.py | 7 +++--- .../transcription_session_update_param.py | 7 +++--- .../types/chat/chat_completion_audio_param.py | 7 +++++- .../types/responses/input_item_list_params.py | 9 +++++++ src/openai/types/responses/response.py | 4 ++-- .../types/responses/response_create_params.py | 4 ++-- ...response_format_text_json_schema_config.py | 14 +++++------ ...se_format_text_json_schema_config_param.py | 14 +++++------ tests/api_resources/audio/test_speech.py | 16 ++++++------- .../beta/realtime/test_sessions.py | 4 ++-- tests/api_resources/chat/test_completions.py | 8 +++---- .../responses/test_input_items.py | 2 ++ 27 files changed, 158 insertions(+), 84 deletions(-) diff --git a/.stats.yml b/.stats.yml index fe93204292..4d1276a5e6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml -openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml +openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e config_hash: d36e491b0afc4f79e3afad4b3c9bec70 diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index 529e3a47ea..1ee53db9d5 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -53,7 +53,9 @@ def create( *, input: str, model: Union[str, SpeechModel], - voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"], + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ], instructions: str | NotGiven = NOT_GIVEN, response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, @@ -75,8 +77,8 @@ def create( `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the - voices are available in the + `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + `verse`. 
Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). instructions: Control the voice of your generated audio with additional instructions. Does not @@ -142,7 +144,9 @@ async def create( *, input: str, model: Union[str, SpeechModel], - voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"], + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ], instructions: str | NotGiven = NOT_GIVEN, response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, @@ -164,8 +168,8 @@ async def create( `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the - voices are available in the + `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + `verse`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). instructions: Control the voice of your generated audio with additional instructions. Does not diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index 5884e54de2..3e1c956fe4 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -65,7 +65,10 @@ def create( tool_choice: str | NotGiven = NOT_GIVEN, tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN, + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -147,7 +150,8 @@ def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + `shimmer`, and `verse`. extra_headers: Send extra headers @@ -227,7 +231,10 @@ async def create( tool_choice: str | NotGiven = NOT_GIVEN, tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN, + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -309,7 +316,8 @@ async def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + `shimmer`, and `verse`. extra_headers: Send extra headers diff --git a/src/openai/resources/responses/input_items.py b/src/openai/resources/responses/input_items.py index e341393cd1..ee0e628169 100644 --- a/src/openai/resources/responses/input_items.py +++ b/src/openai/resources/responses/input_items.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Any, cast +from typing import Any, List, cast from typing_extensions import Literal import httpx @@ -17,6 +17,7 @@ from ..._base_client import AsyncPaginator, make_request_options from ...types.responses import input_item_list_params from ...types.responses.response_item import ResponseItem +from ...types.responses.response_includable import ResponseIncludable __all__ = ["InputItems", "AsyncInputItems"] @@ -47,6 +48,7 @@ def list( *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -64,6 +66,9 @@ def list( before: An item ID to list items before, used in pagination. + include: Additional fields to include in the response. See the `include` parameter for + Response creation above for more information. + limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -94,6 +99,7 @@ def list( { "after": after, "before": before, + "include": include, "limit": limit, "order": order, }, @@ -130,6 +136,7 @@ def list( *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -147,6 +154,9 @@ def list( before: An item ID to list items before, used in pagination. + include: Additional fields to include in the response. See the `include` parameter for + Response creation above for more information. + limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -177,6 +187,7 @@ def list( { "after": after, "before": before, + "include": include, "limit": limit, "order": order, }, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index ab48509e6e..1e549b0f4e 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -138,8 +138,8 @@ def create( context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. 
max_output_tokens: An upper bound for the number of tokens that can be generated for a response, including visible output tokens and @@ -310,8 +310,8 @@ def create( context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. max_output_tokens: An upper bound for the number of tokens that can be generated for a response, including visible output tokens and @@ -475,8 +475,8 @@ def create( context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. max_output_tokens: An upper bound for the number of tokens that can be generated for a response, including visible output tokens and @@ -795,8 +795,8 @@ async def create( context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. max_output_tokens: An upper bound for the number of tokens that can be generated for a response, including visible output tokens and @@ -967,8 +967,8 @@ async def create( context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. max_output_tokens: An upper bound for the number of tokens that can be generated for a response, including visible output tokens and @@ -1132,8 +1132,8 @@ async def create( context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. max_output_tokens: An upper bound for the number of tokens that can be generated for a response, including visible output tokens and diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index 958680710b..a4fc020532 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -20,11 +20,16 @@ class SpeechCreateParams(TypedDict, total=False): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. """ - voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]] + voice: Required[ + Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ] + ] """The voice to use when generating the audio. 
- Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, - `sage` and `shimmer`. Previews of the voices are available in the + Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, + `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in + the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). """ diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py index 4c3c83d666..8ecfb91c31 100644 --- a/src/openai/types/beta/realtime/realtime_response.py +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -80,8 +80,13 @@ class RealtimeResponse(BaseModel): will become the input for later turns. """ - voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + voice: Union[ + str, + Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], + None, + ] = None """ The voice the model used to respond. Current voice options are `alloy`, `ash`, - `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + `verse`. """ diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py index d6c5fda926..3b8a6de8df 100644 --- a/src/openai/types/beta/realtime/response_create_event.py +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -101,12 +101,16 @@ class Response(BaseModel): tools: Optional[List[ResponseTool]] = None """Tools (functions) available to the model.""" - voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + voice: Union[ + str, + Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], + None, + ] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py index c02fe1b34e..c569d507a0 100644 --- a/src/openai/types/beta/realtime/response_create_event_param.py +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -102,12 +102,14 @@ class Response(TypedDict, total=False): tools: Iterable[ResponseTool] """Tools (functions) available to the model.""" - voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. 
""" diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index 3ed53ff5f8..6acde57f09 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -218,7 +218,11 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + voice: Union[ + str, + Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], + None, + ] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index fe4a1c8636..eadee29b28 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -113,12 +113,14 @@ class SessionCreateParams(TypedDict, total=False): natural conversations, but may have a higher latency. """ - voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py index c26e62bef1..3cc8ca15ce 100644 --- a/src/openai/types/beta/realtime/session_create_response.py +++ b/src/openai/types/beta/realtime/session_create_response.py @@ -141,7 +141,11 @@ class SessionCreateResponse(BaseModel): speech. """ - voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + voice: Union[ + str, + Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], + None, + ] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 00180f593d..ba34b0260b 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -222,12 +222,16 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + voice: Union[ + str, + Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], + None, + ] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. 
""" diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index b8bce8fbd0..0984d39e91 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -220,12 +220,14 @@ class Session(TypedDict, total=False): natural conversations, but may have a higher latency. """ - voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/transcription_session_create_params.py b/src/openai/types/beta/realtime/transcription_session_create_params.py index 4066dc4c5d..1cf511f0b5 100644 --- a/src/openai/types/beta/realtime/transcription_session_create_params.py +++ b/src/openai/types/beta/realtime/transcription_session_create_params.py @@ -96,9 +96,10 @@ class InputAudioTranscription(TypedDict, total=False): class TurnDetection(TypedDict, total=False): create_response: bool - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + Not available for transcription sessions. """ eagerness: Literal["low", "medium", "high", "auto"] @@ -113,7 +114,7 @@ class TurnDetection(TypedDict, total=False): """ Whether or not to automatically interrupt any ongoing response with output to the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. + occurs. Not available for transcription sessions. """ prefix_padding_ms: int diff --git a/src/openai/types/beta/realtime/transcription_session_update.py b/src/openai/types/beta/realtime/transcription_session_update.py index 043ac02e07..c3e8f011c8 100644 --- a/src/openai/types/beta/realtime/transcription_session_update.py +++ b/src/openai/types/beta/realtime/transcription_session_update.py @@ -50,9 +50,10 @@ class SessionInputAudioTranscription(BaseModel): class SessionTurnDetection(BaseModel): create_response: Optional[bool] = None - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + Not available for transcription sessions. """ eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None @@ -67,7 +68,7 @@ class SessionTurnDetection(BaseModel): """ Whether or not to automatically interrupt any ongoing response with output to the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. + occurs. Not available for transcription sessions. 
""" prefix_padding_ms: Optional[int] = None diff --git a/src/openai/types/beta/realtime/transcription_session_update_param.py b/src/openai/types/beta/realtime/transcription_session_update_param.py index 997a36d77b..549c49011b 100644 --- a/src/openai/types/beta/realtime/transcription_session_update_param.py +++ b/src/openai/types/beta/realtime/transcription_session_update_param.py @@ -50,9 +50,10 @@ class SessionInputAudioTranscription(TypedDict, total=False): class SessionTurnDetection(TypedDict, total=False): create_response: bool - """ - Whether or not to automatically generate a response when a VAD stop event + """Whether or not to automatically generate a response when a VAD stop event occurs. + + Not available for transcription sessions. """ eagerness: Literal["low", "medium", "high", "auto"] @@ -67,7 +68,7 @@ class SessionTurnDetection(TypedDict, total=False): """ Whether or not to automatically interrupt any ongoing response with output to the default conversation (i.e. `conversation` of `auto`) when a VAD start event - occurs. + occurs. Not available for transcription sessions. """ prefix_padding_ms: int diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py index 6321417826..b902f2667f 100644 --- a/src/openai/types/chat/chat_completion_audio_param.py +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Union from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionAudioParam"] @@ -14,7 +15,11 @@ class ChatCompletionAudioParam(TypedDict, total=False): Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. """ - voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] + voice: Required[ + Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + ] + ] """The voice the model uses to respond. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, and diff --git a/src/openai/types/responses/input_item_list_params.py b/src/openai/types/responses/input_item_list_params.py index e0b71f1ac5..6555d26788 100644 --- a/src/openai/types/responses/input_item_list_params.py +++ b/src/openai/types/responses/input_item_list_params.py @@ -2,8 +2,11 @@ from __future__ import annotations +from typing import List from typing_extensions import Literal, TypedDict +from .response_includable import ResponseIncludable + __all__ = ["InputItemListParams"] @@ -14,6 +17,12 @@ class InputItemListParams(TypedDict, total=False): before: str """An item ID to list items before, used in pagination.""" + include: List[ResponseIncludable] + """Additional fields to include in the response. + + See the `include` parameter for Response creation above for more information. + """ + limit: int """A limit on the number of objects to be returned. diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 198ef8df74..92e5dbcb8b 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -47,8 +47,8 @@ class Response(BaseModel): context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. 
This makes it simple to + swap out system (or developer) messages in new responses. """ metadata: Optional[Metadata] = None diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 651050c50d..ed82e678e5 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -64,8 +64,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): context. When using along with `previous_response_id`, the instructions from a previous - response will be not be carried over to the next response. This makes it simple - to swap out system (or developer) messages in new responses. + response will not be carried over to the next response. This makes it simple to + swap out system (or developer) messages in new responses. """ max_output_tokens: Optional[int] diff --git a/src/openai/types/responses/response_format_text_json_schema_config.py b/src/openai/types/responses/response_format_text_json_schema_config.py index 3cf066370f..001fcf5bab 100644 --- a/src/openai/types/responses/response_format_text_json_schema_config.py +++ b/src/openai/types/responses/response_format_text_json_schema_config.py @@ -11,6 +11,13 @@ class ResponseFormatTextJSONSchemaConfig(BaseModel): + name: str + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The schema for the response format, described as a JSON Schema object. Learn how @@ -26,13 +33,6 @@ class ResponseFormatTextJSONSchemaConfig(BaseModel): how to respond in the format. """ - name: Optional[str] = None - """The name of the response format. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - strict: Optional[bool] = None """ Whether to enable strict schema adherence when generating the output. If set to diff --git a/src/openai/types/responses/response_format_text_json_schema_config_param.py b/src/openai/types/responses/response_format_text_json_schema_config_param.py index 211c5d1eff..f293a80c5a 100644 --- a/src/openai/types/responses/response_format_text_json_schema_config_param.py +++ b/src/openai/types/responses/response_format_text_json_schema_config_param.py @@ -9,6 +9,13 @@ class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False): + name: Required[str] + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + schema: Required[Dict[str, object]] """ The schema for the response format, described as a JSON Schema object. Learn how @@ -24,13 +31,6 @@ class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False): how to respond in the format. """ - name: str - """The name of the response format. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. - """ - strict: Optional[bool] """ Whether to enable strict schema adherence when generating the output. 
If set to diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py index ff5f51596e..5c1480b72e 100644 --- a/tests/api_resources/audio/test_speech.py +++ b/tests/api_resources/audio/test_speech.py @@ -28,7 +28,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None: speech = client.audio.speech.create( input="input", model="string", - voice="alloy", + voice="ash", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou speech = client.audio.speech.create( input="input", model="string", - voice="alloy", + voice="ash", instructions="instructions", response_format="mp3", speed=0.25, @@ -56,7 +56,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No response = client.audio.speech.with_raw_response.create( input="input", model="string", - voice="alloy", + voice="ash", ) assert response.is_closed is True @@ -71,7 +71,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) with client.audio.speech.with_streaming_response.create( input="input", model="string", - voice="alloy", + voice="ash", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -92,7 +92,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRo speech = await async_client.audio.speech.create( input="input", model="string", - voice="alloy", + voice="ash", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} @@ -104,7 +104,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re speech = await async_client.audio.speech.create( input="input", model="string", - voice="alloy", + voice="ash", instructions="instructions", response_format="mp3", speed=0.25, @@ -120,7 +120,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: response = await async_client.audio.speech.with_raw_response.create( input="input", model="string", - voice="alloy", + voice="ash", ) assert response.is_closed is True @@ -135,7 +135,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_ async with async_client.audio.speech.with_streaming_response.create( input="input", model="string", - voice="alloy", + voice="ash", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index c0a426a417..f432b7d277 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -56,7 +56,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "threshold": 0, "type": "server_vad", }, - voice="alloy", + voice="ash", ) assert_matches_type(SessionCreateResponse, session, path=["response"]) @@ -123,7 +123,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "threshold": 0, "type": "server_vad", }, - voice="alloy", + voice="ash", ) assert_matches_type(SessionCreateResponse, session, path=["response"]) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 3c4a9e4a19..303a16a587 100644 --- 
a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -47,7 +47,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: model="gpt-4o", audio={ "format": "wav", - "voice": "alloy", + "voice": "ash", }, frequency_penalty=-2, function_call="none", @@ -174,7 +174,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: stream=True, audio={ "format": "wav", - "voice": "alloy", + "voice": "ash", }, frequency_penalty=-2, function_call="none", @@ -457,7 +457,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn model="gpt-4o", audio={ "format": "wav", - "voice": "alloy", + "voice": "ash", }, frequency_penalty=-2, function_call="none", @@ -584,7 +584,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn stream=True, audio={ "format": "wav", - "voice": "alloy", + "voice": "ash", }, frequency_penalty=-2, function_call="none", diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py index 77a156b5ac..2528943c06 100644 --- a/tests/api_resources/responses/test_input_items.py +++ b/tests/api_resources/responses/test_input_items.py @@ -31,6 +31,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: response_id="response_id", after="after", before="before", + include=["file_search_call.results"], limit=0, order="asc", ) @@ -84,6 +85,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N response_id="response_id", after="after", before="before", + include=["file_search_call.results"], limit=0, order="asc", )
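
As a quick sanity check against the surface changes above, here is a minimal usage sketch in Python (not part of the patch). It assumes an OPENAI_API_KEY is configured in the environment; the input text, output filename, and `resp_123` response ID are placeholders, and actual voice availability is determined by the API rather than by the widened SDK type hints.

# Minimal sketch: exercise the widened `voice` parameter and the new `include`
# query parameter introduced in this change.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# `voice` is now typed as Union[str, Literal[...]]; `ballad` and `verse` are
# newly listed for text-to-speech, and any future voice ID can be passed as a plain str.
speech = client.audio.speech.create(
    model="gpt-4o-mini-tts",
    voice="ballad",
    input="The quick brown fox jumped over the lazy dog.",
)
speech.write_to_file("speech.mp3")  # default response_format is mp3

# Input item listing can now request extra fields via `include`.
items = client.responses.input_items.list(
    response_id="resp_123",  # hypothetical response ID
    include=["file_search_call.results"],
    limit=20,
)
for item in items:
    print(item.type)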