diff --git a/.stats.yml b/.stats.yml index 6cc7757636..ac652c9271 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4097c2f86beb3f3bb021775cd1dfa240e960caf842aeefc2e08da4dc0851ea79.yml diff --git a/api.md b/api.md index 31be4e06a7..cb78f55ca6 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,14 @@ # Shared Types ```python -from openai.types import ErrorObject, FunctionDefinition, FunctionParameters +from openai.types import ( + ErrorObject, + FunctionDefinition, + FunctionParameters, + ResponseFormatJSONObject, + ResponseFormatJSONSchema, + ResponseFormatText, +) ``` # Completions @@ -35,6 +42,7 @@ from openai.types.chat import ( ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, + ChatCompletionContentPartRefusal, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, @@ -288,7 +296,6 @@ Types: ```python from openai.types.beta import ( - AssistantResponseFormat, AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, @@ -381,6 +388,8 @@ from openai.types.beta.threads import ( MessageDeleted, MessageDelta, MessageDeltaEvent, + RefusalContentBlock, + RefusalDeltaBlock, Text, TextContentBlock, TextContentBlockParam, diff --git a/src/openai/_client.py b/src/openai/_client.py index 8b404e234d..d3ee6cf0f1 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -151,7 +151,7 @@ def __init__( @property @override def qs(self) -> Querystring: - return Querystring(array_format="comma") + return Querystring(array_format="brackets") @property @override @@ -365,7 +365,7 @@ def __init__( @property @override def qs(self) -> Querystring: - return Querystring(array_format="comma") + return Querystring(array_format="brackets") @property @override diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index b4dc3cfdd6..441390d24b 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -88,6 +88,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -233,6 +238,11 @@ def update( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -453,6 +463,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -598,6 +613,11 @@ async def update( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 92e6c2e1c8..d84a7161aa 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -134,6 +134,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -264,6 +269,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -390,6 +400,11 @@ def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -924,6 +939,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
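Note: the docstrings added in the hunks above document the new `json_schema` variant of `response_format`. As a minimal sketch of the shape they describe — the assistant name, model, and schema below are illustrative placeholders, not taken from this diff:

```python
from openai import OpenAI

client = OpenAI()

# Hypothetical usage; the schema and names are examples only.
assistant = client.beta.assistants.create(
    model="gpt-4o-2024-08-06",
    name="math-tutor",  # placeholder name
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "step_by_step_answer",
            "schema": {
                "type": "object",
                "properties": {
                    "steps": {"type": "array", "items": {"type": "string"}},
                    "final_answer": {"type": "string"},
                },
                "required": ["steps", "final_answer"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)
```

The dict matches the `ResponseFormatJSONSchema` / `JSONSchema` param types introduced later in this diff (`name` required; `schema` and `strict` optional).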
@@ -1054,6 +1074,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1180,6 +1205,11 @@ async def create( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index daecb74b7c..6ec4a14a7e 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -314,6 +314,11 @@ def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -443,6 +448,11 @@ def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -568,6 +578,11 @@ def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -942,6 +957,11 @@ async def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1071,6 +1091,11 @@ async def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. @@ -1196,6 +1221,11 @@ async def create_and_run( [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 07a35f577b..3dcd3774d7 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -142,6 +142,8 @@ def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -338,6 +340,8 @@ def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -527,6 +531,8 @@ def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -791,6 +797,8 @@ async def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. 
Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -987,6 +995,8 @@ async def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -1176,6 +1186,8 @@ async def create( [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with + [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py index 61bd3bfbe5..cbd3cbbfba 100644 --- a/src/openai/resources/fine_tuning/jobs/jobs.py +++ b/src/openai/resources/fine_tuning/jobs/jobs.py @@ -49,7 +49,7 @@ def with_streaming_response(self) -> JobsWithStreamingResponse: def create( self, *, - model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]], + model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, @@ -74,7 +74,7 @@ def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. @@ -104,7 +104,7 @@ def create( name. For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. validation_file: The ID of an uploaded file that contains validation data. @@ -329,7 +329,7 @@ def with_streaming_response(self) -> AsyncJobsWithStreamingResponse: async def create( self, *, - model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]], + model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]], training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, @@ -354,7 +354,7 @@ async def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). 
training_file: The ID of an uploaded file that contains training data. @@ -384,7 +384,7 @@ async def create( name. For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. validation_file: The ID of an uploaded file that contains validation data. diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 84916962cc..f621fb67c5 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -9,6 +9,9 @@ ErrorObject as ErrorObject, FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, + ResponseFormatText as ResponseFormatText, + ResponseFormatJSONObject as ResponseFormatJSONObject, + ResponseFormatJSONSchema as ResponseFormatJSONSchema, ) from .upload import Upload as Upload from .embedding import Embedding as Embedding diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index d851a3619c..9c5ddfdbe0 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -23,7 +23,6 @@ from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams -from .assistant_response_format import AssistantResponseFormat as AssistantResponseFormat from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam @@ -31,7 +30,6 @@ from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction -from .assistant_response_format_param import AssistantResponseFormatParam as AssistantResponseFormatParam from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index 4e5adc766e..c6a0a4cfcf 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -89,6 +89,11 @@ class Assistant(BaseModel): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
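Note: the `Literal` change above adds `gpt-4o-mini` as a named fine-tuning base model. A minimal sketch of a job create call under that change — the training file ID is a placeholder, not a real upload:

```python
from openai import OpenAI

client = OpenAI()

# "file-abc123" stands in for the ID of an uploaded JSONL training file.
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",
    suffix="custom-model-name",  # produces a name like ft:gpt-4o-mini:openai:custom-model-name:...
)
print(job.id, job.status)
```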
diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index c10f7f57ad..84cd4425d1 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -60,6 +60,11 @@ class AssistantCreateParams(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/assistant_response_format.py b/src/openai/types/beta/assistant_response_format.py deleted file mode 100644 index f53bdaf62a..0000000000 --- a/src/openai/types/beta/assistant_response_format.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["AssistantResponseFormat"] - - -class AssistantResponseFormat(BaseModel): - type: Optional[Literal["text", "json_object"]] = None - """Must be one of `text` or `json_object`.""" diff --git a/src/openai/types/beta/assistant_response_format_option.py b/src/openai/types/beta/assistant_response_format_option.py index 6ce390f6d6..6f06a3442f 100644 --- a/src/openai/types/beta/assistant_response_format_option.py +++ b/src/openai/types/beta/assistant_response_format_option.py @@ -3,8 +3,12 @@ from typing import Union from typing_extensions import Literal, TypeAlias -from .assistant_response_format import AssistantResponseFormat +from ..shared.response_format_text import ResponseFormatText +from ..shared.response_format_json_object import ResponseFormatJSONObject +from ..shared.response_format_json_schema import ResponseFormatJSONSchema __all__ = ["AssistantResponseFormatOption"] -AssistantResponseFormatOption: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormat] +AssistantResponseFormatOption: TypeAlias = Union[ + Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema +] diff --git a/src/openai/types/beta/assistant_response_format_option_param.py b/src/openai/types/beta/assistant_response_format_option_param.py index 8100088723..680a060c3c 100644 --- a/src/openai/types/beta/assistant_response_format_option_param.py +++ b/src/openai/types/beta/assistant_response_format_option_param.py @@ -5,8 +5,13 @@ from typing import Union from typing_extensions import Literal, TypeAlias -from .assistant_response_format_param import AssistantResponseFormatParam +from ...types import shared_params __all__ = ["AssistantResponseFormatOptionParam"] -AssistantResponseFormatOptionParam: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormatParam] +AssistantResponseFormatOptionParam: TypeAlias = Union[ + Literal["auto"], + shared_params.ResponseFormatText, + shared_params.ResponseFormatJSONObject, + shared_params.ResponseFormatJSONSchema, +] diff --git a/src/openai/types/beta/assistant_response_format_param.py b/src/openai/types/beta/assistant_response_format_param.py deleted file mode 100644 index 96e1d02115..0000000000 --- 
a/src/openai/types/beta/assistant_response_format_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["AssistantResponseFormatParam"] - - -class AssistantResponseFormatParam(TypedDict, total=False): - type: Literal["text", "json_object"] - """Must be one of `text` or `json_object`.""" diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index b401e1a891..ade565819f 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -49,6 +49,11 @@ class AssistantUpdateParams(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py index e2711b9b3d..26ab1cb83f 100644 --- a/src/openai/types/beta/file_search_tool.py +++ b/src/openai/types/beta/file_search_tool.py @@ -12,8 +12,8 @@ class FileSearch(BaseModel): max_num_results: Optional[int] = None """The maximum number of results the file search tool should output. - The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should - be between 1 and 50 inclusive. + The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number + should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py index 115f86a444..666719f8cd 100644 --- a/src/openai/types/beta/file_search_tool_param.py +++ b/src/openai/types/beta/file_search_tool_param.py @@ -11,8 +11,8 @@ class FileSearch(TypedDict, total=False): max_num_results: int """The maximum number of results the file search tool should output. - The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should - be between 1 and 50 inclusive. + The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number + should be between 1 and 50 inclusive. Note that the file search tool may output fewer than `max_num_results` results. See the diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 62cff921e2..7490b25ef3 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -100,6 +100,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/threads/__init__.py b/src/openai/types/beta/threads/__init__.py index 023d76fc13..70853177bd 100644 --- a/src/openai/types/beta/threads/__init__.py +++ b/src/openai/types/beta/threads/__init__.py @@ -25,11 +25,13 @@ from .text_content_block import TextContentBlock as TextContentBlock from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent from .message_list_params import MessageListParams as MessageListParams +from .refusal_delta_block import RefusalDeltaBlock as RefusalDeltaBlock from .file_path_annotation import FilePathAnnotation as FilePathAnnotation from .image_url_delta_block import ImageURLDeltaBlock as ImageURLDeltaBlock from .message_content_delta import MessageContentDelta as MessageContentDelta from .message_create_params import MessageCreateParams as MessageCreateParams from .message_update_params import MessageUpdateParams as MessageUpdateParams +from .refusal_content_block import RefusalContentBlock as RefusalContentBlock from .image_file_delta_block import ImageFileDeltaBlock as ImageFileDeltaBlock from .image_url_content_block import ImageURLContentBlock as ImageURLContentBlock from .file_citation_annotation import FileCitationAnnotation as FileCitationAnnotation diff --git a/src/openai/types/beta/threads/message_content.py b/src/openai/types/beta/threads/message_content.py index 7b718c3ca9..b313d35af6 100644 --- a/src/openai/types/beta/threads/message_content.py +++ b/src/openai/types/beta/threads/message_content.py @@ -5,11 +5,13 @@ from ...._utils import PropertyInfo from .text_content_block import TextContentBlock +from .refusal_content_block import RefusalContentBlock from .image_url_content_block import ImageURLContentBlock from .image_file_content_block import ImageFileContentBlock __all__ = ["MessageContent"] MessageContent: TypeAlias = Annotated[ - Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock], PropertyInfo(discriminator="type") + Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/beta/threads/message_content_delta.py b/src/openai/types/beta/threads/message_content_delta.py index 667172c08f..b6e7dfa45a 100644 --- a/src/openai/types/beta/threads/message_content_delta.py +++ b/src/openai/types/beta/threads/message_content_delta.py @@ -5,11 +5,13 @@ from ...._utils import PropertyInfo from .text_delta_block import TextDeltaBlock +from .refusal_delta_block import RefusalDeltaBlock from .image_url_delta_block import ImageURLDeltaBlock from .image_file_delta_block import ImageFileDeltaBlock __all__ = ["MessageContentDelta"] MessageContentDelta: TypeAlias = Annotated[ - Union[ImageFileDeltaBlock, TextDeltaBlock, ImageURLDeltaBlock], PropertyInfo(discriminator="type") + Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/beta/threads/refusal_content_block.py b/src/openai/types/beta/threads/refusal_content_block.py new file mode 100644 index 0000000000..d54f948554 --- /dev/null +++ b/src/openai/types/beta/threads/refusal_content_block.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RefusalContentBlock"] + + +class RefusalContentBlock(BaseModel): + refusal: str + + type: Literal["refusal"] + """Always `refusal`.""" diff --git a/src/openai/types/beta/threads/refusal_delta_block.py b/src/openai/types/beta/threads/refusal_delta_block.py new file mode 100644 index 0000000000..dbd8e62697 --- /dev/null +++ b/src/openai/types/beta/threads/refusal_delta_block.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RefusalDeltaBlock"] + + +class RefusalDeltaBlock(BaseModel): + index: int + """The index of the refusal part in the message.""" + + type: Literal["refusal"] + """Always `refusal`.""" + + refusal: Optional[str] = None diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index 81d10d4a56..0579e229d8 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -171,6 +171,11 @@ class Run(BaseModel): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index e0c42fd23f..d3e6d9c476 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -97,6 +97,11 @@ class RunCreateParamsBase(TypedDict, total=False): [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which guarantees the model will match your supplied JSON schema. Learn + more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
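Note: with `RefusalContentBlock` and `RefusalDeltaBlock` joining the `MessageContent` and `MessageContentDelta` unions above, consumers can branch on the new `"refusal"` discriminator. A sketch of reading message content under this change — the thread ID is a placeholder:

```python
from openai import OpenAI

client = OpenAI()

# "thread_abc123" is a placeholder; each message's content is a list of
# discriminated blocks that now includes refusals alongside text and images.
for message in client.beta.threads.messages.list(thread_id="thread_abc123"):
    for block in message.content:
        if block.type == "refusal":
            print("model refused:", block.refusal)
        elif block.type == "text":
            print(block.text.value)
```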
diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/beta/vector_stores/vector_store_file.py index 4762de0ebd..65096e8dad 100644 --- a/src/openai/types/beta/vector_stores/vector_store_file.py +++ b/src/openai/types/beta/vector_stores/vector_store_file.py @@ -17,7 +17,7 @@ class LastError(BaseModel): - code: Literal["internal_error", "file_not_found", "parsing_error", "unhandled_mime_type"] + code: Literal["server_error", "unsupported_file", "invalid_file"] """One of `server_error` or `rate_limit_exceeded`.""" message: str diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 0ba812ff9b..df3b48149c 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -37,6 +37,9 @@ from .chat_completion_tool_choice_option_param import ( ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam, ) +from .chat_completion_content_part_refusal_param import ( + ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam, +) from .chat_completion_function_call_option_param import ( ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam, ) diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 5f4eaf3366..4b53e70890 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -15,6 +15,9 @@ class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" + refusal: Optional[List[ChatCompletionTokenLogprob]] = None + """A list of message refusal tokens with log probability information.""" + class Choice(BaseModel): finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"] diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 8f7357b96c..2429d41d33 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -2,12 +2,16 @@ from __future__ import annotations -from typing import Iterable, Optional -from typing_extensions import Literal, Required, TypedDict +from typing import Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam +from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam -__all__ = ["ChatCompletionAssistantMessageParam", "FunctionCall"] +__all__ = ["ChatCompletionAssistantMessageParam", "ContentArrayOfContentPart", "FunctionCall"] + +ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam] class FunctionCall(TypedDict, total=False): @@ -27,7 +31,7 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Optional[str] + content: Union[str, Iterable[ContentArrayOfContentPart], None] """The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. @@ -47,5 +51,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role. 
""" + refusal: Optional[str] + """The refusal message by the assistant.""" + tool_calls: Iterable[ChatCompletionMessageToolCallParam] """The tool calls generated by the model, such as function calls.""" diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 65643c7e60..9ec6dc4bdb 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -67,6 +67,9 @@ class ChoiceDelta(BaseModel): model. """ + refusal: Optional[str] = None + """The refusal message generated by the model.""" + role: Optional[Literal["system", "user", "assistant", "tool"]] = None """The role of the author of this message.""" @@ -77,6 +80,9 @@ class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" + refusal: Optional[List[ChatCompletionTokenLogprob]] = None + """A list of message refusal tokens with log probability information.""" + class Choice(BaseModel): delta: ChoiceDelta diff --git a/src/openai/types/chat/chat_completion_content_part_refusal_param.py b/src/openai/types/chat/chat_completion_content_part_refusal_param.py new file mode 100644 index 0000000000..c18c7db770 --- /dev/null +++ b/src/openai/types/chat/chat_completion_content_part_refusal_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionContentPartRefusalParam"] + + +class ChatCompletionContentPartRefusalParam(TypedDict, total=False): + refusal: Required[str] + """The refusal message generated by the model.""" + + type: Required[Literal["refusal"]] + """The type of the content part.""" diff --git a/src/openai/types/chat/chat_completion_message.py b/src/openai/types/chat/chat_completion_message.py index 8db7d17d24..492bb68c85 100644 --- a/src/openai/types/chat/chat_completion_message.py +++ b/src/openai/types/chat/chat_completion_message.py @@ -26,6 +26,9 @@ class ChatCompletionMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" + refusal: Optional[str] = None + """The refusal message generated by the model.""" + role: Literal["assistant"] """The role of the author of this message.""" diff --git a/src/openai/types/chat/chat_completion_system_message_param.py b/src/openai/types/chat/chat_completion_system_message_param.py index 94bb3f636c..172ccea09e 100644 --- a/src/openai/types/chat/chat_completion_system_message_param.py +++ b/src/openai/types/chat/chat_completion_system_message_param.py @@ -2,13 +2,16 @@ from __future__ import annotations +from typing import Union, Iterable from typing_extensions import Literal, Required, TypedDict +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + __all__ = ["ChatCompletionSystemMessageParam"] class ChatCompletionSystemMessageParam(TypedDict, total=False): - content: Required[str] + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the system message.""" role: Required[Literal["system"]] diff --git a/src/openai/types/chat/chat_completion_tool_message_param.py b/src/openai/types/chat/chat_completion_tool_message_param.py index 5c590e033f..eb5e270e47 100644 --- a/src/openai/types/chat/chat_completion_tool_message_param.py +++ b/src/openai/types/chat/chat_completion_tool_message_param.py @@ -2,13 +2,16 
@@ from __future__ import annotations +from typing import Union, Iterable from typing_extensions import Literal, Required, TypedDict +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + __all__ = ["ChatCompletionToolMessageParam"] class ChatCompletionToolMessageParam(TypedDict, total=False): - content: Required[str] + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the tool message.""" role: Required[Literal["tool"]] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 9e81881b9e..bf648a3858 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -121,7 +121,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): response_format: ResponseFormat """An object specifying the format that the model must output. - Compatible with + Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. @@ -250,9 +251,9 @@ class Function(TypedDict, total=False): """ -class ResponseFormat(TypedDict, total=False): - type: Literal["text", "json_object"] - """Must be one of `text` or `json_object`.""" +ResponseFormat: TypeAlias = Union[ + shared_params.ResponseFormatText, shared_params.ResponseFormatJSONObject, shared_params.ResponseFormatJSONSchema +] class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index edb7b732bf..686f26b783 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -6,6 +6,7 @@ ChatModel: TypeAlias = Literal[ "gpt-4o", + "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index c5196e4406..e9be2ef1ca 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -9,11 +9,11 @@ class JobCreateParams(TypedDict, total=False): - model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]]] + model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]] """The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). """ training_file: Required[str] @@ -54,7 +54,7 @@ class JobCreateParams(TypedDict, total=False): name. For example, a `suffix` of "custom-model-name" would produce a model name like - `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. 
""" validation_file: Optional[str] diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index e085744e29..c8776bca0e 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -3,3 +3,6 @@ from .error_object import ErrorObject as ErrorObject from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters +from .response_format_text import ResponseFormatText as ResponseFormatText +from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject +from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/src/openai/types/shared/function_definition.py b/src/openai/types/shared/function_definition.py index 49f5e67c50..06baa23170 100644 --- a/src/openai/types/shared/function_definition.py +++ b/src/openai/types/shared/function_definition.py @@ -32,3 +32,12 @@ class FunctionDefinition(BaseModel): Omitting `parameters` defines a function with an empty parameter list. """ + + strict: Optional[bool] = None + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](docs/guides/function-calling). + """ diff --git a/src/openai/types/shared/response_format_json_object.py b/src/openai/types/shared/response_format_json_object.py new file mode 100644 index 0000000000..107728dd2e --- /dev/null +++ b/src/openai/types/shared/response_format_json_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatJSONObject"] + + +class ResponseFormatJSONObject(BaseModel): + type: Literal["json_object"] + """The type of response format being defined: `json_object`""" diff --git a/src/openai/types/shared/response_format_json_schema.py b/src/openai/types/shared/response_format_json_schema.py new file mode 100644 index 0000000000..3194a4fe91 --- /dev/null +++ b/src/openai/types/shared/response_format_json_schema.py @@ -0,0 +1,44 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from ..._models import BaseModel + +__all__ = ["ResponseFormatJSONSchema", "JSONSchema"] + + +class JSONSchema(BaseModel): + name: str + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: Optional[str] = None + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None) + """The schema for the response format, described as a JSON Schema object.""" + + strict: Optional[bool] = None + """Whether to enable strict schema adherence when generating the output. + + If set to true, the model will always follow the exact schema defined in the + `schema` field. Only a subset of JSON Schema is supported when `strict` is + `true`. 
To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + """ + + +class ResponseFormatJSONSchema(BaseModel): + json_schema: JSONSchema + + type: Literal["json_schema"] + """The type of response format being defined: `json_schema`""" diff --git a/src/openai/types/shared/response_format_text.py b/src/openai/types/shared/response_format_text.py new file mode 100644 index 0000000000..6721fe0973 --- /dev/null +++ b/src/openai/types/shared/response_format_text.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatText"] + + +class ResponseFormatText(BaseModel): + type: Literal["text"] + """The type of response format being defined: `text`""" diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index ef638cb279..ab4057d59f 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -2,3 +2,6 @@ from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters +from .response_format_text import ResponseFormatText as ResponseFormatText +from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject +from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py index 29ccc548d4..f41392f154 100644 --- a/src/openai/types/shared_params/function_definition.py +++ b/src/openai/types/shared_params/function_definition.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Optional from typing_extensions import Required, TypedDict from ...types import shared_params @@ -33,3 +34,12 @@ class FunctionDefinition(TypedDict, total=False): Omitting `parameters` defines a function with an empty parameter list. """ + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](docs/guides/function-calling). + """ diff --git a/src/openai/types/shared_params/response_format_json_object.py b/src/openai/types/shared_params/response_format_json_object.py new file mode 100644 index 0000000000..8419c6cb56 --- /dev/null +++ b/src/openai/types/shared_params/response_format_json_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJSONObject"] + + +class ResponseFormatJSONObject(TypedDict, total=False): + type: Required[Literal["json_object"]] + """The type of response format being defined: `json_object`""" diff --git a/src/openai/types/shared_params/response_format_json_schema.py b/src/openai/types/shared_params/response_format_json_schema.py new file mode 100644 index 0000000000..4b60fae8ee --- /dev/null +++ b/src/openai/types/shared_params/response_format_json_schema.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJSONSchema", "JSONSchema"] + + +class JSONSchema(TypedDict, total=False): + name: Required[str] + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema: Dict[str, object] + """The schema for the response format, described as a JSON Schema object.""" + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the output. + + If set to true, the model will always follow the exact schema defined in the + `schema` field. Only a subset of JSON Schema is supported when `strict` is + `true`. To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + """ + + +class ResponseFormatJSONSchema(TypedDict, total=False): + json_schema: Required[JSONSchema] + + type: Required[Literal["json_schema"]] + """The type of response format being defined: `json_schema`""" diff --git a/src/openai/types/shared_params/response_format_text.py b/src/openai/types/shared_params/response_format_text.py new file mode 100644 index 0000000000..5bec7fc503 --- /dev/null +++ b/src/openai/types/shared_params/response_format_text.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatText"] + + +class ResponseFormatText(TypedDict, total=False): + type: Required[Literal["text"]] + """The type of response format being defined: `text`""" diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index 14f279bbb5..fbd5ff0597 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -24,19 +24,19 @@ class TestAssistants: @parametrize def test_method_create(self, client: OpenAI) -> None: assistant = client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", description="description", instructions="instructions", metadata={}, name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -59,7 +59,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert response.is_closed is True @@ -70,7 +70,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,7 +134,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: 
metadata={}, model="model", name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -256,19 +256,19 @@ class TestAsyncAssistants: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.create( - model="gpt-4-turbo", + model="gpt-4o", description="description", instructions="instructions", metadata={}, name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, @@ -291,7 +291,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) assert response.is_closed is True @@ -302,7 +302,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.create( - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -366,7 +366,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> metadata={}, model="model", name="name", - response_format="none", + response_format="auto", temperature=1, tool_resources={ "code_interpreter": {"file_ids": ["string", "string", "string"]}, diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index d45a1a18d1..36ce75b8e7 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -302,9 +302,9 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", stream=False, temperature=1, thread={ @@ -473,9 +473,9 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", temperature=1, thread={ "messages": [ @@ -912,9 +912,9 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", stream=False, temperature=1, thread={ @@ -1083,9 +1083,9 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie max_completion_tokens=256, max_prompt_tokens=256, metadata={}, - model="gpt-4-turbo", + model="gpt-4o", parallel_tool_calls=True, - response_format="none", + response_format="auto", temperature=1, thread={ "messages": [ diff --git a/tests/api_resources/beta/threads/test_runs.py 
diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py
index d45a1a18d1..36ce75b8e7 100644
--- a/tests/api_resources/beta/test_threads.py
+++ b/tests/api_resources/beta/test_threads.py
@@ -302,9 +302,9 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             thread={
@@ -473,9 +473,9 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             thread={
                 "messages": [
@@ -912,9 +912,9 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             thread={
@@ -1083,9 +1083,9 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             thread={
                 "messages": [
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index ff242126b2..548c14f45d 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
@@ -133,9 +133,9 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             tool_choice="none",
@@ -297,9 +297,9 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             tool_choice="none",
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
@@ -799,9 +799,9 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             tool_choice="none",
@@ -963,9 +963,9 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             tool_choice="none",
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
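Note: the same substitution applies to threads and runs. Illustrative only; the assistant id below is a hypothetical placeholder:

```python
run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",  # hypothetical id
    model="gpt-4o",
    response_format="auto",
)
```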
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index ca5cada7f3..f31fd06dd9 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -24,11 +24,11 @@ def test_method_create_overload_1(self, client: OpenAI) -> None:
         completion = client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])

@@ -37,12 +37,12 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         completion = client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                     "name": "name",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             frequency_penalty=-2,
             function_call="none",
             functions=[
@@ -58,7 +58,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -73,6 +73,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -81,6 +82,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -89,6 +91,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -103,11 +106,11 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
         response = client.chat.completions.with_raw_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )

         assert response.is_closed is True
@@ -120,11 +123,11 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
         with client.chat.completions.with_streaming_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -139,11 +142,11 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )
         completion_stream.response.close()
@@ -153,12 +156,12 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                     "name": "name",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
             frequency_penalty=-2,
             function_call="none",
@@ -175,7 +178,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -189,6 +192,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -197,6 +201,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -205,6 +210,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -219,11 +225,11 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.chat.completions.with_raw_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )

@@ -236,11 +242,11 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.chat.completions.with_streaming_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
@@ -260,11 +266,11 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None
         completion = await async_client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])

@@ -273,12 +279,12 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
         completion = await async_client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                     "name": "name",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             frequency_penalty=-2,
             function_call="none",
             functions=[
@@ -294,7 +300,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -309,6 +315,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -317,6 +324,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -325,6 +333,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -339,11 +348,11 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
         response = await async_client.chat.completions.with_raw_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )

         assert response.is_closed is True
@@ -356,11 +365,11 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
         async with async_client.chat.completions.with_streaming_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -375,11 +384,11 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
         completion_stream = await async_client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )
         await completion_stream.response.aclose()
@@ -389,12 +398,12 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
         completion_stream = await async_client.chat.completions.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                     "name": "name",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
             frequency_penalty=-2,
             function_call="none",
@@ -411,7 +420,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -425,6 +434,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -433,6 +443,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -441,6 +452,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                         "description": "description",
                         "name": "name",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -455,11 +467,11 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
         response = await async_client.chat.completions.with_raw_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )

@@ -472,11 +484,11 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe
         async with async_client.chat.completions.with_streaming_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
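Note: beyond the model and `response_format` swaps, these tests now pass `"strict": True` inside each tool's function definition, matching the new `strict` field on `FunctionDefinition`. A sketch of a strict tool definition; the tool name, description, and schema are invented for illustration, and strict mode is documented to require `additionalProperties: false` with all properties required:

```python
completion = client.chat.completions.create(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Look up the current weather for a city.",
                "strict": True,  # enforce exact adherence to `parameters`
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                    "additionalProperties": False,
                },
            },
        }
    ],
)
```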
model="gpt-4-turbo", + model="gpt-4o", stream=True, ) @@ -472,11 +484,11 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe async with async_client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", stream=True, ) as response: assert not response.is_closed diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index 3353547ad7..e19b22b0b1 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -24,7 +24,7 @@ class TestJobs: @parametrize def test_method_create(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create(self, client: OpenAI) -> None: @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", hyperparameters={ "batch_size": "auto", @@ -77,7 +77,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) @@ -89,7 +89,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) as response: assert not response.is_closed @@ -263,7 +263,7 @@ class TestAsyncJobs: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @@ -271,7 +271,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", hyperparameters={ "batch_size": "auto", @@ -316,7 +316,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.with_raw_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) @@ -328,7 +328,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", training_file="file-abc123", ) as response: assert not response.is_closed diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 71f8e5834b..8791507c3e 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -21,14 +21,14 @@ class TestModels: @parametrize def 
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index 71f8e5834b..8791507c3e 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -21,14 +21,14 @@ class TestModels:
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
         model = client.models.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )
         assert_matches_type(Model, model, path=["response"])

     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
         response = client.models.with_raw_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )

         assert response.is_closed is True
@@ -39,7 +39,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
         with client.models.with_streaming_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,14 +84,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None:
     @parametrize
     def test_method_delete(self, client: OpenAI) -> None:
         model = client.models.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         )
         assert_matches_type(ModelDeleted, model, path=["response"])

     @parametrize
     def test_raw_response_delete(self, client: OpenAI) -> None:
         response = client.models.with_raw_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         )

         assert response.is_closed is True
@@ -102,7 +102,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: OpenAI) -> None:
         with client.models.with_streaming_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -126,14 +126,14 @@ class TestAsyncModels:
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
         model = await async_client.models.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )
         assert_matches_type(Model, model, path=["response"])

     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.models.with_raw_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )

         assert response.is_closed is True
@@ -144,7 +144,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
         async with async_client.models.with_streaming_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -189,14 +189,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
         model = await async_client.models.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
        )
         assert_matches_type(ModelDeleted, model, path=["response"])

     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.models.with_raw_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         )

         assert response.is_closed is True
@@ -207,7 +207,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
         async with async_client.models.with_streaming_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
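Note: the model-management tests follow suit, retrieving `gpt-4o-mini` and deleting an `ft:gpt-4o-mini:...` fine-tune. Sketch with the placeholder ids from the tests:

```python
model = client.models.retrieve("gpt-4o-mini")
deleted = client.models.delete("ft:gpt-4o-mini:acemeco:suffix:abc123")
assert deleted.deleted
```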
"ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "ft:gpt-4o-mini:acemeco:suffix:abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/test_client.py b/tests/test_client.py index 2402ffa82f..054ae0ff4e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -780,11 +780,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: response = client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert response.retries_taken == failures_before_success @@ -811,11 +811,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert response.retries_taken == failures_before_success @@ -1574,11 +1574,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: response = await client.chat.completions.with_raw_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) assert response.retries_taken == failures_before_success @@ -1606,10 +1606,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: async with client.chat.completions.with_streaming_response.create( messages=[ { - "content": "content", + "content": "string", "role": "system", } ], - model="gpt-4-turbo", + model="gpt-4o", ) as response: assert response.retries_taken == failures_before_success