diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 89bd0ec2f7..bfaab56f68 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.23.6" + ".": "1.24.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a25cd549b7..da1a4c27a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 1.24.0 (2024-04-29) + +Full Changelog: [v1.23.6...v1.24.0](https://github.com/openai/openai-python/compare/v1.23.6...v1.24.0) + +### Features + +* **api:** add required tool_choice ([#1382](https://github.com/openai/openai-python/issues/1382)) ([c558f65](https://github.com/openai/openai-python/commit/c558f651df39f61425cd4109318f78ed94cbf163)) + + +### Chores + +* **client:** log response headers in debug mode ([#1383](https://github.com/openai/openai-python/issues/1383)) ([f31a426](https://github.com/openai/openai-python/commit/f31a4261adc4ebd92582cee264e41eb6a6dafc57)) +* **internal:** minor reformatting ([#1377](https://github.com/openai/openai-python/issues/1377)) ([7003dbb](https://github.com/openai/openai-python/commit/7003dbb863b6e16381070b8b86ac24aa070a3799)) +* **internal:** reformat imports ([#1375](https://github.com/openai/openai-python/issues/1375)) ([2ad0c3b](https://github.com/openai/openai-python/commit/2ad0c3b8e0b746ed20db3c84a9c6a369aa10bf5d)) + ## 1.23.6 (2024-04-25) Full Changelog: [v1.23.5...v1.23.6](https://github.com/openai/openai-python/compare/v1.23.5...v1.23.6) diff --git a/pyproject.toml b/pyproject.toml index d82615a4ad..d1006b3b70 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.23.6" +version = "1.24.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index cd8361607e..5d5d25fca9 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -946,6 +946,8 @@ def _request( if self.custom_auth is not None: kwargs["auth"] = self.custom_auth + log.debug("Sending HTTP Request: %s %s", request.method, request.url) + try: response = self._client.send( request, @@ -984,8 +986,14 @@ def _request( raise APIConnectionError(request=request) from err log.debug( - 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase + 'HTTP Response: %s %s "%i %s" %s', + request.method, + request.url, + response.status_code, + response.reason_phrase, + response.headers, ) + log.debug("request_id: %s", response.headers.get("x-request-id")) try: response.raise_for_status() diff --git a/src/openai/_version.py b/src/openai/_version.py index 7ca1a0a78b..5b3383adc3 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.23.6" # x-release-please-version +__version__ = "1.24.0" # x-release-please-version diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index e572a14a19..4268d41390 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -187,8 +187,9 @@ def create( tool_choice: Controls which (if any) tool is called by the model. 
`none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -330,8 +331,9 @@ def create( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -473,8 +475,9 @@ def create( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -1716,8 +1719,9 @@ async def create( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -1859,8 +1863,9 @@ async def create( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -2002,8 +2007,9 @@ async def create( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. 
`auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 1c516bcea6..2455272658 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -365,8 +365,9 @@ def create_and_run( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -507,8 +508,9 @@ def create_and_run( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -649,8 +651,9 @@ def create_and_run( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -1336,8 +1339,9 @@ async def create_and_run( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
@@ -1478,8 +1482,9 @@ async def create_and_run( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. @@ -1620,8 +1625,9 @@ async def create_and_run( tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 2a6a0e7738..5104cd6136 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -171,15 +171,15 @@ def create( We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that function. + call that tool. - `none` is the default when no functions are present. `auto` is the default if - functions are present. + `none` is the default when no tools are present. `auto` is the default if tools + are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs @@ -339,15 +339,15 @@ def create( We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. 
Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that function. + call that tool. - `none` is the default when no functions are present. `auto` is the default if - functions are present. + `none` is the default when no tools are present. `auto` is the default if tools + are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs @@ -507,15 +507,15 @@ def create( We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that function. + call that tool. - `none` is the default when no functions are present. `auto` is the default if - functions are present. + `none` is the default when no tools are present. `auto` is the default if tools + are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs @@ -751,15 +751,15 @@ async def create( We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that function. + call that tool. - `none` is the default when no functions are present. `auto` is the default if - functions are present. + `none` is the default when no tools are present. `auto` is the default if tools + are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs @@ -919,15 +919,15 @@ async def create( We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. 
`required` means + the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that function. + call that tool. - `none` is the default when no functions are present. `auto` is the default if - functions are present. + `none` is the default when no tools are present. `auto` is the default if tools + are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs @@ -1087,15 +1087,15 @@ async def create( We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that function. + call that tool. - `none` is the default when no functions are present. `auto` is the default if - functions are present. + `none` is the default when no tools are present. `auto` is the default if tools + are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py index fa512e27f9..0b6ab39e78 100644 --- a/src/openai/types/audio/transcription.py +++ b/src/openai/types/audio/transcription.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + from ..._models import BaseModel __all__ = ["Transcription"] diff --git a/src/openai/types/audio/translation.py b/src/openai/types/audio/translation.py index efc56f7f9b..3d9ede2939 100644 --- a/src/openai/types/audio/translation.py +++ b/src/openai/types/audio/translation.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + from ..._models import BaseModel __all__ = ["Translation"] diff --git a/src/openai/types/batch_request_counts.py b/src/openai/types/batch_request_counts.py index 068b071af1..ef6c84a0a1 100644 --- a/src/openai/types/batch_request_counts.py +++ b/src/openai/types/batch_request_counts.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + from .._models import BaseModel __all__ = ["BatchRequestCounts"] diff --git a/src/openai/types/beta/assistant_tool_choice_function.py b/src/openai/types/beta/assistant_tool_choice_function.py index 87f38310ca..d0d4255357 100644 --- a/src/openai/types/beta/assistant_tool_choice_function.py +++ b/src/openai/types/beta/assistant_tool_choice_function.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ + from ..._models import BaseModel __all__ = ["AssistantToolChoiceFunction"] diff --git a/src/openai/types/beta/assistant_tool_choice_option.py b/src/openai/types/beta/assistant_tool_choice_option.py index 0045a5986e..8958bc8fb0 100644 --- a/src/openai/types/beta/assistant_tool_choice_option.py +++ b/src/openai/types/beta/assistant_tool_choice_option.py @@ -7,4 +7,4 @@ __all__ = ["AssistantToolChoiceOption"] -AssistantToolChoiceOption = Union[Literal["none", "auto"], AssistantToolChoice] +AssistantToolChoiceOption = Union[Literal["none", "auto", "required"], AssistantToolChoice] diff --git a/src/openai/types/beta/assistant_tool_choice_option_param.py b/src/openai/types/beta/assistant_tool_choice_option_param.py index 618e7bff98..81b7f15136 100644 --- a/src/openai/types/beta/assistant_tool_choice_option_param.py +++ b/src/openai/types/beta/assistant_tool_choice_option_param.py @@ -9,4 +9,4 @@ __all__ = ["AssistantToolChoiceOptionParam"] -AssistantToolChoiceOptionParam = Union[Literal["none", "auto"], AssistantToolChoiceParam] +AssistantToolChoiceOptionParam = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam] diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 9adb049843..60510965a2 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -135,8 +135,9 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): """ Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. """ diff --git a/src/openai/types/beta/threads/image_file.py b/src/openai/types/beta/threads/image_file.py index db0d6e823a..651a247d21 100644 --- a/src/openai/types/beta/threads/image_file.py +++ b/src/openai/types/beta/threads/image_file.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + from ...._models import BaseModel __all__ = ["ImageFile"] diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index 4fd5103348..6c118f27c1 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -196,8 +196,9 @@ class Run(BaseModel): """ Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
""" diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index f4780b7f09..2e4823bacd 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -134,8 +134,9 @@ class RunCreateParamsBase(TypedDict, total=False): """ Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value - and means the model can pick between generating a message or calling a tool. - Specifying a particular tool like `{"type": "file_search"}` or + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. """ diff --git a/src/openai/types/chat/chat_completion_tool_choice_option_param.py b/src/openai/types/chat/chat_completion_tool_choice_option_param.py index 9c0ae22528..1d3c2506ab 100644 --- a/src/openai/types/chat/chat_completion_tool_choice_option_param.py +++ b/src/openai/types/chat/chat_completion_tool_choice_option_param.py @@ -9,4 +9,4 @@ __all__ = ["ChatCompletionToolChoiceOptionParam"] -ChatCompletionToolChoiceOptionParam = Union[Literal["none", "auto"], ChatCompletionNamedToolChoiceParam] +ChatCompletionToolChoiceOptionParam = Union[Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 964b246c41..d30da60b16 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -152,15 +152,15 @@ class CompletionCreateParamsBase(TypedDict, total=False): tool_choice: ChatCompletionToolChoiceOptionParam """ - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via + Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that function. + call that tool. - `none` is the default when no functions are present. `auto` is the default if - functions are present. + `none` is the default when no tools are present. `auto` is the default if tools + are present. """ tools: Iterable[ChatCompletionToolParam] diff --git a/src/openai/types/completion_usage.py b/src/openai/types/completion_usage.py index e185a5cc38..0d57b96595 100644 --- a/src/openai/types/completion_usage.py +++ b/src/openai/types/completion_usage.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ + from .._models import BaseModel __all__ = ["CompletionUsage"] diff --git a/src/openai/types/model_deleted.py b/src/openai/types/model_deleted.py index e7601f74e4..d9a48bb1b5 100644 --- a/src/openai/types/model_deleted.py +++ b/src/openai/types/model_deleted.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + from .._models import BaseModel __all__ = ["ModelDeleted"] diff --git a/src/openai/types/moderation.py b/src/openai/types/moderation.py index 2a2e5c5d7a..5aa691823a 100644 --- a/src/openai/types/moderation.py +++ b/src/openai/types/moderation.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + from pydantic import Field as FieldInfo from .._models import BaseModel diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index 0c59cea09f..ba8e9e4099 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.audio.transcription import Transcription +from openai.types.audio import Transcription base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/audio/test_translations.py b/tests/api_resources/audio/test_translations.py index 5463fcff63..f5c6c68f0b 100644 --- a/tests/api_resources/audio/test_translations.py +++ b/tests/api_resources/audio/test_translations.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.audio.translation import Translation +from openai.types.audio import Translation base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index 428fe41e93..a92acb2ca5 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -10,8 +10,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.assistant import Assistant -from openai.types.beta.assistant_deleted import AssistantDeleted +from openai.types.beta import ( + Assistant, + AssistantDeleted, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 29a0a8d91c..715e3e8726 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -9,9 +9,11 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.beta.thread import Thread -from openai.types.beta.threads.run import Run -from openai.types.beta.thread_deleted import ThreadDeleted +from openai.types.beta import ( + Thread, + ThreadDeleted, +) +from openai.types.beta.threads import Run base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py index 742b5c0ed4..e671c96a45 100644 --- a/tests/api_resources/beta/test_vector_stores.py +++ b/tests/api_resources/beta/test_vector_stores.py @@ -10,8 +10,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import 
SyncCursorPage, AsyncCursorPage -from openai.types.beta.vector_store import VectorStore -from openai.types.beta.vector_store_deleted import VectorStoreDeleted +from openai.types.beta import ( + VectorStore, + VectorStoreDeleted, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/beta/threads/runs/test_steps.py b/tests/api_resources/beta/threads/runs/test_steps.py index 3b40b36e37..e6108d8dad 100644 --- a/tests/api_resources/beta/threads/runs/test_steps.py +++ b/tests/api_resources/beta/threads/runs/test_steps.py @@ -10,7 +10,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.threads.runs.run_step import RunStep +from openai.types.beta.threads.runs import RunStep base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py index f06db4fce5..26eb09acdd 100644 --- a/tests/api_resources/beta/threads/test_messages.py +++ b/tests/api_resources/beta/threads/test_messages.py @@ -10,7 +10,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.threads.message import Message +from openai.types.beta.threads import Message base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index f8dbdbf449..429c9bdeeb 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -10,7 +10,9 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.threads.run import Run +from openai.types.beta.threads import ( + Run, +) # pyright: reportDeprecated=false diff --git a/tests/api_resources/beta/vector_stores/test_file_batches.py b/tests/api_resources/beta/vector_stores/test_file_batches.py index 7e9b2e85de..9854d1a138 100644 --- a/tests/api_resources/beta/vector_stores/test_file_batches.py +++ b/tests/api_resources/beta/vector_stores/test_file_batches.py @@ -10,8 +10,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.vector_stores.vector_store_file import VectorStoreFile -from openai.types.beta.vector_stores.vector_store_file_batch import VectorStoreFileBatch +from openai.types.beta.vector_stores import ( + VectorStoreFile, + VectorStoreFileBatch, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/beta/vector_stores/test_files.py b/tests/api_resources/beta/vector_stores/test_files.py index 09f5c259bd..58301e2d37 100644 --- a/tests/api_resources/beta/vector_stores/test_files.py +++ b/tests/api_resources/beta/vector_stores/test_files.py @@ -10,8 +10,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.beta.vector_stores.vector_store_file import VectorStoreFile -from openai.types.beta.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted +from openai.types.beta.vector_stores import ( + VectorStoreFile, + 
VectorStoreFileDeleted, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index ddba1ca085..c54b56a37d 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.chat.chat_completion import ChatCompletion +from openai.types.chat import ChatCompletion base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py index 6ebf2225ae..915d5c6f63 100644 --- a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py +++ b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py @@ -10,7 +10,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.fine_tuning.jobs.fine_tuning_job_checkpoint import FineTuningJobCheckpoint +from openai.types.fine_tuning.jobs import FineTuningJobCheckpoint base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py index 29a96feb2d..1ff6d63b31 100644 --- a/tests/api_resources/fine_tuning/test_jobs.py +++ b/tests/api_resources/fine_tuning/test_jobs.py @@ -10,8 +10,10 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.fine_tuning.fine_tuning_job import FineTuningJob -from openai.types.fine_tuning.fine_tuning_job_event import FineTuningJobEvent +from openai.types.fine_tuning import ( + FineTuningJob, + FineTuningJobEvent, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py index 7967634128..6f9b598e61 100644 --- a/tests/api_resources/test_batches.py +++ b/tests/api_resources/test_batches.py @@ -9,8 +9,8 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type +from openai.types import Batch from openai.pagination import SyncCursorPage, AsyncCursorPage -from openai.types.batch import Batch base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index 249744b531..691c4ff77f 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.completion import Completion +from openai.types import Completion base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py index 9c4e55a5a8..e75545b4e2 100644 --- a/tests/api_resources/test_embeddings.py +++ b/tests/api_resources/test_embeddings.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.create_embedding_response import CreateEmbeddingResponse +from openai.types import CreateEmbeddingResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git 
a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py index 3b6817e27b..e5466e9eda 100644 --- a/tests/api_resources/test_files.py +++ b/tests/api_resources/test_files.py @@ -12,9 +12,8 @@ import openai._legacy_response as _legacy_response from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type +from openai.types import FileObject, FileDeleted from openai.pagination import SyncPage, AsyncPage -from openai.types.file_object import FileObject -from openai.types.file_deleted import FileDeleted # pyright: reportDeprecated=false diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py index 8d857d2f45..2e31f3354a 100644 --- a/tests/api_resources/test_images.py +++ b/tests/api_resources/test_images.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.images_response import ImagesResponse +from openai.types import ImagesResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 2351d64a33..71f8e5834b 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,9 +9,8 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type +from openai.types import Model, ModelDeleted from openai.pagination import SyncPage, AsyncPage -from openai.types.model import Model -from openai.types.model_deleted import ModelDeleted base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py index 52436ad0a9..94b9ecd31b 100644 --- a/tests/api_resources/test_moderations.py +++ b/tests/api_resources/test_moderations.py @@ -9,7 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type -from openai.types.moderation_create_response import ModerationCreateResponse +from openai.types import ModerationCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/test_client.py b/tests/test_client.py index ba85fd9d5f..c1e545e66f 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -17,7 +17,6 @@ from pydantic import ValidationError from openai import OpenAI, AsyncOpenAI, APIResponseValidationError -from openai._client import OpenAI, AsyncOpenAI from openai._models import BaseModel, FinalRequestOptions from openai._constants import RAW_RESPONSE_HEADER from openai._streaming import Stream, AsyncStream
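
The same `required` literal is added to Chat Completions, and `_base_client.py` now logs the outgoing request plus the response headers and `x-request-id` at debug level. A minimal sketch combining the two, assuming an illustrative model name and a hypothetical `get_weather` tool (setting the `OPENAI_LOG=debug` environment variable should surface the same log lines):

    import logging

    from openai import OpenAI

    # Show the new "Sending HTTP Request: ..." / "HTTP Response: ..." debug
    # lines added in _base_client.py by enabling debug logging globally.
    logging.basicConfig(level=logging.DEBUG)

    client = OpenAI()

    completion = client.chat.completions.create(
        model="gpt-4-turbo",  # illustrative model name
        messages=[{"role": "user", "content": "What is the weather in Paris?"}],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",  # hypothetical tool for the example
                    "parameters": {
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                },
            }
        ],
        # New literal accepted by ChatCompletionToolChoiceOptionParam in this
        # release: the model must call one or more tools rather than answering
        # the user directly.
        tool_choice="required",
    )
    print(completion.choices[0].message.tool_calls)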