diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 44959ac416..21b274b18a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.35.0"
+  ".": "1.35.1"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dc259c2ac8..f442ddd2ce 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 1.35.1 (2024-06-19)
+
+Full Changelog: [v1.35.0...v1.35.1](https://github.com/openai/openai-python/compare/v1.35.0...v1.35.1)
+
+### Bug Fixes
+
+* **client/async:** avoid blocking io call for platform headers ([#1488](https://github.com/openai/openai-python/issues/1488)) ([ae64c05](https://github.com/openai/openai-python/commit/ae64c05cbae76a58b592d913bee6ac1ef9611d4c))
+
 ## 1.35.0 (2024-06-18)
 
 Full Changelog: [v1.34.0...v1.35.0](https://github.com/openai/openai-python/compare/v1.34.0...v1.35.0)
diff --git a/pyproject.toml b/pyproject.toml
index 5241598939..270c21f4cf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.35.0"
+version = "1.35.1"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index 1c9a1a03f2..84004ebba5 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -60,7 +60,7 @@
     RequestOptions,
     ModelBuilderProtocol,
 )
-from ._utils import is_dict, is_list, is_given, lru_cache, is_mapping
+from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
 from ._compat import model_copy, model_dump
 from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
 from ._response import (
@@ -359,6 +359,7 @@ def __init__(
         self._custom_query = custom_query or {}
         self._strict_response_validation = _strict_response_validation
         self._idempotency_header = None
+        self._platform: Platform | None = None
 
         if max_retries is None:  # pyright: ignore[reportUnnecessaryComparison]
             raise TypeError(
@@ -623,7 +624,10 @@ def base_url(self, url: URL | str) -> None:
         self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url))
 
     def platform_headers(self) -> Dict[str, str]:
-        return platform_headers(self._version)
+        # the actual implementation is in a separate `lru_cache` decorated
+        # function because adding `lru_cache` to methods will leak memory
+        # https://github.com/python/cpython/issues/88476
+        return platform_headers(self._version, platform=self._platform)
 
     def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None:
         """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified.
@@ -1513,6 +1517,11 @@ async def _request(
         stream_cls: type[_AsyncStreamT] | None,
         remaining_retries: int | None,
     ) -> ResponseT | _AsyncStreamT:
+        if self._platform is None:
+            # `get_platform` can make blocking IO calls so we
+            # execute it earlier while we are in an async context
+            self._platform = await asyncify(get_platform)()
+
         cast_to = self._maybe_override_cast_to(cast_to, options)
         await self._prepare_options(options)
 
@@ -1949,11 +1958,11 @@ def get_platform() -> Platform:
 
 
 @lru_cache(maxsize=None)
-def platform_headers(version: str) -> Dict[str, str]:
+def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]:
     return {
         "X-Stainless-Lang": "python",
         "X-Stainless-Package-Version": version,
-        "X-Stainless-OS": str(get_platform()),
+        "X-Stainless-OS": str(platform or get_platform()),
         "X-Stainless-Arch": str(get_architecture()),
         "X-Stainless-Runtime": get_python_runtime(),
         "X-Stainless-Runtime-Version": get_python_version(),
diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py
index 31b5b22799..667e2473f6 100644
--- a/src/openai/_utils/__init__.py
+++ b/src/openai/_utils/__init__.py
@@ -49,3 +49,4 @@
     maybe_transform as maybe_transform,
     async_maybe_transform as async_maybe_transform,
 )
+from ._reflection import function_has_argument as function_has_argument
diff --git a/src/openai/_utils/_reflection.py b/src/openai/_utils/_reflection.py
new file mode 100644
index 0000000000..e134f58e08
--- /dev/null
+++ b/src/openai/_utils/_reflection.py
@@ -0,0 +1,8 @@
+import inspect
+from typing import Any, Callable
+
+
+def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool:
+    """Returns whether or not the given function has a specific parameter"""
+    sig = inspect.signature(func)
+    return arg_name in sig.parameters
diff --git a/src/openai/_utils/_sync.py b/src/openai/_utils/_sync.py
index 595924e5b1..d0d810337e 100644
--- a/src/openai/_utils/_sync.py
+++ b/src/openai/_utils/_sync.py
@@ -7,6 +7,8 @@
 import anyio
 import anyio.to_thread
 
+from ._reflection import function_has_argument
+
 T_Retval = TypeVar("T_Retval")
 T_ParamSpec = ParamSpec("T_ParamSpec")
 
@@ -59,6 +61,21 @@ def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str:
 
     async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
         partial_f = functools.partial(function, *args, **kwargs)
-        return await anyio.to_thread.run_sync(partial_f, cancellable=cancellable, limiter=limiter)
+
+        # In `v4.1.0` anyio added the `abandon_on_cancel` argument and deprecated the old
+        # `cancellable` argument, so we need to use the new `abandon_on_cancel` to avoid
+        # surfacing deprecation warnings.
+        if function_has_argument(anyio.to_thread.run_sync, "abandon_on_cancel"):
+            return await anyio.to_thread.run_sync(
+                partial_f,
+                abandon_on_cancel=cancellable,
+                limiter=limiter,
+            )
+
+        return await anyio.to_thread.run_sync(
+            partial_f,
+            cancellable=cancellable,
+            limiter=limiter,
+        )
 
     return wrapper
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 1f47c75093..ae8e3fb08d 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.35.0"  # x-release-please-version
+__version__ = "1.35.1"  # x-release-please-version
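
Note (illustrative sketch, not part of the patch above): the standalone example below mirrors the two ideas the patch combines in `_sync.py` and the async request path: detect whether `anyio.to_thread.run_sync` accepts the newer `abandon_on_cancel` keyword (added in anyio v4.1.0, replacing the deprecated `cancellable`), and run a blocking call in a worker thread so the event loop is never blocked, the way `await asyncify(get_platform)()` is used in `_request`. The names `has_param` and `run_blocking` are invented for this example and do not exist in the library.

# Illustrative sketch only; assumes anyio >= 3.x is installed.
import functools
import inspect
from typing import Any, Callable, TypeVar

import anyio
import anyio.to_thread

T = TypeVar("T")


def has_param(func: Callable[..., Any], name: str) -> bool:
    # Same idea as the `function_has_argument` helper added in `_reflection.py`.
    return name in inspect.signature(func).parameters


async def run_blocking(func: Callable[..., T], *args: Any, **kwargs: Any) -> T:
    # Offload a blocking call to a worker thread so the event loop stays free.
    partial_f = functools.partial(func, *args, **kwargs)
    if has_param(anyio.to_thread.run_sync, "abandon_on_cancel"):
        # anyio >= 4.1.0: `cancellable` is deprecated in favour of `abandon_on_cancel`.
        return await anyio.to_thread.run_sync(partial_f, abandon_on_cancel=False)
    return await anyio.to_thread.run_sync(partial_f, cancellable=False)


async def main() -> None:
    import platform

    # e.g. a potentially slow platform lookup, kept off the event loop
    print(await run_blocking(platform.platform))


if __name__ == "__main__":
    anyio.run(main)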