diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 7ccfe12c9e..bc845f32af 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.15.0"
+ ".": "1.16.0"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 180bbf2a28..ce046a623d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## 1.16.0 (2024-04-01)
+
+Full Changelog: [v1.15.0...v1.16.0](https://github.com/openai/openai-python/compare/v1.15.0...v1.16.0)
+
+### Features
+
+* **api:** add support for filtering messages by run_id ([#1288](https://github.com/openai/openai-python/issues/1288)) ([58d6b77](https://github.com/openai/openai-python/commit/58d6b773218ef1dd8dc6208124a16078e4ac11c1))
+* **api:** run polling helpers ([#1289](https://github.com/openai/openai-python/issues/1289)) ([6b427f3](https://github.com/openai/openai-python/commit/6b427f38610847bce3ce5334177f07917bd7c187))
+
+
+### Chores
+
+* **client:** validate that max_retries is not None ([#1286](https://github.com/openai/openai-python/issues/1286)) ([aa5920a](https://github.com/openai/openai-python/commit/aa5920af6131c49a44352524154ee4a1684e76b2))
+
+
+### Refactors
+
+* rename createAndStream to stream ([6b427f3](https://github.com/openai/openai-python/commit/6b427f38610847bce3ce5334177f07917bd7c187))
+
## 1.15.0 (2024-03-31)
Full Changelog: [v1.14.3...v1.15.0](https://github.com/openai/openai-python/compare/v1.14.3...v1.15.0)
diff --git a/README.md b/README.md
index 7f053e5429..5264026dc9 100644
--- a/README.md
+++ b/README.md
@@ -51,12 +51,30 @@ we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
to add `OPENAI_API_KEY="My API Key"` to your `.env` file
so that your API Key is not stored in source control.
+### Polling Helpers
+
+When interacting with the API some actions such as starting a Run may take time to complete. The SDK includes
+helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
+If an API method results in an action which could benefit from polling there will be a corresponding version of the
+method ending in '\_and_poll'.
+
+For instance to create a Run and poll until it reaches a terminal state you can run:
+
+```python
+run = client.beta.threads.runs.create_and_poll(
+ thread_id=thread.id,
+ assistant_id=assistant.id,
+)
+```
+
+More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
+
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
```python
-with client.beta.threads.runs.create_and_stream(
+with client.beta.threads.runs.stream(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="Please address the user as Jane Doe. The user has a premium account.",
diff --git a/api.md b/api.md
index 29392cff13..dbc95cd0b4 100644
--- a/api.md
+++ b/api.md
@@ -230,6 +230,7 @@ Methods:
- client.beta.threads.update(thread_id, \*\*params) -> Thread
- client.beta.threads.delete(thread_id) -> ThreadDeleted
- client.beta.threads.create_and_run(\*\*params) -> Run
+- client.beta.threads.create_and_run_poll(\*args) -> Run
- client.beta.threads.create_and_run_stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]
### Runs
@@ -248,7 +249,11 @@ Methods:
- client.beta.threads.runs.list(thread_id, \*\*params) -> SyncCursorPage[Run]
- client.beta.threads.runs.cancel(run_id, \*, thread_id) -> Run
- client.beta.threads.runs.submit_tool_outputs(run_id, \*, thread_id, \*\*params) -> Run
+- client.beta.threads.runs.create_and_poll(\*args) -> Run
- client.beta.threads.runs.create_and_stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]
+- client.beta.threads.runs.poll(\*args) -> Run
+- client.beta.threads.runs.stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]
+- client.beta.threads.runs.submit_tool_outputs_and_poll(\*args) -> Run
- client.beta.threads.runs.submit_tool_outputs_stream(\*args) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]
#### Steps
diff --git a/examples/assistant.py b/examples/assistant.py
index c5fbb82a3a..0631494ecc 100644
--- a/examples/assistant.py
+++ b/examples/assistant.py
@@ -1,4 +1,3 @@
-import time
import openai
@@ -20,28 +19,20 @@
content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
)
-run = client.beta.threads.runs.create(
+run = client.beta.threads.runs.create_and_poll(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="Please address the user as Jane Doe. The user has a premium account.",
)
-print("checking assistant status. ")
-while True:
- run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
+print("Run completed with status: " + run.status)
- if run.status == "completed":
- print("done!")
- messages = client.beta.threads.messages.list(thread_id=thread.id)
+if run.status == "completed":
+ messages = client.beta.threads.messages.list(thread_id=thread.id)
- print("messages: ")
- for message in messages:
- assert message.content[0].type == "text"
- print({"role": message.role, "message": message.content[0].text.value})
+ print("messages: ")
+ for message in messages:
+ assert message.content[0].type == "text"
+ print({"role": message.role, "message": message.content[0].text.value})
- client.beta.assistants.delete(assistant.id)
-
- break
- else:
- print("in progress...")
- time.sleep(5)
+ client.beta.assistants.delete(assistant.id)
diff --git a/examples/assistant_stream_helpers.py b/examples/assistant_stream_helpers.py
index 6c2aae0b46..7baec77c72 100644
--- a/examples/assistant_stream_helpers.py
+++ b/examples/assistant_stream_helpers.py
@@ -63,7 +63,7 @@ def main() -> None:
)
print(f"Question: {question}\n")
- with client.beta.threads.runs.create_and_stream(
+ with client.beta.threads.runs.stream(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="Please address the user as Jane Doe. The user has a premium account.",
diff --git a/helpers.md b/helpers.md
index fed20ee81c..4271cd9ede 100644
--- a/helpers.md
+++ b/helpers.md
@@ -46,11 +46,11 @@ class EventHandler(AssistantEventHandler):
if output.type == "logs":
print(f"\n{output.logs}", flush=True)
-# Then, we use the `create_and_stream` SDK helper
+# Then, we use the `stream` SDK helper
# with the `EventHandler` class to create the Run
# and stream the response.
-with client.beta.threads.runs.create_and_stream(
+with client.beta.threads.runs.stream(
thread_id="thread_id",
assistant_id="assistant_id",
event_handler=EventHandler(),
@@ -63,7 +63,7 @@ with client.beta.threads.runs.create_and_stream(
You can also iterate over all the streamed events.
```python
-with client.beta.threads.runs.create_and_stream(
+with client.beta.threads.runs.stream(
thread_id=thread.id,
assistant_id=assistant.id
) as stream:
@@ -78,7 +78,7 @@ with client.beta.threads.runs.create_and_stream(
You can also iterate over just the text deltas received
```python
-with client.beta.threads.runs.create_and_stream(
+with client.beta.threads.runs.stream(
thread_id=thread.id,
assistant_id=assistant.id
) as stream:
@@ -91,7 +91,7 @@ with client.beta.threads.runs.create_and_stream(
There are three helper methods for creating streams:
```python
-client.beta.threads.runs.create_and_stream()
+client.beta.threads.runs.stream()
```
This method can be used to start and stream the response to an existing run with an associated thread
diff --git a/pyproject.toml b/pyproject.toml
index beb31f24a1..437a5e9cc8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.15.0"
+version = "1.16.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index 7a8595c173..502ed7c7ae 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -361,6 +361,11 @@ def __init__(
self._strict_response_validation = _strict_response_validation
self._idempotency_header = None
+ if max_retries is None: # pyright: ignore[reportUnnecessaryComparison]
+ raise TypeError(
+ "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `openai.DEFAULT_MAX_RETRIES`"
+ )
+
def _enforce_trailing_slash(self, url: URL) -> URL:
if url.raw_path.endswith(b"/"):
return url
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 6865a9f7bd..fe724b63af 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.15.0" # x-release-please-version
+__version__ = "1.16.0" # x-release-please-version
diff --git a/src/openai/resources/beta/threads/messages/messages.py b/src/openai/resources/beta/threads/messages/messages.py
index 1c008a7cc4..bbce3e99e4 100644
--- a/src/openai/resources/beta/threads/messages/messages.py
+++ b/src/openai/resources/beta/threads/messages/messages.py
@@ -203,6 +203,7 @@ def list(
before: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ run_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -230,6 +231,8 @@ def list(
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
+ run_id: Filter messages by the run ID that generated them.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -255,6 +258,7 @@ def list(
"before": before,
"limit": limit,
"order": order,
+ "run_id": run_id,
},
message_list_params.MessageListParams,
),
@@ -432,6 +436,7 @@ def list(
before: str | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ run_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -459,6 +464,8 @@ def list(
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
+ run_id: Filter messages by the run ID that generated them.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -484,6 +491,7 @@ def list(
"before": before,
"limit": limit,
"order": order,
+ "run_id": run_id,
},
message_list_params.MessageListParams,
),
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index ab39a96a8d..4529c65025 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -2,6 +2,8 @@
from __future__ import annotations
+import time
+import typing_extensions
from typing import Iterable, Optional, overload
from functools import partial
from typing_extensions import Literal
@@ -19,6 +21,7 @@
)
from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ....._utils import (
+ is_given,
required_args,
maybe_transform,
async_maybe_transform,
@@ -497,7 +500,58 @@ def cancel(
cast_to=Run,
)
+ def create_and_poll(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to create a run and poll for a terminal state. More information on Run
+ lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ run = self.create(
+ thread_id=thread_id,
+ assistant_id=assistant_id,
+ additional_instructions=additional_instructions,
+ instructions=instructions,
+ metadata=metadata,
+ model=model,
+ temperature=temperature,
+ # We assume we are not streaming when polling
+ stream=False,
+ tools=tools,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return self.poll(
+ run.id,
+ thread_id=thread_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ poll_interval_ms=poll_interval_ms,
+ timeout=timeout,
+ )
+
@overload
+ @typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
self,
*,
@@ -520,6 +574,7 @@ def create_and_stream(
...
@overload
+ @typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
self,
*,
@@ -542,6 +597,7 @@ def create_and_stream(
"""Create a Run stream"""
...
+ @typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
self,
*,
@@ -596,6 +652,150 @@ def create_and_stream(
)
return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
+ def poll(
+ self,
+ run_id: str,
+ thread_id: str,
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to poll a run status until it reaches a terminal state. More
+ information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ extra_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})}
+
+ if is_given(poll_interval_ms):
+ extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired"}
+ while True:
+ response = self.with_raw_response.retrieve(
+ thread_id=thread_id,
+ run_id=run_id,
+ extra_headers=extra_headers,
+ extra_body=extra_body,
+ extra_query=extra_query,
+ timeout=timeout,
+ )
+
+ run = response.parse()
+ # Return if we reached a terminal state
+ if run.status in terminal_states:
+ return run
+
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ time.sleep(poll_interval_ms / 1000)
+
+ @overload
+ def stream(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AssistantStreamManager[AssistantEventHandler]:
+ """Create a Run stream"""
+ ...
+
+ @overload
+ def stream(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ event_handler: AssistantEventHandlerT,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AssistantStreamManager[AssistantEventHandlerT]:
+ """Create a Run stream"""
+ ...
+
+ def stream(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ event_handler: AssistantEventHandlerT | None = None,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
+ """Create a Run stream"""
+ if not thread_id:
+ raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+ extra_headers = {
+ "OpenAI-Beta": "assistants=v1",
+ "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
+ "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+ **(extra_headers or {}),
+ }
+ make_request = partial(
+ self._post,
+ f"/threads/{thread_id}/runs",
+ body=maybe_transform(
+ {
+ "assistant_id": assistant_id,
+ "additional_instructions": additional_instructions,
+ "instructions": instructions,
+ "metadata": metadata,
+ "model": model,
+ "temperature": temperature,
+ "stream": True,
+ "tools": tools,
+ },
+ run_create_params.RunCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Run,
+ stream=True,
+ stream_cls=Stream[AssistantStreamEvent],
+ )
+ return AssistantStreamManager(make_request, event_handler=event_handler or AssistantEventHandler())
+
@overload
def submit_tool_outputs(
self,
@@ -747,6 +947,45 @@ def submit_tool_outputs(
stream_cls=Stream[AssistantStreamEvent],
)
+ def submit_tool_outputs_and_poll(
+ self,
+ *,
+ tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+ run_id: str,
+ thread_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to submit a tool output to a run and poll for a terminal run state.
+ More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ run = self.submit_tool_outputs(
+ run_id=run_id,
+ thread_id=thread_id,
+ tool_outputs=tool_outputs,
+ stream=False,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return self.poll(
+ run_id=run.id,
+ thread_id=thread_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ poll_interval_ms=poll_interval_ms,
+ )
+
@overload
def submit_tool_outputs_stream(
self,
@@ -763,7 +1002,8 @@ def submit_tool_outputs_stream(
) -> AssistantStreamManager[AssistantEventHandler]:
"""
Submit the tool outputs from a previous run and stream the run to a terminal
- state.
+ state. More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
...
@@ -784,7 +1024,8 @@ def submit_tool_outputs_stream(
) -> AssistantStreamManager[AssistantEventHandlerT]:
"""
Submit the tool outputs from a previous run and stream the run to a terminal
- state.
+ state. More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
...
@@ -804,7 +1045,8 @@ def submit_tool_outputs_stream(
) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
"""
Submit the tool outputs from a previous run and stream the run to a terminal
- state.
+ state. More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
@@ -1283,7 +1525,58 @@ async def cancel(
cast_to=Run,
)
+ async def create_and_poll(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to create a run and poll for a terminal state. More information on Run
+ lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ run = await self.create(
+ thread_id=thread_id,
+ assistant_id=assistant_id,
+ additional_instructions=additional_instructions,
+ instructions=instructions,
+ metadata=metadata,
+ model=model,
+ temperature=temperature,
+ # We assume we are not streaming when polling
+ stream=False,
+ tools=tools,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return await self.poll(
+ run.id,
+ thread_id=thread_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ poll_interval_ms=poll_interval_ms,
+ timeout=timeout,
+ )
+
@overload
+ @typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
self,
*,
@@ -1306,6 +1599,7 @@ def create_and_stream(
...
@overload
+ @typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
self,
*,
@@ -1328,6 +1622,7 @@ def create_and_stream(
"""Create a Run stream"""
...
+ @typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
self,
*,
@@ -1384,6 +1679,152 @@ def create_and_stream(
)
return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
+ async def poll(
+ self,
+ run_id: str,
+ thread_id: str,
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to poll a run status until it reaches a terminal state. More
+ information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ extra_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})}
+
+ if is_given(poll_interval_ms):
+ extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
+
+ terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired"}
+ while True:
+ response = await self.with_raw_response.retrieve(
+ thread_id=thread_id,
+ run_id=run_id,
+ extra_headers=extra_headers,
+ extra_body=extra_body,
+ extra_query=extra_query,
+ timeout=timeout,
+ )
+
+ run = response.parse()
+ # Return if we reached a terminal state
+ if run.status in terminal_states:
+ return run
+
+ if not is_given(poll_interval_ms):
+ from_header = response.headers.get("openai-poll-after-ms")
+ if from_header is not None:
+ poll_interval_ms = int(from_header)
+ else:
+ poll_interval_ms = 1000
+
+ time.sleep(poll_interval_ms / 1000)
+
+ @overload
+ def stream(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
+ """Create a Run stream"""
+ ...
+
+ @overload
+ def stream(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ event_handler: AsyncAssistantEventHandlerT,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
+ """Create a Run stream"""
+ ...
+
+ def stream(
+ self,
+ *,
+ assistant_id: str,
+ additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
+ thread_id: str,
+ event_handler: AsyncAssistantEventHandlerT | None = None,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> (
+ AsyncAssistantStreamManager[AsyncAssistantEventHandler]
+ | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
+ ):
+ """Create a Run stream"""
+ if not thread_id:
+ raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
+
+ extra_headers = {
+ "OpenAI-Beta": "assistants=v1",
+ "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
+ "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
+ **(extra_headers or {}),
+ }
+ request = self._post(
+ f"/threads/{thread_id}/runs",
+ body=maybe_transform(
+ {
+ "assistant_id": assistant_id,
+ "additional_instructions": additional_instructions,
+ "instructions": instructions,
+ "metadata": metadata,
+ "model": model,
+ "temperature": temperature,
+ "stream": True,
+ "tools": tools,
+ },
+ run_create_params.RunCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Run,
+ stream=True,
+ stream_cls=AsyncStream[AssistantStreamEvent],
+ )
+ return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
+
@overload
async def submit_tool_outputs(
self,
@@ -1535,6 +1976,45 @@ async def submit_tool_outputs(
stream_cls=AsyncStream[AssistantStreamEvent],
)
+ async def submit_tool_outputs_and_poll(
+ self,
+ *,
+ tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
+ run_id: str,
+ thread_id: str,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to submit a tool output to a run and poll for a terminal run state.
+ More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ run = await self.submit_tool_outputs(
+ run_id=run_id,
+ thread_id=thread_id,
+ tool_outputs=tool_outputs,
+ stream=False,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return await self.poll(
+ run_id=run.id,
+ thread_id=thread_id,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ poll_interval_ms=poll_interval_ms,
+ )
+
@overload
def submit_tool_outputs_stream(
self,
@@ -1551,7 +2031,8 @@ def submit_tool_outputs_stream(
) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
"""
Submit the tool outputs from a previous run and stream the run to a terminal
- state.
+ state. More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
...
@@ -1572,7 +2053,8 @@ def submit_tool_outputs_stream(
) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
"""
Submit the tool outputs from a previous run and stream the run to a terminal
- state.
+ state. More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
...
@@ -1595,7 +2077,8 @@ def submit_tool_outputs_stream(
):
"""
Submit the tool outputs from a previous run and stream the run to a terminal
- state.
+ state. More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
"""
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index c2ad6aca5f..3509267d4f 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -467,6 +467,45 @@ def create_and_run(
stream_cls=Stream[AssistantStreamEvent],
)
+ def create_and_run_poll(
+ self,
+ *,
+ assistant_id: str,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to create a thread, start a run and then poll for a terminal state.
+ More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ run = self.create_and_run(
+ assistant_id=assistant_id,
+ instructions=instructions,
+ metadata=metadata,
+ model=model,
+ temperature=temperature,
+ stream=False,
+ thread=thread,
+ tools=tools,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return self.runs.poll(run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms)
+
@overload
def create_and_run_stream(
self,
@@ -967,6 +1006,47 @@ async def create_and_run(
stream_cls=AsyncStream[AssistantStreamEvent],
)
+ async def create_and_run_poll(
+ self,
+ *,
+ assistant_id: str,
+ instructions: Optional[str] | NotGiven = NOT_GIVEN,
+ metadata: Optional[object] | NotGiven = NOT_GIVEN,
+ model: Optional[str] | NotGiven = NOT_GIVEN,
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
+ thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
+ tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+ poll_interval_ms: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Run:
+ """
+ A helper to create a thread, start a run and then poll for a terminal state.
+ More information on Run lifecycles can be found here:
+ https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ """
+ run = await self.create_and_run(
+ assistant_id=assistant_id,
+ instructions=instructions,
+ metadata=metadata,
+ model=model,
+ temperature=temperature,
+ stream=False,
+ thread=thread,
+ tools=tools,
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ )
+ return await self.runs.poll(
+ run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms
+ )
+
@overload
def create_and_run_stream(
self,
diff --git a/src/openai/types/beta/threads/message_list_params.py b/src/openai/types/beta/threads/message_list_params.py
index 8b139caa93..18c2442fb5 100644
--- a/src/openai/types/beta/threads/message_list_params.py
+++ b/src/openai/types/beta/threads/message_list_params.py
@@ -37,3 +37,6 @@ class MessageListParams(TypedDict, total=False):
`asc` for ascending order and `desc` for descending order.
"""
+
+ run_id: str
+ """Filter messages by the run ID that generated them."""
diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py
index c708c94068..22198ccbc5 100644
--- a/tests/api_resources/beta/threads/test_messages.py
+++ b/tests/api_resources/beta/threads/test_messages.py
@@ -195,6 +195,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None:
before="string",
limit=0,
order="asc",
+ run_id="string",
)
assert_matches_type(SyncCursorPage[Message], message, path=["response"])
@@ -410,6 +411,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N
before="string",
limit=0,
order="asc",
+ run_id="string",
)
assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index aabe2c7fc9..b9f392dc87 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
@@ -14,6 +14,8 @@
Run,
)
+# pyright: reportDeprecated=false
+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/test_client.py b/tests/test_client.py
index dab1cb0efd..ba85fd9d5f 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -646,6 +646,10 @@ class Model(BaseModel):
assert isinstance(exc.value.__cause__, ValidationError)
+ def test_client_max_retries_validation(self) -> None:
+ with pytest.raises(TypeError, match=r"max_retries cannot be None"):
+ OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None))
+
@pytest.mark.respx(base_url=base_url)
def test_default_stream_cls(self, respx_mock: MockRouter) -> None:
class Model(BaseModel):
@@ -1368,6 +1372,12 @@ class Model(BaseModel):
assert isinstance(exc.value.__cause__, ValidationError)
+ async def test_client_max_retries_validation(self) -> None:
+ with pytest.raises(TypeError, match=r"max_retries cannot be None"):
+ AsyncOpenAI(
+ base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
+ )
+
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
async def test_default_stream_cls(self, respx_mock: MockRouter) -> None: