feat: extract out ImageModel, AudioModel, SpeechModel #1586

Merged (1 commit) on Aug 2, 2024
16 changes: 14 additions & 2 deletions api.md
@@ -91,7 +91,7 @@ Methods:
Types:

```python
-from openai.types import Image, ImagesResponse
+from openai.types import Image, ImageModel, ImagesResponse
```

Methods:
@@ -102,6 +102,12 @@ Methods:

# Audio

+Types:
+
+```python
+from openai.types import AudioModel
+```

## Transcriptions

Types:
@@ -128,6 +134,12 @@ Methods:

## Speech

+Types:
+
+```python
+from openai.types.audio import SpeechModel
+```

Methods:

- <code title="post /audio/speech">client.audio.speech.<a href="./src/openai/resources/audio/speech.py">create</a>(\*\*<a href="src/openai/types/audio/speech_create_params.py">params</a>) -> HttpxBinaryResponseContent</code>
@@ -137,7 +149,7 @@ Methods:
Types:

```python
-from openai.types import Moderation, ModerationCreateResponse
+from openai.types import Moderation, ModerationModel, ModerationCreateResponse
```

Methods:
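For reference, a minimal sketch of importing the newly exported aliases (import paths are taken from the api.md entries above; the image and moderation model strings are assumptions, since only "whisper-1" and the tts-1 family appear in this diff):

```python
# Illustrative only: the aliases are typing Literals over model-name strings,
# so variables annotated with them still hold ordinary strings.
from openai.types import AudioModel, ImageModel, ModerationModel
from openai.types.audio import SpeechModel

speech_model: SpeechModel = "tts-1-hd"       # value shown in the speech.py diff below
transcribe_model: AudioModel = "whisper-1"   # value shown in the transcriptions.py diff below
image_model: ImageModel = "dall-e-3"         # assumed value, not part of this diff
moderation_model: ModerationModel = "text-moderation-latest"  # assumed value, not part of this diff
```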
5 changes: 3 additions & 2 deletions src/openai/resources/audio/speech.py
@@ -23,6 +23,7 @@
)
from ...types.audio import speech_create_params
from ..._base_client import make_request_options
+from ...types.audio.speech_model import SpeechModel

__all__ = ["Speech", "AsyncSpeech"]

@@ -40,7 +41,7 @@ def create(
self,
*,
input: str,
-model: Union[str, Literal["tts-1", "tts-1-hd"]],
+model: Union[str, SpeechModel],
voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
@@ -113,7 +114,7 @@ async def create(
self,
*,
input: str,
-model: Union[str, Literal["tts-1", "tts-1-hd"]],
+model: Union[str, SpeechModel],
voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
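As a usage sketch (not part of the diff), the widened `model` parameter accepts either the alias or a plain string, so existing call sites keep working:

```python
from openai import OpenAI
from openai.types.audio import SpeechModel

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

model: SpeechModel = "tts-1"
response = client.audio.speech.create(
    model=model,  # equivalent to passing the string "tts-1" directly
    voice="alloy",
    input="Typed model parameters behave exactly like the old string literals.",
)
audio_bytes = response.read()  # binary response content, e.g. to write out as an .mp3 file
```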
5 changes: 3 additions & 2 deletions src/openai/resources/audio/transcriptions.py
@@ -20,6 +20,7 @@
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...types.audio import transcription_create_params
from ..._base_client import make_request_options
+from ...types.audio_model import AudioModel
from ...types.audio.transcription import Transcription

__all__ = ["Transcriptions", "AsyncTranscriptions"]
@@ -38,7 +39,7 @@ def create(
self,
*,
file: FileTypes,
-model: Union[str, Literal["whisper-1"]],
+model: Union[str, AudioModel],
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
@@ -134,7 +135,7 @@ async def create(
self,
*,
file: FileTypes,
-model: Union[str, Literal["whisper-1"]],
+model: Union[str, AudioModel],
language: str | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
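The same pattern applies to transcriptions (and to translations in the next file), sketched here assuming a local audio file exists at the given path:

```python
from openai import OpenAI
from openai.types import AudioModel

client = OpenAI()

model: AudioModel = "whisper-1"
with open("meeting.mp3", "rb") as audio_file:  # hypothetical local file
    transcription = client.audio.transcriptions.create(
        model=model,
        file=audio_file,
    )
print(transcription.text)
```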
6 changes: 3 additions & 3 deletions src/openai/resources/audio/translations.py
@@ -3,7 +3,6 @@
from __future__ import annotations

from typing import Union, Mapping, cast
-from typing_extensions import Literal

import httpx

@@ -20,6 +19,7 @@
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...types.audio import translation_create_params
from ..._base_client import make_request_options
+from ...types.audio_model import AudioModel
from ...types.audio.translation import Translation

__all__ = ["Translations", "AsyncTranslations"]
@@ -38,7 +38,7 @@ def create(
self,
*,
file: FileTypes,
-model: Union[str, Literal["whisper-1"]],
+model: Union[str, AudioModel],
prompt: str | NotGiven = NOT_GIVEN,
response_format: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
@@ -119,7 +119,7 @@ async def create(
self,
*,
file: FileTypes,
-model: Union[str, Literal["whisper-1"]],
+model: Union[str, AudioModel],
prompt: str | NotGiven = NOT_GIVEN,
response_format: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
57 changes: 3 additions & 54 deletions src/openai/resources/beta/assistants.py
@@ -23,6 +23,7 @@
assistant_update_params,
)
from ..._base_client import AsyncPaginator, make_request_options
+from ...types.chat_model import ChatModel
from ...types.beta.assistant import Assistant
from ...types.beta.assistant_deleted import AssistantDeleted
from ...types.beta.assistant_tool_param import AssistantToolParam
@@ -43,33 +44,7 @@ def with_streaming_response(self) -> AssistantsWithStreamingResponse:
def create(
self,
*,
-model: Union[
-    str,
-    Literal[
-        "gpt-4o",
-        "gpt-4o-2024-05-13",
-        "gpt-4o-mini",
-        "gpt-4o-mini-2024-07-18",
-        "gpt-4-turbo",
-        "gpt-4-turbo-2024-04-09",
-        "gpt-4-0125-preview",
-        "gpt-4-turbo-preview",
-        "gpt-4-1106-preview",
-        "gpt-4-vision-preview",
-        "gpt-4",
-        "gpt-4-0314",
-        "gpt-4-0613",
-        "gpt-4-32k",
-        "gpt-4-32k-0314",
-        "gpt-4-32k-0613",
-        "gpt-3.5-turbo",
-        "gpt-3.5-turbo-16k",
-        "gpt-3.5-turbo-0613",
-        "gpt-3.5-turbo-1106",
-        "gpt-3.5-turbo-0125",
-        "gpt-3.5-turbo-16k-0613",
-    ],
-],
+model: Union[str, ChatModel],
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
@@ -434,33 +409,7 @@ def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse:
async def create(
self,
*,
-model: Union[
-    str,
-    Literal[
-        "gpt-4o",
-        "gpt-4o-2024-05-13",
-        "gpt-4o-mini",
-        "gpt-4o-mini-2024-07-18",
-        "gpt-4-turbo",
-        "gpt-4-turbo-2024-04-09",
-        "gpt-4-0125-preview",
-        "gpt-4-turbo-preview",
-        "gpt-4-1106-preview",
-        "gpt-4-vision-preview",
-        "gpt-4",
-        "gpt-4-0314",
-        "gpt-4-0613",
-        "gpt-4-32k",
-        "gpt-4-32k-0314",
-        "gpt-4-32k-0613",
-        "gpt-3.5-turbo",
-        "gpt-3.5-turbo-16k",
-        "gpt-3.5-turbo-0613",
-        "gpt-3.5-turbo-1106",
-        "gpt-3.5-turbo-0125",
-        "gpt-3.5-turbo-16k-0613",
-    ],
-],
+model: Union[str, ChatModel],
description: Optional[str] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
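Finally, a sketch of the assistants change: the long hard-coded Literal list is replaced by the shared ChatModel alias, so any value from that alias (or a plain string) is accepted. The assistant name and instructions below are illustrative:

```python
from openai import OpenAI
from openai.types.chat_model import ChatModel  # module path added by this diff

client = OpenAI()

model: ChatModel = "gpt-4o-mini"  # one of the values previously inlined above
assistant = client.beta.assistants.create(
    model=model,
    name="Example assistant",
    instructions="Answer questions about typed model parameters.",
)
print(assistant.id)
```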