From 0673da62f2f2476a3e5791122e75ec0cbfd03442 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 23 Jun 2025 11:26:16 -0700
Subject: [PATCH] release: 1.91.0 (#2423)
* feat(api): update api shapes for usage and code interpreter
* release: 1.91.0
---------
Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
---
.release-please-manifest.json | 2 +-
.stats.yml | 6 +--
CHANGELOG.md | 8 ++++
api.md | 2 +-
pyproject.toml | 2 +-
src/openai/_version.py | 2 +-
src/openai/resources/audio/speech.py | 14 +++++-
.../fine_tuning/checkpoints/permissions.py | 22 ++++-----
.../types/audio/speech_create_params.py | 10 +++-
src/openai/types/audio/transcription.py | 45 ++++++++++++++++-
.../audio/transcription_text_done_event.py | 30 +++++++++++-
.../types/audio/transcription_verbose.py | 14 +++++-
.../beta/realtime/session_create_params.py | 10 ++--
.../beta/realtime/session_create_response.py | 9 ++--
.../beta/realtime/session_update_event.py | 8 ++--
.../realtime/session_update_event_param.py | 8 ++--
.../permission_retrieve_response.py | 17 ++++++-
..._code_interpreter_call_code_delta_event.py | 12 +++--
...e_code_interpreter_call_code_done_event.py | 7 ++-
...e_code_interpreter_call_completed_event.py | 12 +++--
...code_interpreter_call_in_progress_event.py | 12 +++--
...ode_interpreter_call_interpreting_event.py | 12 +++--
.../response_code_interpreter_tool_call.py | 46 ++++++++----------
...sponse_code_interpreter_tool_call_param.py | 48 +++++++++----------
.../types/responses/response_output_text.py | 6 +++
.../responses/response_output_text_param.py | 6 +++
tests/api_resources/audio/test_speech.py | 2 +
.../beta/realtime/test_sessions.py | 4 +-
.../checkpoints/test_permissions.py | 18 +++----
29 files changed, 264 insertions(+), 130 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 407051a9fb..f18270d528 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.90.0"
+ ".": "1.91.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index f8abf5bab6..1e0182cf22 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f411a68f272b8be0ab0c266043da33228687b9b2d76896724e3cef797de9563d.yml
-openapi_spec_hash: 89bf866ea95ecfb3d76c8833237047d6
-config_hash: dc5515e257676a27cb1ace1784aa92b3
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ef4ecb19eb61e24c49d77fef769ee243e5279bc0bdbaee8d0f8dba4da8722559.yml
+openapi_spec_hash: 1b8a9767c9f04e6865b06c41948cdc24
+config_hash: fd2af1d5eff0995bb7dc02ac9a34851d
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dc45fa7bb5..14562edfac 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 1.91.0 (2025-06-23)
+
+Full Changelog: [v1.90.0...v1.91.0](https://github.com/openai/openai-python/compare/v1.90.0...v1.91.0)
+
+### Features
+
+* **api:** update api shapes for usage and code interpreter ([060d566](https://github.com/openai/openai-python/commit/060d5661e4a1fcdb953c52facd3e668ee80f9295))
+
## 1.90.0 (2025-06-20)
Full Changelog: [v1.89.0...v1.90.0](https://github.com/openai/openai-python/compare/v1.89.0...v1.90.0)
diff --git a/api.md b/api.md
index db52398b97..25360d741e 100644
--- a/api.md
+++ b/api.md
@@ -293,7 +293,7 @@ from openai.types.fine_tuning.checkpoints import (
Methods:
- client.fine_tuning.checkpoints.permissions.create(fine_tuned_model_checkpoint, \*\*params) -> SyncPage[PermissionCreateResponse]
-- client.fine_tuning.checkpoints.permissions.retrieve(fine_tuned_model_checkpoint, \*\*params) -> SyncCursorPage[PermissionRetrieveResponse]
+- client.fine_tuning.checkpoints.permissions.retrieve(fine_tuned_model_checkpoint, \*\*params) -> PermissionRetrieveResponse
- client.fine_tuning.checkpoints.permissions.delete(permission_id, \*, fine_tuned_model_checkpoint) -> PermissionDeleteResponse
## Alpha
diff --git a/pyproject.toml b/pyproject.toml
index f66dacbf6d..1f2b8a6044 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.90.0"
+version = "1.91.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 7e515c74bd..d1cad1dd01 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.90.0" # x-release-please-version
+__version__ = "1.91.0" # x-release-please-version
diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py
index a195d7135e..fe776baae8 100644
--- a/src/openai/resources/audio/speech.py
+++ b/src/openai/resources/audio/speech.py
@@ -56,6 +56,7 @@ def create(
instructions: str | NotGiven = NOT_GIVEN,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
+ stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -85,7 +86,10 @@ def create(
`wav`, and `pcm`.
speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
- the default. Does not work with `gpt-4o-mini-tts`.
+ the default.
+
+ stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
+ `sse` is not supported for `tts-1` or `tts-1-hd`.
extra_headers: Send extra headers
@@ -106,6 +110,7 @@ def create(
"instructions": instructions,
"response_format": response_format,
"speed": speed,
+ "stream_format": stream_format,
},
speech_create_params.SpeechCreateParams,
),
@@ -147,6 +152,7 @@ async def create(
instructions: str | NotGiven = NOT_GIVEN,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
+ stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -176,7 +182,10 @@ async def create(
`wav`, and `pcm`.
speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
- the default. Does not work with `gpt-4o-mini-tts`.
+ the default.
+
+ stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
+ `sse` is not supported for `tts-1` or `tts-1-hd`.
extra_headers: Send extra headers
@@ -197,6 +206,7 @@ async def create(
"instructions": instructions,
"response_format": response_format,
"speed": speed,
+ "stream_format": stream_format,
},
speech_create_params.SpeechCreateParams,
),
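
A minimal sketch of the new parameter in use (not part of the patch), assuming a
configured client and a model that supports it; per the docstring above, `sse`
is not supported for `tts-1` or `tts-1-hd`:

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

    # Stream the audio bytes to disk; stream_format="sse" would instead
    # emit server-sent events for incremental consumption.
    with client.audio.speech.with_streaming_response.create(
        model="gpt-4o-mini-tts",
        voice="alloy",
        input="Hello from the 1.91.0 release.",
        stream_format="audio",
    ) as response:
        response.stream_to_file("speech.mp3")
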
diff --git a/src/openai/resources/fine_tuning/checkpoints/permissions.py b/src/openai/resources/fine_tuning/checkpoints/permissions.py
index ceb747a367..547e42ecac 100644
--- a/src/openai/resources/fine_tuning/checkpoints/permissions.py
+++ b/src/openai/resources/fine_tuning/checkpoints/permissions.py
@@ -9,11 +9,11 @@
from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform
+from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ....pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from ....pagination import SyncPage, AsyncPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params
from ....types.fine_tuning.checkpoints.permission_create_response import PermissionCreateResponse
@@ -101,7 +101,7 @@ def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncCursorPage[PermissionRetrieveResponse]:
+ ) -> PermissionRetrieveResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
@@ -129,9 +129,8 @@ def retrieve(
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
- return self._get_api_list(
+ return self._get(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
- page=SyncCursorPage[PermissionRetrieveResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -147,7 +146,7 @@ def retrieve(
permission_retrieve_params.PermissionRetrieveParams,
),
),
- model=PermissionRetrieveResponse,
+ cast_to=PermissionRetrieveResponse,
)
def delete(
@@ -256,7 +255,7 @@ def create(
method="post",
)
- def retrieve(
+ async def retrieve(
self,
fine_tuned_model_checkpoint: str,
*,
@@ -270,7 +269,7 @@ def retrieve(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[PermissionRetrieveResponse, AsyncCursorPage[PermissionRetrieveResponse]]:
+ ) -> PermissionRetrieveResponse:
"""
**NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
@@ -298,15 +297,14 @@ def retrieve(
raise ValueError(
f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
)
- return self._get_api_list(
+ return await self._get(
f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
- page=AsyncCursorPage[PermissionRetrieveResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
- query=maybe_transform(
+ query=await async_maybe_transform(
{
"after": after,
"limit": limit,
@@ -316,7 +314,7 @@ def retrieve(
permission_retrieve_params.PermissionRetrieveParams,
),
),
- model=PermissionRetrieveResponse,
+ cast_to=PermissionRetrieveResponse,
)
async def delete(
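
Both the sync and async `retrieve` now return the response object directly
instead of a pager, and the async variant must be awaited. A sketch of the
async path, using the checkpoint ID from the tests at the end of this patch:

    import asyncio

    from openai import AsyncOpenAI


    async def main() -> None:
        client = AsyncOpenAI()  # this endpoint requires an admin API key
        resp = await client.fine_tuning.checkpoints.permissions.retrieve(
            fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
        )
        for permission in resp.data:
            print(permission.id, permission.project_id)


    asyncio.run(main())
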
diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py
index 905ca5c3a8..4ee4a3c4e4 100644
--- a/src/openai/types/audio/speech_create_params.py
+++ b/src/openai/types/audio/speech_create_params.py
@@ -48,6 +48,12 @@ class SpeechCreateParams(TypedDict, total=False):
speed: float
"""The speed of the generated audio.
- Select a value from `0.25` to `4.0`. `1.0` is the default. Does not work with
- `gpt-4o-mini-tts`.
+ Select a value from `0.25` to `4.0`. `1.0` is the default.
+ """
+
+ stream_format: Literal["sse", "audio"]
+ """The format to stream the audio in.
+
+ Supported formats are `sse` and `audio`. `sse` is not supported for `tts-1` or
+ `tts-1-hd`.
"""
diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py
index 1576385404..7115eb9edb 100644
--- a/src/openai/types/audio/transcription.py
+++ b/src/openai/types/audio/transcription.py
@@ -1,10 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+from ..._utils import PropertyInfo
from ..._models import BaseModel
-__all__ = ["Transcription", "Logprob"]
+__all__ = ["Transcription", "Logprob", "Usage", "UsageTokens", "UsageTokensInputTokenDetails", "UsageDuration"]
class Logprob(BaseModel):
@@ -18,6 +20,42 @@ class Logprob(BaseModel):
"""The log probability of the token."""
+class UsageTokensInputTokenDetails(BaseModel):
+ audio_tokens: Optional[int] = None
+ """Number of audio tokens billed for this request."""
+
+ text_tokens: Optional[int] = None
+ """Number of text tokens billed for this request."""
+
+
+class UsageTokens(BaseModel):
+ input_tokens: int
+ """Number of input tokens billed for this request."""
+
+ output_tokens: int
+ """Number of output tokens generated."""
+
+ total_tokens: int
+ """Total number of tokens used (input + output)."""
+
+ type: Literal["tokens"]
+ """The type of the usage object. Always `tokens` for this variant."""
+
+ input_token_details: Optional[UsageTokensInputTokenDetails] = None
+ """Details about the input tokens billed for this request."""
+
+
+class UsageDuration(BaseModel):
+ duration: float
+ """Duration of the input audio in seconds."""
+
+ type: Literal["duration"]
+ """The type of the usage object. Always `duration` for this variant."""
+
+
+Usage: TypeAlias = Annotated[Union[UsageTokens, UsageDuration], PropertyInfo(discriminator="type")]
+
+
class Transcription(BaseModel):
text: str
"""The transcribed text."""
@@ -28,3 +66,6 @@ class Transcription(BaseModel):
Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`
if `logprobs` is added to the `include` array.
"""
+
+ usage: Optional[Usage] = None
+ """Token usage statistics for the request."""
diff --git a/src/openai/types/audio/transcription_text_done_event.py b/src/openai/types/audio/transcription_text_done_event.py
index c8875a1bdb..9665edc565 100644
--- a/src/openai/types/audio/transcription_text_done_event.py
+++ b/src/openai/types/audio/transcription_text_done_event.py
@@ -5,7 +5,7 @@
from ..._models import BaseModel
-__all__ = ["TranscriptionTextDoneEvent", "Logprob"]
+__all__ = ["TranscriptionTextDoneEvent", "Logprob", "Usage", "UsageInputTokenDetails"]
class Logprob(BaseModel):
@@ -19,6 +19,31 @@ class Logprob(BaseModel):
"""The log probability of the token."""
+class UsageInputTokenDetails(BaseModel):
+ audio_tokens: Optional[int] = None
+ """Number of audio tokens billed for this request."""
+
+ text_tokens: Optional[int] = None
+ """Number of text tokens billed for this request."""
+
+
+class Usage(BaseModel):
+ input_tokens: int
+ """Number of input tokens billed for this request."""
+
+ output_tokens: int
+ """Number of output tokens generated."""
+
+ total_tokens: int
+ """Total number of tokens used (input + output)."""
+
+ type: Literal["tokens"]
+ """The type of the usage object. Always `tokens` for this variant."""
+
+ input_token_details: Optional[UsageInputTokenDetails] = None
+ """Details about the input tokens billed for this request."""
+
+
class TranscriptionTextDoneEvent(BaseModel):
text: str
"""The text that was transcribed."""
@@ -33,3 +58,6 @@ class TranscriptionTextDoneEvent(BaseModel):
[create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
with the `include[]` parameter set to `logprobs`.
"""
+
+ usage: Optional[Usage] = None
+ """Usage statistics for models billed by token usage."""
diff --git a/src/openai/types/audio/transcription_verbose.py b/src/openai/types/audio/transcription_verbose.py
index 2a670189e0..cc6d769a65 100644
--- a/src/openai/types/audio/transcription_verbose.py
+++ b/src/openai/types/audio/transcription_verbose.py
@@ -1,12 +1,21 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
+from typing_extensions import Literal
from ..._models import BaseModel
from .transcription_word import TranscriptionWord
from .transcription_segment import TranscriptionSegment
-__all__ = ["TranscriptionVerbose"]
+__all__ = ["TranscriptionVerbose", "Usage"]
+
+
+class Usage(BaseModel):
+ duration: float
+ """Duration of the input audio in seconds."""
+
+ type: Literal["duration"]
+ """The type of the usage object. Always `duration` for this variant."""
class TranscriptionVerbose(BaseModel):
@@ -22,5 +31,8 @@ class TranscriptionVerbose(BaseModel):
segments: Optional[List[TranscriptionSegment]] = None
"""Segments of the transcribed text and their corresponding details."""
+ usage: Optional[Usage] = None
+ """Usage statistics for models billed by audio input duration."""
+
words: Optional[List[TranscriptionWord]] = None
"""Extracted words and their corresponding timestamps."""
diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py
index cebf67c732..e04985d2b6 100644
--- a/src/openai/types/beta/realtime/session_create_params.py
+++ b/src/openai/types/beta/realtime/session_create_params.py
@@ -3,12 +3,12 @@
from __future__ import annotations
from typing import List, Union, Iterable
-from typing_extensions import Literal, TypeAlias, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
__all__ = [
"SessionCreateParams",
"ClientSecret",
- "ClientSecretExpiresAt",
+ "ClientSecretExpiresAfter",
"InputAudioNoiseReduction",
"InputAudioTranscription",
"Tool",
@@ -156,8 +156,8 @@ class SessionCreateParams(TypedDict, total=False):
"""
-class ClientSecretExpiresAt(TypedDict, total=False):
- anchor: Literal["created_at"]
+class ClientSecretExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["created_at"]]
"""The anchor point for the ephemeral token expiration.
Only `created_at` is currently supported.
@@ -171,7 +171,7 @@ class ClientSecretExpiresAt(TypedDict, total=False):
class ClientSecret(TypedDict, total=False):
- expires_at: ClientSecretExpiresAt
+ expires_after: ClientSecretExpiresAfter
"""Configuration for the ephemeral token expiration."""
diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py
index 81fed95fa9..15d5c1742b 100644
--- a/src/openai/types/beta/realtime/session_create_response.py
+++ b/src/openai/types/beta/realtime/session_create_response.py
@@ -33,10 +33,7 @@ class ClientSecret(BaseModel):
class InputAudioTranscription(BaseModel):
model: Optional[str] = None
- """
- The model to use for transcription, `whisper-1` is the only currently supported
- model.
- """
+ """The model to use for transcription."""
class Tool(BaseModel):
@@ -116,8 +113,8 @@ class SessionCreateResponse(BaseModel):
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
- asynchronously through Whisper and should be treated as rough guidance rather
- than the representation understood by the model.
+ asynchronously and should be treated as rough guidance rather than the
+ representation understood by the model.
"""
instructions: Optional[str] = None
diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py
index 8bb6a0e266..789b9cd1e5 100644
--- a/src/openai/types/beta/realtime/session_update_event.py
+++ b/src/openai/types/beta/realtime/session_update_event.py
@@ -9,7 +9,7 @@
"SessionUpdateEvent",
"Session",
"SessionClientSecret",
- "SessionClientSecretExpiresAt",
+ "SessionClientSecretExpiresAfter",
"SessionInputAudioNoiseReduction",
"SessionInputAudioTranscription",
"SessionTool",
@@ -19,8 +19,8 @@
]
-class SessionClientSecretExpiresAt(BaseModel):
- anchor: Optional[Literal["created_at"]] = None
+class SessionClientSecretExpiresAfter(BaseModel):
+ anchor: Literal["created_at"]
"""The anchor point for the ephemeral token expiration.
Only `created_at` is currently supported.
@@ -34,7 +34,7 @@ class SessionClientSecretExpiresAt(BaseModel):
class SessionClientSecret(BaseModel):
- expires_at: Optional[SessionClientSecretExpiresAt] = None
+ expires_after: Optional[SessionClientSecretExpiresAfter] = None
"""Configuration for the ephemeral token expiration."""
diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py
index a10de540d0..2dfa2c26f3 100644
--- a/src/openai/types/beta/realtime/session_update_event_param.py
+++ b/src/openai/types/beta/realtime/session_update_event_param.py
@@ -9,7 +9,7 @@
"SessionUpdateEventParam",
"Session",
"SessionClientSecret",
- "SessionClientSecretExpiresAt",
+ "SessionClientSecretExpiresAfter",
"SessionInputAudioNoiseReduction",
"SessionInputAudioTranscription",
"SessionTool",
@@ -19,8 +19,8 @@
]
-class SessionClientSecretExpiresAt(TypedDict, total=False):
- anchor: Literal["created_at"]
+class SessionClientSecretExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["created_at"]]
"""The anchor point for the ephemeral token expiration.
Only `created_at` is currently supported.
@@ -34,7 +34,7 @@ class SessionClientSecretExpiresAt(TypedDict, total=False):
class SessionClientSecret(TypedDict, total=False):
- expires_at: SessionClientSecretExpiresAt
+ expires_after: SessionClientSecretExpiresAfter
"""Configuration for the ephemeral token expiration."""
diff --git a/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py b/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py
index 4c540179e7..14c73b55d0 100644
--- a/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py
+++ b/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py
@@ -1,13 +1,14 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from typing import List, Optional
from typing_extensions import Literal
from ...._models import BaseModel
-__all__ = ["PermissionRetrieveResponse"]
+__all__ = ["PermissionRetrieveResponse", "Data"]
-class PermissionRetrieveResponse(BaseModel):
+class Data(BaseModel):
id: str
"""The permission identifier, which can be referenced in the API endpoints."""
@@ -19,3 +20,15 @@ class PermissionRetrieveResponse(BaseModel):
project_id: str
"""The project identifier that the permission is for."""
+
+
+class PermissionRetrieveResponse(BaseModel):
+ data: List[Data]
+
+ has_more: bool
+
+ object: Literal["list"]
+
+ first_id: Optional[str] = None
+
+ last_id: Optional[str] = None
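
With `SyncCursorPage` gone, callers page manually using the `after` cursor and
the `has_more`/`last_id` fields defined above — a sketch, assuming an admin API
key:

    from openai import NOT_GIVEN, OpenAI

    client = OpenAI()

    after = NOT_GIVEN
    while True:
        page = client.fine_tuning.checkpoints.permissions.retrieve(
            fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
            after=after,
        )
        for permission in page.data:
            print(permission.id)
        if not page.has_more or page.last_id is None:
            break
        after = page.last_id  # cursor for the next page
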
diff --git a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
index d222431504..c5fef939b1 100644
--- a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -9,13 +9,19 @@
class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
delta: str
- """The partial code snippet added by the code interpreter."""
+ """The partial code snippet being streamed by the code interpreter."""
+
+ item_id: str
+ """The unique identifier of the code interpreter tool call item."""
output_index: int
- """The index of the output item that the code interpreter call is in progress."""
+ """
+ The index of the output item in the response for which the code is being
+ streamed.
+ """
sequence_number: int
- """The sequence number of this event."""
+ """The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call_code.delta"]
"""The type of the event. Always `response.code_interpreter_call_code.delta`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
index 1ce6796a0e..5201a02d36 100644
--- a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -11,11 +11,14 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
code: str
"""The final code snippet output by the code interpreter."""
+ item_id: str
+ """The unique identifier of the code interpreter tool call item."""
+
output_index: int
- """The index of the output item that the code interpreter call is in progress."""
+ """The index of the output item in the response for which the code is finalized."""
sequence_number: int
- """The sequence number of this event."""
+ """The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call_code.done"]
"""The type of the event. Always `response.code_interpreter_call_code.done`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_completed_event.py b/src/openai/types/responses/response_code_interpreter_call_completed_event.py
index 3a3a718971..bb9563a16b 100644
--- a/src/openai/types/responses/response_code_interpreter_call_completed_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_completed_event.py
@@ -3,20 +3,22 @@
from typing_extensions import Literal
from ..._models import BaseModel
-from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
__all__ = ["ResponseCodeInterpreterCallCompletedEvent"]
class ResponseCodeInterpreterCallCompletedEvent(BaseModel):
- code_interpreter_call: ResponseCodeInterpreterToolCall
- """A tool call to run code."""
+ item_id: str
+ """The unique identifier of the code interpreter tool call item."""
output_index: int
- """The index of the output item that the code interpreter call is in progress."""
+ """
+ The index of the output item in the response for which the code interpreter call
+ is completed.
+ """
sequence_number: int
- """The sequence number of this event."""
+ """The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call.completed"]
"""The type of the event. Always `response.code_interpreter_call.completed`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
index d1c8230919..9c6b221004 100644
--- a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
@@ -3,20 +3,22 @@
from typing_extensions import Literal
from ..._models import BaseModel
-from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
__all__ = ["ResponseCodeInterpreterCallInProgressEvent"]
class ResponseCodeInterpreterCallInProgressEvent(BaseModel):
- code_interpreter_call: ResponseCodeInterpreterToolCall
- """A tool call to run code."""
+ item_id: str
+ """The unique identifier of the code interpreter tool call item."""
output_index: int
- """The index of the output item that the code interpreter call is in progress."""
+ """
+ The index of the output item in the response for which the code interpreter call
+ is in progress.
+ """
sequence_number: int
- """The sequence number of this event."""
+ """The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call.in_progress"]
"""The type of the event. Always `response.code_interpreter_call.in_progress`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
index 7f4d294f56..f6191e4165 100644
--- a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
@@ -3,20 +3,22 @@
from typing_extensions import Literal
from ..._models import BaseModel
-from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
__all__ = ["ResponseCodeInterpreterCallInterpretingEvent"]
class ResponseCodeInterpreterCallInterpretingEvent(BaseModel):
- code_interpreter_call: ResponseCodeInterpreterToolCall
- """A tool call to run code."""
+ item_id: str
+ """The unique identifier of the code interpreter tool call item."""
output_index: int
- """The index of the output item that the code interpreter call is in progress."""
+ """
+ The index of the output item in the response for which the code interpreter is
+ interpreting code.
+ """
sequence_number: int
- """The sequence number of this event."""
+ """The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call.interpreting"]
"""The type of the event. Always `response.code_interpreter_call.interpreting`."""
diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py
index 762542f398..7e4dc9f984 100644
--- a/src/openai/types/responses/response_code_interpreter_tool_call.py
+++ b/src/openai/types/responses/response_code_interpreter_tool_call.py
@@ -6,50 +6,46 @@
from ..._utils import PropertyInfo
from ..._models import BaseModel
-__all__ = ["ResponseCodeInterpreterToolCall", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"]
+__all__ = ["ResponseCodeInterpreterToolCall", "Output", "OutputLogs", "OutputImage"]
-class ResultLogs(BaseModel):
+class OutputLogs(BaseModel):
logs: str
- """The logs of the code interpreter tool call."""
+ """The logs output from the code interpreter."""
type: Literal["logs"]
- """The type of the code interpreter text output. Always `logs`."""
+ """The type of the output. Always 'logs'."""
-class ResultFilesFile(BaseModel):
- file_id: str
- """The ID of the file."""
+class OutputImage(BaseModel):
+ type: Literal["image"]
+ """The type of the output. Always 'image'."""
- mime_type: str
- """The MIME type of the file."""
+ url: str
+ """The URL of the image output from the code interpreter."""
-class ResultFiles(BaseModel):
- files: List[ResultFilesFile]
-
- type: Literal["files"]
- """The type of the code interpreter file output. Always `files`."""
-
-
-Result: TypeAlias = Annotated[Union[ResultLogs, ResultFiles], PropertyInfo(discriminator="type")]
+Output: TypeAlias = Annotated[Union[OutputLogs, OutputImage], PropertyInfo(discriminator="type")]
class ResponseCodeInterpreterToolCall(BaseModel):
id: str
"""The unique ID of the code interpreter tool call."""
- code: str
- """The code to run."""
+ code: Optional[str] = None
+ """The code to run, or null if not available."""
+
+ container_id: str
+ """The ID of the container used to run the code."""
+
+ outputs: Optional[List[Output]] = None
+ """The outputs generated by the code interpreter, such as logs or images.
- results: List[Result]
- """The results of the code interpreter tool call."""
+ Can be null if no outputs are available.
+ """
- status: Literal["in_progress", "interpreting", "completed"]
+ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]
"""The status of the code interpreter tool call."""
type: Literal["code_interpreter_call"]
"""The type of the code interpreter tool call. Always `code_interpreter_call`."""
-
- container_id: Optional[str] = None
- """The ID of the container used to run the code."""
diff --git a/src/openai/types/responses/response_code_interpreter_tool_call_param.py b/src/openai/types/responses/response_code_interpreter_tool_call_param.py
index be0f909a6a..69e01f99ed 100644
--- a/src/openai/types/responses/response_code_interpreter_tool_call_param.py
+++ b/src/openai/types/responses/response_code_interpreter_tool_call_param.py
@@ -2,53 +2,49 @@
from __future__ import annotations
-from typing import Union, Iterable
+from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
-__all__ = ["ResponseCodeInterpreterToolCallParam", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"]
+__all__ = ["ResponseCodeInterpreterToolCallParam", "Output", "OutputLogs", "OutputImage"]
-class ResultLogs(TypedDict, total=False):
+class OutputLogs(TypedDict, total=False):
logs: Required[str]
- """The logs of the code interpreter tool call."""
+ """The logs output from the code interpreter."""
type: Required[Literal["logs"]]
- """The type of the code interpreter text output. Always `logs`."""
+ """The type of the output. Always 'logs'."""
-class ResultFilesFile(TypedDict, total=False):
- file_id: Required[str]
- """The ID of the file."""
+class OutputImage(TypedDict, total=False):
+ type: Required[Literal["image"]]
+ """The type of the output. Always 'image'."""
- mime_type: Required[str]
- """The MIME type of the file."""
+ url: Required[str]
+ """The URL of the image output from the code interpreter."""
-class ResultFiles(TypedDict, total=False):
- files: Required[Iterable[ResultFilesFile]]
-
- type: Required[Literal["files"]]
- """The type of the code interpreter file output. Always `files`."""
-
-
-Result: TypeAlias = Union[ResultLogs, ResultFiles]
+Output: TypeAlias = Union[OutputLogs, OutputImage]
class ResponseCodeInterpreterToolCallParam(TypedDict, total=False):
id: Required[str]
"""The unique ID of the code interpreter tool call."""
- code: Required[str]
- """The code to run."""
+ code: Required[Optional[str]]
+ """The code to run, or null if not available."""
+
+ container_id: Required[str]
+ """The ID of the container used to run the code."""
+
+ outputs: Required[Optional[Iterable[Output]]]
+ """The outputs generated by the code interpreter, such as logs or images.
- results: Required[Iterable[Result]]
- """The results of the code interpreter tool call."""
+ Can be null if no outputs are available.
+ """
- status: Required[Literal["in_progress", "interpreting", "completed"]]
+ status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]]
"""The status of the code interpreter tool call."""
type: Required[Literal["code_interpreter_call"]]
"""The type of the code interpreter tool call. Always `code_interpreter_call`."""
-
- container_id: str
- """The ID of the container used to run the code."""
diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py
index 1ea9a4ba93..aa97b629f0 100644
--- a/src/openai/types/responses/response_output_text.py
+++ b/src/openai/types/responses/response_output_text.py
@@ -22,6 +22,9 @@ class AnnotationFileCitation(BaseModel):
file_id: str
"""The ID of the file."""
+ filename: str
+ """The filename of the file cited."""
+
index: int
"""The index of the file in the list of files."""
@@ -56,6 +59,9 @@ class AnnotationContainerFileCitation(BaseModel):
file_id: str
"""The ID of the file."""
+ filename: str
+ """The filename of the container file cited."""
+
start_index: int
"""The index of the first character of the container file citation in the message."""
diff --git a/src/openai/types/responses/response_output_text_param.py b/src/openai/types/responses/response_output_text_param.py
index 207901e8ef..63d2d394a8 100644
--- a/src/openai/types/responses/response_output_text_param.py
+++ b/src/openai/types/responses/response_output_text_param.py
@@ -21,6 +21,9 @@ class AnnotationFileCitation(TypedDict, total=False):
file_id: Required[str]
"""The ID of the file."""
+ filename: Required[str]
+ """The filename of the file cited."""
+
index: Required[int]
"""The index of the file in the list of files."""
@@ -55,6 +58,9 @@ class AnnotationContainerFileCitation(TypedDict, total=False):
file_id: Required[str]
"""The ID of the file."""
+ filename: Required[str]
+ """The filename of the container file cited."""
+
start_index: Required[int]
"""The index of the first character of the container file citation in the message."""
diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py
index 01746b3a3a..2c77f38949 100644
--- a/tests/api_resources/audio/test_speech.py
+++ b/tests/api_resources/audio/test_speech.py
@@ -44,6 +44,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou
instructions="instructions",
response_format="mp3",
speed=0.25,
+ stream_format="sse",
)
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
assert speech.json() == {"foo": "bar"}
@@ -110,6 +111,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re
instructions="instructions",
response_format="mp3",
speed=0.25,
+ stream_format="sse",
)
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
assert speech.json() == {"foo": "bar"}
diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py
index 9b78956a98..3c55abf80c 100644
--- a/tests/api_resources/beta/realtime/test_sessions.py
+++ b/tests/api_resources/beta/realtime/test_sessions.py
@@ -26,7 +26,7 @@ def test_method_create(self, client: OpenAI) -> None:
def test_method_create_with_all_params(self, client: OpenAI) -> None:
session = client.beta.realtime.sessions.create(
client_secret={
- "expires_at": {
+ "expires_after": {
"anchor": "created_at",
"seconds": 0,
}
@@ -103,7 +103,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
session = await async_client.beta.realtime.sessions.create(
client_secret={
- "expires_at": {
+ "expires_after": {
"anchor": "created_at",
"seconds": 0,
}
diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
index 4944597624..9420e3a34c 100644
--- a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
+++ b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
@@ -9,7 +9,7 @@
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
-from openai.pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage
+from openai.pagination import SyncPage, AsyncPage
from openai.types.fine_tuning.checkpoints import (
PermissionCreateResponse,
PermissionDeleteResponse,
@@ -71,7 +71,7 @@ def test_method_retrieve(self, client: OpenAI) -> None:
permission = client.fine_tuning.checkpoints.permissions.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
- assert_matches_type(SyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
@@ -82,7 +82,7 @@ def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
order="ascending",
project_id="project_id",
)
- assert_matches_type(SyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
@@ -93,7 +93,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
- assert_matches_type(SyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
@@ -104,7 +104,7 @@ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
- assert_matches_type(SyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -222,7 +222,7 @@ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
- assert_matches_type(AsyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
@@ -233,7 +233,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI)
order="ascending",
project_id="project_id",
)
- assert_matches_type(AsyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
@@ -244,7 +244,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
- assert_matches_type(AsyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
@@ -255,7 +255,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = await response.parse()
- assert_matches_type(AsyncCursorPage[PermissionRetrieveResponse], permission, path=["response"])
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
assert cast(Any, response.is_closed) is True