diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e853b86695..7991b3e7c7 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,6 +7,10 @@ on:
       - 'integrated/**'
       - 'stl-preview-head/**'
       - 'stl-preview-base/**'
+  pull_request:
+    branches-ignore:
+      - 'stl-preview-head/**'
+      - 'stl-preview-base/**'
 
 jobs:
   lint:
@@ -95,11 +99,11 @@ jobs:
         run: |
           rye sync --all-features
 
-      - env:
+      - env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
         run: |
           rye run python examples/demo.py
-      - env:
+      - env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
         run: |
           rye run python examples/async_demo.py
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ceafc9afb0..f18270d528 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.86.0"
+  ".": "1.91.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index c9e264655c..1e0182cf22 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-3ae9c18dd7ccfc3ac5206f24394665f563a19015cfa8847b2801a2694d012abc.yml
-openapi_spec_hash: 48175b03b58805cd5c80793c66fd54e5
-config_hash: 4caff63b74a41f71006987db702f2918
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ef4ecb19eb61e24c49d77fef769ee243e5279bc0bdbaee8d0f8dba4da8722559.yml
+openapi_spec_hash: 1b8a9767c9f04e6865b06c41948cdc24
+config_hash: fd2af1d5eff0995bb7dc02ac9a34851d
diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa75f7a2fe..14562edfac 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,73 @@
 # Changelog
 
+## 1.91.0 (2025-06-23)
+
+Full Changelog: [v1.90.0...v1.91.0](https://github.com/openai/openai-python/compare/v1.90.0...v1.91.0)
+
+### Features
+
+* **api:** update api shapes for usage and code interpreter ([060d566](https://github.com/openai/openai-python/commit/060d5661e4a1fcdb953c52facd3e668ee80f9295))
+
+## 1.90.0 (2025-06-20)
+
+Full Changelog: [v1.89.0...v1.90.0](https://github.com/openai/openai-python/compare/v1.89.0...v1.90.0)
+
+### Features
+
+* **api:** make model and inputs not required to create response ([11bd62e](https://github.com/openai/openai-python/commit/11bd62eb7e46eec748edaf2e0cecf253ffc1202c))
+
+## 1.89.0 (2025-06-20)
+
+Full Changelog: [v1.88.0...v1.89.0](https://github.com/openai/openai-python/compare/v1.88.0...v1.89.0)
+
+### Features
+
+* **client:** add support for aiohttp ([9218b07](https://github.com/openai/openai-python/commit/9218b07727bf6f6eb00953df66de6ab061fecddb))
+
+
+### Bug Fixes
+
+* **tests:** fix: tests which call HTTP endpoints directly with the example parameters ([35bcc4b](https://github.com/openai/openai-python/commit/35bcc4b80bdbaa31108650f2a515902e83794e5a))
+
+
+### Chores
+
+* **readme:** update badges ([68044ee](https://github.com/openai/openai-python/commit/68044ee85d1bf324b17d3f60c914df4725d47fc8))
+
+## 1.88.0 (2025-06-17)
+
+Full Changelog: [v1.87.0...v1.88.0](https://github.com/openai/openai-python/compare/v1.87.0...v1.88.0)
+
+### Features
+
+* **api:** manual updates ([5d18a84](https://github.com/openai/openai-python/commit/5d18a8448ecbe31597e98ec7f64d7050c831901e))
+
+
+### Chores
+
+* **ci:** enable for pull requests ([542b0ce](https://github.com/openai/openai-python/commit/542b0ce98f14ccff4f9e1bcbd3a9ea5e4f846638))
+* **internal:** minor formatting ([29d723d](https://github.com/openai/openai-python/commit/29d723d1f1baf2a5843293c8647dc7baa16d56d1))
+
+## 1.87.0 (2025-06-16)
+
+Full Changelog: [v1.86.0...v1.87.0](https://github.com/openai/openai-python/compare/v1.86.0...v1.87.0)
+
+### Features
+
+* **api:** add reusable prompt IDs ([36bfe6e](https://github.com/openai/openai-python/commit/36bfe6e8ae12a31624ba1a360d9260f0aeec448a))
+
+
+### Bug Fixes
+
+* **client:** update service_tier on `client.beta.chat.completions` ([aa488d5](https://github.com/openai/openai-python/commit/aa488d5cf210d8640f87216538d4ff79d7181f2a))
+
+
+### Chores
+
+* **internal:** codegen related update ([b1a31e5](https://github.com/openai/openai-python/commit/b1a31e5ef4387d9f82cf33f9461371651788d381))
+* **internal:** update conftest.py ([bba0213](https://github.com/openai/openai-python/commit/bba0213842a4c161f2235e526d50901a336eecef))
+* **tests:** add tests for httpx client instantiation & proxies ([bc93712](https://github.com/openai/openai-python/commit/bc9371204f457aee9ed9b6ec1b61c2084f32faf1))
+
 ## 1.86.0 (2025-06-10)
 
 Full Changelog: [v1.85.0...v1.86.0](https://github.com/openai/openai-python/compare/v1.85.0...v1.86.0)
diff --git a/README.md b/README.md
index b83cb47c74..4861e4aaab 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # OpenAI Python API library
 
-[![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/)
+[![PyPI version](<https://img.shields.io/pypi/v/openai.svg?label=pypi%20(stable)>)](https://pypi.org/project/openai/)
 
 The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+
 application. The library includes type definitions for all request params and response fields,
@@ -145,6 +145,45 @@ asyncio.run(main())
 
 Functionality between the synchronous and asynchronous clients is otherwise identical.
 
+### With aiohttp
+
+By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend.
+
+You can enable this by installing `aiohttp`:
+
+```sh
+# install from PyPI
+pip install openai[aiohttp]
+```
+
+Then instantiate the client with `http_client=DefaultAioHttpClient()`:
+
+```python
+import os
+import asyncio
+from openai import DefaultAioHttpClient
+from openai import AsyncOpenAI
+
+
+async def main() -> None:
+    async with AsyncOpenAI(
+        api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
+        http_client=DefaultAioHttpClient(),
+    ) as client:
+        chat_completion = await client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": "Say this is a test",
+                }
+            ],
+            model="gpt-4o",
+        )
+
+
+asyncio.run(main())
+```
+
 ## Streaming responses
 
 We provide support for streaming responses using Server-Sent Events (SSE).
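Reviewer note: the new aiohttp backend composes with the rest of the async surface, including streaming. A minimal sketch, not part of the diff, assuming the `aiohttp` extra is installed:

```python
import asyncio

from openai import AsyncOpenAI, DefaultAioHttpClient


async def main() -> None:
    # Same client surface as the httpx default; only the transport changes.
    async with AsyncOpenAI(http_client=DefaultAioHttpClient()) as client:
        stream = await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Say this is a test"}],
            stream=True,
        )
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="")


asyncio.run(main())
```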
diff --git a/api.md b/api.md
index 732436aacd..25360d741e 100644
--- a/api.md
+++ b/api.md
@@ -750,6 +750,7 @@ from openai.types.responses import (
     ResponseOutputRefusal,
     ResponseOutputText,
     ResponseOutputTextAnnotationAddedEvent,
+    ResponsePrompt,
     ResponseQueuedEvent,
     ResponseReasoningDeltaEvent,
     ResponseReasoningDoneEvent,
diff --git a/pyproject.toml b/pyproject.toml
index a9ef5bec90..1f2b8a6044 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.86.0"
+version = "1.91.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
@@ -43,6 +43,7 @@ Repository = "https://github.com/openai/openai-python"
 openai = "openai.cli:main"
 
 [project.optional-dependencies]
+aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"]
 realtime = ["websockets >= 13, < 16"]
 datalib = ["numpy >= 1", "pandas >= 1.2.3", "pandas-stubs >= 1.1.0.11"]
 voice_helpers = ["sounddevice>=0.5.1", "numpy>=2.0.2"]
@@ -68,6 +69,7 @@ dev-dependencies = [
     "types-pyaudio > 0",
     "trio >=0.22.2",
     "nest_asyncio==1.6.0",
+    "pytest-xdist>=3.6.1",
 ]
 
 [tool.rye.scripts]
@@ -139,7 +141,7 @@ replacement = '[\1](https://github.com/openai/openai-python/tree/main/\g<2>)'
 
 [tool.pytest.ini_options]
 testpaths = ["tests"]
-addopts = "--tb=short"
+addopts = "--tb=short -n auto"
 xfail_strict = true
 asyncio_mode = "auto"
 asyncio_default_fixture_loop_scope = "session"
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 9875a2b860..138fd3b4f6 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -10,6 +10,13 @@
 #   universal: false
 
 -e file:.
+aiohappyeyeballs==2.6.1
+    # via aiohttp
+aiohttp==3.12.13
+    # via httpx-aiohttp
+    # via openai
+aiosignal==1.3.2
+    # via aiohttp
 annotated-types==0.6.0
     # via pydantic
 anyio==4.1.0
@@ -19,7 +26,10 @@ argcomplete==3.1.2
     # via nox
 asttokens==2.4.1
     # via inline-snapshot
+async-timeout==5.0.1
+    # via aiohttp
 attrs==24.2.0
+    # via aiohttp
     # via outcome
     # via trio
 azure-core==1.31.0
@@ -54,22 +64,31 @@ exceptiongroup==1.2.2
     # via anyio
     # via pytest
     # via trio
+execnet==2.1.1
+    # via pytest-xdist
 executing==2.1.0
     # via inline-snapshot
 filelock==3.12.4
     # via virtualenv
+frozenlist==1.7.0
+    # via aiohttp
+    # via aiosignal
 h11==0.14.0
     # via httpcore
 httpcore==1.0.2
     # via httpx
 httpx==0.28.1
+    # via httpx-aiohttp
     # via openai
     # via respx
+httpx-aiohttp==0.1.6
+    # via openai
 idna==3.4
     # via anyio
     # via httpx
     # via requests
     # via trio
+    # via yarl
 importlib-metadata==7.0.0
 iniconfig==2.0.0
     # via pytest
@@ -85,6 +104,9 @@ msal==1.31.0
     # via msal-extensions
 msal-extensions==1.2.0
     # via azure-identity
+multidict==6.5.0
+    # via aiohttp
+    # via yarl
 mypy==1.14.1
 mypy-extensions==1.0.0
     # via black
@@ -116,6 +138,9 @@ pluggy==1.5.0
     # via pytest
 portalocker==2.10.1
     # via msal-extensions
+propcache==0.3.2
+    # via aiohttp
+    # via yarl
 pycparser==2.22
     # via cffi
 pydantic==2.10.3
@@ -129,7 +154,9 @@ pyjwt==2.8.0
 pyright==1.1.399
 pytest==8.3.3
     # via pytest-asyncio
+    # via pytest-xdist
 pytest-asyncio==0.24.0
+pytest-xdist==3.7.0
 python-dateutil==2.8.2
     # via pandas
     # via time-machine
@@ -177,6 +204,7 @@ typing-extensions==4.12.2
     # via azure-core
     # via azure-identity
     # via black
+    # via multidict
     # via mypy
     # via openai
     # via pydantic
@@ -190,5 +218,7 @@ virtualenv==20.24.5
     # via nox
 websockets==15.0.1
     # via openai
+yarl==1.20.1
+    # via aiohttp
 zipp==3.17.0
     # via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index 467abc6e90..84cb9276d8 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -10,11 +10,22 @@
 #   universal: false
 
 -e file:.
+aiohappyeyeballs==2.6.1
+    # via aiohttp
+aiohttp==3.12.13
+    # via httpx-aiohttp
+    # via openai
+aiosignal==1.3.2
+    # via aiohttp
 annotated-types==0.6.0
     # via pydantic
 anyio==4.1.0
     # via httpx
     # via openai
+async-timeout==5.0.1
+    # via aiohttp
+attrs==25.3.0
+    # via aiohttp
 certifi==2023.7.22
     # via httpcore
     # via httpx
@@ -24,17 +35,27 @@ distro==1.8.0
     # via openai
 exceptiongroup==1.2.2
     # via anyio
+frozenlist==1.7.0
+    # via aiohttp
+    # via aiosignal
 h11==0.14.0
     # via httpcore
 httpcore==1.0.2
     # via httpx
 httpx==0.28.1
+    # via httpx-aiohttp
+    # via openai
+httpx-aiohttp==0.1.6
     # via openai
 idna==3.4
     # via anyio
     # via httpx
+    # via yarl
 jiter==0.6.1
     # via openai
+multidict==6.5.0
+    # via aiohttp
+    # via yarl
 numpy==2.0.2
     # via openai
     # via pandas
@@ -43,6 +64,9 @@ pandas==2.2.3
     # via openai
 pandas-stubs==2.2.2.240807
     # via openai
+propcache==0.3.2
+    # via aiohttp
+    # via yarl
 pycparser==2.22
     # via cffi
 pydantic==2.10.3
@@ -65,6 +89,7 @@ tqdm==4.66.5
 types-pytz==2024.2.0.20241003
     # via pandas-stubs
 typing-extensions==4.12.2
+    # via multidict
     # via openai
     # via pydantic
     # via pydantic-core
@@ -72,3 +97,5 @@ tzdata==2024.1
     # via pandas
 websockets==15.0.1
     # via openai
+yarl==1.20.1
+    # via aiohttp
diff --git a/src/openai/__init__.py b/src/openai/__init__.py
index 92beeb5da1..5fb1520549 100644
--- a/src/openai/__init__.py
+++ b/src/openai/__init__.py
@@ -32,7 +32,7 @@
     APIResponseValidationError,
     ContentFilterFinishReasonError,
 )
-from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
+from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient
 from ._utils._logs import setup_logging as _setup_logging
 from ._legacy_response import HttpxBinaryResponseContent as HttpxBinaryResponseContent
@@ -77,6 +77,7 @@
     "DEFAULT_CONNECTION_LIMITS",
     "DefaultHttpxClient",
     "DefaultAsyncHttpxClient",
+    "DefaultAioHttpClient",
 ]
 
 if not _t.TYPE_CHECKING:
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index 44b3603008..0a6385a7b5 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -1088,7 +1088,14 @@ def _process_response(
 
         origin = get_origin(cast_to) or cast_to
 
-        if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+        if (
+            inspect.isclass(origin)
+            and issubclass(origin, BaseAPIResponse)
+            # we only want to actually return the custom BaseAPIResponse class if we're
+            # returning the raw response, or if we're not streaming SSE, as if we're streaming
+            # SSE then `cast_to` doesn't accurately reflect the type we need to parse into
+            and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
+        ):
             if not issubclass(origin, APIResponse):
                 raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
@@ -1299,6 +1306,24 @@ def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
 
 
+try:
+    import httpx_aiohttp
+except ImportError:
+
+    class _DefaultAioHttpClient(httpx.AsyncClient):
+        def __init__(self, **_kwargs: Any) -> None:
+            raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra")
+else:
+
+    class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient):  # type: ignore
+        def __init__(self, **kwargs: Any) -> None:
+            kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
+            kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
+            kwargs.setdefault("follow_redirects", True)
+
+            super().__init__(**kwargs)
+
+
 if TYPE_CHECKING:
     DefaultAsyncHttpxClient = httpx.AsyncClient
     """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK
@@ -1307,8 +1332,12 @@ def __init__(self, **kwargs: Any) -> None:
     This is useful because overriding the `http_client` with your own instance of
     `httpx.AsyncClient` will result in httpx's defaults being used, not ours.
     """
+
+    DefaultAioHttpClient = httpx.AsyncClient
+    """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`."""
 else:
     DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient
+    DefaultAioHttpClient = _DefaultAioHttpClient
 
 
 class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient):
@@ -1606,7 +1635,14 @@ async def _process_response(
 
         origin = get_origin(cast_to) or cast_to
 
-        if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+        if (
+            inspect.isclass(origin)
+            and issubclass(origin, BaseAPIResponse)
+            # we only want to actually return the custom BaseAPIResponse class if we're
+            # returning the raw response, or if we're not streaming SSE, as if we're streaming
+            # SSE then `cast_to` doesn't accurately reflect the type we need to parse into
+            and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
+        ):
             if not issubclass(origin, AsyncAPIResponse):
                 raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")
diff --git a/src/openai/_version.py b/src/openai/_version.py
index c0f313e3c3..d1cad1dd01 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.86.0"  # x-release-please-version
+__version__ = "1.91.0"  # x-release-please-version
diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py
index a195d7135e..fe776baae8 100644
--- a/src/openai/resources/audio/speech.py
+++ b/src/openai/resources/audio/speech.py
@@ -56,6 +56,7 @@ def create(
         instructions: str | NotGiven = NOT_GIVEN,
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
+        stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -85,7 +86,10 @@ def create(
           `wav`, and `pcm`.
 
          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
-              the default. Does not work with `gpt-4o-mini-tts`.
+              the default.
+
+          stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
+              `sse` is not supported for `tts-1` or `tts-1-hd`.
 
           extra_headers: Send extra headers
@@ -106,6 +110,7 @@ def create(
                     "instructions": instructions,
                     "response_format": response_format,
                     "speed": speed,
+                    "stream_format": stream_format,
                 },
                 speech_create_params.SpeechCreateParams,
             ),
@@ -147,6 +152,7 @@ async def create(
         instructions: str | NotGiven = NOT_GIVEN,
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
+        stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -176,7 +182,10 @@ async def create(
           `wav`, and `pcm`.
 
          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
-              the default. Does not work with `gpt-4o-mini-tts`.
+              the default.
+
+          stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
+              `sse` is not supported for `tts-1` or `tts-1-hd`.
 
           extra_headers: Send extra headers
@@ -197,6 +206,7 @@ async def create(
                     "instructions": instructions,
                     "response_format": response_format,
                     "speed": speed,
+                    "stream_format": stream_format,
                 },
                 speech_create_params.SpeechCreateParams,
             ),
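Reviewer note: a quick sketch of the new `stream_format` parameter (illustrative, not part of the diff; voice and input text are arbitrary, and per the docstring above `sse` is not supported for `tts-1` or `tts-1-hd`):

```python
from openai import OpenAI

client = OpenAI()

# "audio" keeps the existing raw-bytes behaviour; "sse" frames the audio
# as server-sent events instead.
response = client.audio.speech.create(
    model="gpt-4o-mini-tts",
    voice="alloy",
    input="The quick brown fox jumped over the lazy dog.",
    stream_format="sse",
)
```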
diff --git a/src/openai/resources/beta/chat/completions.py b/src/openai/resources/beta/chat/completions.py
index 80e015615f..871c4ab48a 100644
--- a/src/openai/resources/beta/chat/completions.py
+++ b/src/openai/resources/beta/chat/completions.py
@@ -81,7 +81,7 @@ def parse(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -228,7 +228,7 @@ def stream(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -360,7 +360,7 @@ async def parse(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -507,7 +507,7 @@ def stream(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py
index a2a664ac59..a6b89fc833 100644
--- a/src/openai/resources/chat/completions/completions.py
+++ b/src/openai/resources/chat/completions/completions.py
@@ -95,7 +95,7 @@ def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
@@ -365,7 +365,7 @@ def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -634,7 +634,7 @@ def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -902,7 +902,7 @@ def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
@@ -1198,7 +1198,7 @@ async def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
@@ -1468,7 +1468,7 @@ async def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -1737,7 +1737,7 @@ async def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -2005,7 +2005,7 @@ async def create(
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
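Reviewer note: the widened `service_tier` literal is purely additive and passes straight through; a minimal sketch (assuming the account is enabled for the scale tier):

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say this is a test"}],
    service_tier="scale",  # newly accepted alongside "auto", "default" and "flex"
)
print(completion.choices[0].message.content)
```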
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index 5cca219172..ee21cdd280 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -84,7 +84,7 @@ def create(
        Response includes details of the enqueued job including job status and the name
        of the fine-tuned models once complete.

-        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          model: The name of the model to fine-tune. You can select one of the
@@ -105,7 +105,8 @@
              [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
              format.

-              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              See the
+              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -142,7 +143,8 @@
              Your dataset must be formatted as a JSONL file. You must upload your file with
              the purpose `fine-tune`.

-              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              See the
+              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          extra_headers: Send extra headers
@@ -189,7 +191,7 @@ def retrieve(
        """
        Get info about a fine-tuning job.

-        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          extra_headers: Send extra headers
@@ -462,7 +464,7 @@ async def create(
        Response includes details of the enqueued job including job status and the name
        of the fine-tuned models once complete.

-        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          model: The name of the model to fine-tune. You can select one of the
@@ -483,7 +485,8 @@
              [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
              format.

-              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              See the
+              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -520,7 +523,8 @@
              Your dataset must be formatted as a JSONL file. You must upload your file with
              the purpose `fine-tune`.

-              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
+              See the
+              [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
              for more details.

          extra_headers: Send extra headers
@@ -567,7 +571,7 @@ async def retrieve(
        """
        Get info about a fine-tuning job.

-        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
+        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)

        Args:
          extra_headers: Send extra headers
diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py
index 0f1c9fcb9e..43f6189f91 100644
--- a/src/openai/resources/images.py
+++ b/src/openai/resources/images.py
@@ -123,6 +123,8 @@ def edit(
        mask: FileTypes | NotGiven = NOT_GIVEN,
        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
+        output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+        output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
@@ -171,6 +173,14 @@ def edit(
          n: The number of images to generate. Must be between 1 and 10.

+          output_compression: The compression level (0-100%) for the generated images. This parameter is only
+              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+              defaults to 100.
+
+          output_format: The format in which the generated images are returned. This parameter is only
+              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+              default value is `png`.
+
          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.
@@ -204,6 +214,8 @@ def edit(
                    "mask": mask,
                    "model": model,
                    "n": n,
+                    "output_compression": output_compression,
+                    "output_format": output_format,
                    "quality": quality,
                    "response_format": response_format,
                    "size": size,
@@ -447,6 +459,8 @@ async def edit(
        mask: FileTypes | NotGiven = NOT_GIVEN,
        model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
+        output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+        output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
        response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
@@ -495,6 +509,14 @@ async def edit(
          n: The number of images to generate. Must be between 1 and 10.

+          output_compression: The compression level (0-100%) for the generated images. This parameter is only
+              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+              defaults to 100.
+
+          output_format: The format in which the generated images are returned. This parameter is only
+              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+              default value is `png`.
+
          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.
@@ -528,6 +550,8 @@ async def edit(
                    "mask": mask,
                    "model": model,
                    "n": n,
+                    "output_compression": output_compression,
+                    "output_format": output_format,
                    "quality": quality,
                    "response_format": response_format,
                    "size": size,
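Reviewer note: the two new image-edit parameters combine like so (a sketch; the input file and prompt are hypothetical, and both parameters are `gpt-image-1`-only):

```python
from openai import OpenAI

client = OpenAI()

result = client.images.edit(
    model="gpt-image-1",
    image=open("original.png", "rb"),  # hypothetical local file
    prompt="Add a lighthouse on the horizon",
    output_format="webp",    # "png" (default), "jpeg", or "webp"
    output_compression=80,   # 0-100; only meaningful for "webp"/"jpeg"
)
```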
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index 81ae4e5bd6..841d198a5b 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -10,7 +10,7 @@
 from ... import _legacy_response
 from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import is_given, required_args, maybe_transform, async_maybe_transform
+from ..._utils import is_given, maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -41,6 +41,7 @@
 from ...types.responses.response_includable import ResponseIncludable
 from ...types.shared_params.responses_model import ResponsesModel
 from ...types.responses.response_input_param import ResponseInputParam
+from ...types.responses.response_prompt_param import ResponsePromptParam
 from ...types.responses.response_stream_event import ResponseStreamEvent
 from ...types.responses.response_text_config_param import ResponseTextConfigParam
@@ -75,17 +76,18 @@ def with_streaming_response(self) -> ResponsesWithStreamingResponse:
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -117,22 +119,6 @@ def create(
          your own data as input for the model's response.

        Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).
@@ -152,8 +138,17 @@
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.

-          instructions: Inserts a system (or developer) message as the first item in the model's
-              context.
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
@@ -170,12 +165,21 @@
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

+          prompt: Reference to a prompt template and its variables.
+              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
          reasoning: **o-series models only**

              Configuration options for
@@ -270,18 +274,19 @@
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: Literal[True],
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
@@ -312,22 +317,6 @@ def create(
          your own data as input for the model's response.

        Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
@@ -354,8 +343,17 @@
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.

-          instructions: Inserts a system (or developer) message as the first item in the model's
-              context.
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
@@ -372,12 +370,21 @@
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

+          prompt: Reference to a prompt template and its variables.
+              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
          reasoning: **o-series models only**

              Configuration options for
@@ -465,18 +472,19 @@
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: bool,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
@@ -507,22 +515,6 @@ def create(
          your own data as input for the model's response.

        Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
@@ -549,8 +541,17 @@
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.

-          instructions: Inserts a system (or developer) message as the first item in the model's
-              context.
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
@@ -567,12 +568,21 @@
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

+          prompt: Reference to a prompt template and its variables.
+              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
          reasoning: **o-series models only**

              Configuration options for
@@ -656,21 +666,21 @@
        """
        ...

-    @required_args(["input", "model"], ["input", "model", "stream"])
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -691,15 +701,16 @@ def create(
            "/responses",
            body=maybe_transform(
                {
-                    "input": input,
-                    "model": model,
                    "background": background,
                    "include": include,
+                    "input": input,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "metadata": metadata,
+                    "model": model,
                    "parallel_tool_calls": parallel_tool_calls,
                    "previous_response_id": previous_response_id,
+                    "prompt": prompt,
                    "reasoning": reasoning,
                    "service_tier": service_tier,
                    "store": store,
@@ -1283,17 +1294,18 @@ def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse:
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1325,22 +1337,6 @@ async def create(
          your own data as input for the model's response.

        Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).
@@ -1360,8 +1356,17 @@
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.

-          instructions: Inserts a system (or developer) message as the first item in the model's
-              context.
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
@@ -1378,12 +1383,21 @@
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

+          prompt: Reference to a prompt template and its variables.
+              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
          reasoning: **o-series models only**

              Configuration options for
@@ -1478,18 +1492,19 @@
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: Literal[True],
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
@@ -1520,22 +1535,6 @@ async def create(
          your own data as input for the model's response.

        Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
@@ -1562,8 +1561,17 @@
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.

-          instructions: Inserts a system (or developer) message as the first item in the model's
-              context.
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
@@ -1580,12 +1588,21 @@
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

+          prompt: Reference to a prompt template and its variables.
+              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
          reasoning: **o-series models only**

              Configuration options for
@@ -1673,18 +1690,19 @@
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: bool,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
@@ -1715,22 +1733,6 @@ async def create(
          your own data as input for the model's response.

        Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
@@ -1757,8 +1759,17 @@
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.

-          instructions: Inserts a system (or developer) message as the first item in the model's
-              context.
+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
@@ -1775,12 +1786,21 @@
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

+          prompt: Reference to a prompt template and its variables.
+              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
          reasoning: **o-series models only**

              Configuration options for
@@ -1864,21 +1884,21 @@
        """
        ...

-    @required_args(["input", "model"], ["input", "model", "stream"])
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1899,15 +1919,16 @@ async def create(
            "/responses",
            body=await async_maybe_transform(
                {
-                    "input": input,
-                    "model": model,
                    "background": background,
                    "include": include,
+                    "input": input,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "metadata": metadata,
+                    "model": model,
                    "parallel_tool_calls": parallel_tool_calls,
                    "previous_response_id": previous_response_id,
+                    "prompt": prompt,
                    "reasoning": reasoning,
                    "service_tier": service_tier,
                    "store": store,
a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -48,6 +48,12 @@ class SpeechCreateParams(TypedDict, total=False): speed: float """The speed of the generated audio. - Select a value from `0.25` to `4.0`. `1.0` is the default. Does not work with - `gpt-4o-mini-tts`. + Select a value from `0.25` to `4.0`. `1.0` is the default. + """ + + stream_format: Literal["sse", "audio"] + """The format to stream the audio in. + + Supported formats are `sse` and `audio`. `sse` is not supported for `tts-1` or + `tts-1-hd`. """ diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py index 1576385404..7115eb9edb 100644 --- a/src/openai/types/audio/transcription.py +++ b/src/openai/types/audio/transcription.py @@ -1,10 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias +from ..._utils import PropertyInfo from ..._models import BaseModel -__all__ = ["Transcription", "Logprob"] +__all__ = ["Transcription", "Logprob", "Usage", "UsageTokens", "UsageTokensInputTokenDetails", "UsageDuration"] class Logprob(BaseModel): @@ -18,6 +20,42 @@ class Logprob(BaseModel): """The log probability of the token.""" +class UsageTokensInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class UsageTokens(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. Always `tokens` for this variant.""" + + input_token_details: Optional[UsageTokensInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + +class UsageDuration(BaseModel): + duration: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. Always `duration` for this variant.""" + + +Usage: TypeAlias = Annotated[Union[UsageTokens, UsageDuration], PropertyInfo(discriminator="type")] + + class Transcription(BaseModel): text: str """The transcribed text.""" @@ -28,3 +66,6 @@ class Transcription(BaseModel): Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added to the `include` array. 
""" + + usage: Optional[Usage] = None + """Token usage statistics for the request.""" diff --git a/src/openai/types/audio/transcription_text_done_event.py b/src/openai/types/audio/transcription_text_done_event.py index c8875a1bdb..9665edc565 100644 --- a/src/openai/types/audio/transcription_text_done_event.py +++ b/src/openai/types/audio/transcription_text_done_event.py @@ -5,7 +5,7 @@ from ..._models import BaseModel -__all__ = ["TranscriptionTextDoneEvent", "Logprob"] +__all__ = ["TranscriptionTextDoneEvent", "Logprob", "Usage", "UsageInputTokenDetails"] class Logprob(BaseModel): @@ -19,6 +19,31 @@ class Logprob(BaseModel): """The log probability of the token.""" +class UsageInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class Usage(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. Always `tokens` for this variant.""" + + input_token_details: Optional[UsageInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + class TranscriptionTextDoneEvent(BaseModel): text: str """The text that was transcribed.""" @@ -33,3 +58,6 @@ class TranscriptionTextDoneEvent(BaseModel): [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `include[]` parameter set to `logprobs`. """ + + usage: Optional[Usage] = None + """Usage statistics for models billed by token usage.""" diff --git a/src/openai/types/audio/transcription_verbose.py b/src/openai/types/audio/transcription_verbose.py index 2a670189e0..cc6d769a65 100644 --- a/src/openai/types/audio/transcription_verbose.py +++ b/src/openai/types/audio/transcription_verbose.py @@ -1,12 +1,21 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Optional +from typing_extensions import Literal from ..._models import BaseModel from .transcription_word import TranscriptionWord from .transcription_segment import TranscriptionSegment -__all__ = ["TranscriptionVerbose"] +__all__ = ["TranscriptionVerbose", "Usage"] + + +class Usage(BaseModel): + duration: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. 
Always `duration` for this variant.""" class TranscriptionVerbose(BaseModel): @@ -22,5 +31,8 @@ class TranscriptionVerbose(BaseModel): segments: Optional[List[TranscriptionSegment]] = None """Segments of the transcribed text and their corresponding details.""" + usage: Optional[Usage] = None + """Usage statistics for models billed by audio input duration.""" + words: Optional[List[TranscriptionWord]] = None """Extracted words and their corresponding timestamps.""" diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index cebf67c732..e04985d2b6 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -3,12 +3,12 @@ from __future__ import annotations from typing import List, Union, Iterable -from typing_extensions import Literal, TypeAlias, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict __all__ = [ "SessionCreateParams", "ClientSecret", - "ClientSecretExpiresAt", + "ClientSecretExpiresAfter", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", @@ -156,8 +156,8 @@ class SessionCreateParams(TypedDict, total=False): """ -class ClientSecretExpiresAt(TypedDict, total=False): - anchor: Literal["created_at"] +class ClientSecretExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] """The anchor point for the ephemeral token expiration. Only `created_at` is currently supported. @@ -171,7 +171,7 @@ class ClientSecretExpiresAt(TypedDict, total=False): class ClientSecret(TypedDict, total=False): - expires_at: ClientSecretExpiresAt + expires_after: ClientSecretExpiresAfter """Configuration for the ephemeral token expiration.""" diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py index 81fed95fa9..15d5c1742b 100644 --- a/src/openai/types/beta/realtime/session_create_response.py +++ b/src/openai/types/beta/realtime/session_create_response.py @@ -33,10 +33,7 @@ class ClientSecret(BaseModel): class InputAudioTranscription(BaseModel): model: Optional[str] = None - """ - The model to use for transcription, `whisper-1` is the only currently supported - model. - """ + """The model to use for transcription.""" class Tool(BaseModel): @@ -116,8 +113,8 @@ class SessionCreateResponse(BaseModel): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously and should be treated as rough guidance rather than the + representation understood by the model. 
""" instructions: Optional[str] = None diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 8bb6a0e266..789b9cd1e5 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -9,7 +9,7 @@ "SessionUpdateEvent", "Session", "SessionClientSecret", - "SessionClientSecretExpiresAt", + "SessionClientSecretExpiresAfter", "SessionInputAudioNoiseReduction", "SessionInputAudioTranscription", "SessionTool", @@ -19,8 +19,8 @@ ] -class SessionClientSecretExpiresAt(BaseModel): - anchor: Optional[Literal["created_at"]] = None +class SessionClientSecretExpiresAfter(BaseModel): + anchor: Literal["created_at"] """The anchor point for the ephemeral token expiration. Only `created_at` is currently supported. @@ -34,7 +34,7 @@ class SessionClientSecretExpiresAt(BaseModel): class SessionClientSecret(BaseModel): - expires_at: Optional[SessionClientSecretExpiresAt] = None + expires_after: Optional[SessionClientSecretExpiresAfter] = None """Configuration for the ephemeral token expiration.""" diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index a10de540d0..2dfa2c26f3 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -9,7 +9,7 @@ "SessionUpdateEventParam", "Session", "SessionClientSecret", - "SessionClientSecretExpiresAt", + "SessionClientSecretExpiresAfter", "SessionInputAudioNoiseReduction", "SessionInputAudioTranscription", "SessionTool", @@ -19,8 +19,8 @@ ] -class SessionClientSecretExpiresAt(TypedDict, total=False): - anchor: Literal["created_at"] +class SessionClientSecretExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] """The anchor point for the ephemeral token expiration. Only `created_at` is currently supported. @@ -34,7 +34,7 @@ class SessionClientSecretExpiresAt(TypedDict, total=False): class SessionClientSecret(TypedDict, total=False): - expires_at: SessionClientSecretExpiresAt + expires_after: SessionClientSecretExpiresAfter """Configuration for the ephemeral token expiration.""" diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 49af1a3d0e..863cc2e81a 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -59,7 +59,7 @@ class ChatCompletion(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - service_tier: Optional[Literal["auto", "default", "flex"]] = None + service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None """Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index c109e10f97..3d3d68602a 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -128,7 +128,7 @@ class ChatCompletionChunk(BaseModel): object: Literal["chat.completion.chunk"] """The object type, which is always `chat.completion.chunk`.""" - service_tier: Optional[Literal["auto", "default", "flex"]] = None + service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None """Specifies the latency tier to use for processing the request. 
This parameter is relevant for customers subscribed to the scale tier service: diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index e55cc2d0b7..f1ed444b79 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -208,7 +208,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): in the backend. """ - service_tier: Optional[Literal["auto", "default", "flex"]] + service_tier: Optional[Literal["auto", "default", "flex", "scale"]] """Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index 6b2f41cb71..5514db1ed1 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -37,7 +37,8 @@ class JobCreateParams(TypedDict, total=False): [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. """ @@ -91,7 +92,8 @@ class JobCreateParams(TypedDict, total=False): Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. """ diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index 4f931ce141..aecb98fa6f 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -58,6 +58,20 @@ class ImageEditParams(TypedDict, total=False): n: Optional[int] """The number of images to generate. Must be between 1 and 10.""" + output_compression: Optional[int] + """The compression level (0-100%) for the generated images. + + This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` + output formats, and defaults to 100. + """ + + output_format: Optional[Literal["png", "jpeg", "webp"]] + """The format in which the generated images are returned. + + This parameter is only supported for `gpt-image-1`. Must be one of `png`, + `jpeg`, or `webp`. The default value is `png`. + """ + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] """The quality of the image that will be generated. 
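The two image-edit parameters added above can be exercised together. The following is a minimal sketch, assuming the `images.edit` client method mirrors the new `ImageEditParams` fields and that a `gpt-image-1`-enabled key is configured; the file names and prompt are placeholders:

```python
import base64

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# `output_format` and `output_compression` are the parameters introduced in
# the ImageEditParams change above; compression only applies to the lossy
# `webp` and `jpeg` formats and defaults to 100.
with open("sketch.png", "rb") as image:  # placeholder input file
    result = client.images.edit(
        model="gpt-image-1",
        image=image,
        prompt="Add a red hat to the cat",
        output_format="webp",
        output_compression=80,
    )

# gpt-image-1 returns base64-encoded image data rather than a URL.
image_bytes = base64.b64decode(result.data[0].b64_json)
with open("edited.webp", "wb") as f:
    f.write(image_bytes)
```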
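Likewise, the `responses.create` changes earlier in this diff (dropping the `@required_args(["input", "model"])` guard, adding `prompt`, and widening `service_tier` to accept `"scale"`) combine as in the sketch below. The prompt ID, version, and variable name are hypothetical placeholders, not real values:

```python
from openai import OpenAI

client = OpenAI()

# With `input` and `model` no longer required arguments, a stored prompt
# template can drive the request; neither has to be supplied inline.
response = client.responses.create(
    prompt={
        "id": "pmpt_abc123",  # hypothetical reusable prompt template ID
        "version": "2",  # optional: pin a specific template version
        "variables": {"customer_name": "Jane Doe"},  # plain-string substitution
    },
)
print(response.output_text)
```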
diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index d33c26d23a..ba257eabc2 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -18,6 +18,7 @@ ParsedResponseOutputMessage as ParsedResponseOutputMessage, ParsedResponseFunctionToolCall as ParsedResponseFunctionToolCall, ) +from .response_prompt import ResponsePrompt as ResponsePrompt from .response_status import ResponseStatus as ResponseStatus from .web_search_tool import WebSearchTool as WebSearchTool from .file_search_tool import FileSearchTool as FileSearchTool @@ -28,6 +29,7 @@ from .function_tool_param import FunctionToolParam as FunctionToolParam from .response_includable import ResponseIncludable as ResponseIncludable from .response_input_file import ResponseInputFile as ResponseInputFile +from .response_input_item import ResponseInputItem as ResponseInputItem from .response_input_text import ResponseInputText as ResponseInputText from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions from .response_error_event import ResponseErrorEvent as ResponseErrorEvent @@ -38,6 +40,7 @@ from .response_text_config import ResponseTextConfig as ResponseTextConfig from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent +from .response_prompt_param import ResponsePromptParam as ResponsePromptParam from .response_queued_event import ResponseQueuedEvent as ResponseQueuedEvent from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 441b345414..75d1c5e3df 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -7,10 +7,12 @@ from ..._models import BaseModel from .response_error import ResponseError from .response_usage import ResponseUsage +from .response_prompt import ResponsePrompt from .response_status import ResponseStatus from ..shared.metadata import Metadata from ..shared.reasoning import Reasoning from .tool_choice_types import ToolChoiceTypes +from .response_input_item import ResponseInputItem from .tool_choice_options import ToolChoiceOptions from .response_output_item import ResponseOutputItem from .response_text_config import ResponseTextConfig @@ -41,10 +43,8 @@ class Response(BaseModel): incomplete_details: Optional[IncompleteDetails] = None """Details about why the response is incomplete.""" - instructions: Optional[str] = None - """ - Inserts a system (or developer) message as the first item in the model's - context. + instructions: Union[str, List[ResponseInputItem], None] = None + """A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to @@ -148,6 +148,12 @@ class Response(BaseModel): [conversation state](https://platform.openai.com/docs/guides/conversation-state). """ + prompt: Optional[ResponsePrompt] = None + """Reference to a prompt template and its variables. + + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
+ """ + reasoning: Optional[Reasoning] = None """**o-series models only** @@ -155,7 +161,7 @@ class Response(BaseModel): [reasoning models](https://platform.openai.com/docs/guides/reasoning). """ - service_tier: Optional[Literal["auto", "default", "flex"]] = None + service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None """Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: diff --git a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py index d222431504..c5fef939b1 100644 --- a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py @@ -9,13 +9,19 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel): delta: str - """The partial code snippet added by the code interpreter.""" + """The partial code snippet being streamed by the code interpreter.""" + + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code is being + streamed. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call_code.delta"] """The type of the event. Always `response.code_interpreter_call_code.delta`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py index 1ce6796a0e..5201a02d36 100644 --- a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py @@ -11,11 +11,14 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel): code: str """The final code snippet output by the code interpreter.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" + output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """The index of the output item in the response for which the code is finalized.""" sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call_code.done"] """The type of the event. 
Always `response.code_interpreter_call_code.done`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_completed_event.py b/src/openai/types/responses/response_code_interpreter_call_completed_event.py index 3a3a718971..bb9563a16b 100644 --- a/src/openai/types/responses/response_code_interpreter_call_completed_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_completed_event.py @@ -3,20 +3,22 @@ from typing_extensions import Literal from ..._models import BaseModel -from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall __all__ = ["ResponseCodeInterpreterCallCompletedEvent"] class ResponseCodeInterpreterCallCompletedEvent(BaseModel): - code_interpreter_call: ResponseCodeInterpreterToolCall - """A tool call to run code.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code interpreter call + is completed. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call.completed"] """The type of the event. Always `response.code_interpreter_call.completed`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py index d1c8230919..9c6b221004 100644 --- a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py @@ -3,20 +3,22 @@ from typing_extensions import Literal from ..._models import BaseModel -from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall __all__ = ["ResponseCodeInterpreterCallInProgressEvent"] class ResponseCodeInterpreterCallInProgressEvent(BaseModel): - code_interpreter_call: ResponseCodeInterpreterToolCall - """A tool call to run code.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code interpreter call + is in progress. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call.in_progress"] """The type of the event. 
Always `response.code_interpreter_call.in_progress`.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py index 7f4d294f56..f6191e4165 100644 --- a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py @@ -3,20 +3,22 @@ from typing_extensions import Literal from ..._models import BaseModel -from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall __all__ = ["ResponseCodeInterpreterCallInterpretingEvent"] class ResponseCodeInterpreterCallInterpretingEvent(BaseModel): - code_interpreter_call: ResponseCodeInterpreterToolCall - """A tool call to run code.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code interpreter is + interpreting code. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call.interpreting"] """The type of the event. Always `response.code_interpreter_call.interpreting`.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py index 762542f398..7e4dc9f984 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call.py @@ -6,50 +6,46 @@ from ..._utils import PropertyInfo from ..._models import BaseModel -__all__ = ["ResponseCodeInterpreterToolCall", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"] +__all__ = ["ResponseCodeInterpreterToolCall", "Output", "OutputLogs", "OutputImage"] -class ResultLogs(BaseModel): +class OutputLogs(BaseModel): logs: str - """The logs of the code interpreter tool call.""" + """The logs output from the code interpreter.""" type: Literal["logs"] - """The type of the code interpreter text output. Always `logs`.""" + """The type of the output. Always 'logs'.""" -class ResultFilesFile(BaseModel): - file_id: str - """The ID of the file.""" +class OutputImage(BaseModel): + type: Literal["image"] + """The type of the output. Always 'image'.""" - mime_type: str - """The MIME type of the file.""" + url: str + """The URL of the image output from the code interpreter.""" -class ResultFiles(BaseModel): - files: List[ResultFilesFile] - - type: Literal["files"] - """The type of the code interpreter file output. Always `files`.""" - - -Result: TypeAlias = Annotated[Union[ResultLogs, ResultFiles], PropertyInfo(discriminator="type")] +Output: TypeAlias = Annotated[Union[OutputLogs, OutputImage], PropertyInfo(discriminator="type")] class ResponseCodeInterpreterToolCall(BaseModel): id: str """The unique ID of the code interpreter tool call.""" - code: str - """The code to run.""" + code: Optional[str] = None + """The code to run, or null if not available.""" + + container_id: str + """The ID of the container used to run the code.""" + + outputs: Optional[List[Output]] = None + """The outputs generated by the code interpreter, such as logs or images. - results: List[Result] - """The results of the code interpreter tool call.""" + Can be null if no outputs are available. 
+ """ - status: Literal["in_progress", "interpreting", "completed"] + status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] """The status of the code interpreter tool call.""" type: Literal["code_interpreter_call"] """The type of the code interpreter tool call. Always `code_interpreter_call`.""" - - container_id: Optional[str] = None - """The ID of the container used to run the code.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call_param.py b/src/openai/types/responses/response_code_interpreter_tool_call_param.py index be0f909a6a..69e01f99ed 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call_param.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -2,53 +2,49 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -__all__ = ["ResponseCodeInterpreterToolCallParam", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"] +__all__ = ["ResponseCodeInterpreterToolCallParam", "Output", "OutputLogs", "OutputImage"] -class ResultLogs(TypedDict, total=False): +class OutputLogs(TypedDict, total=False): logs: Required[str] - """The logs of the code interpreter tool call.""" + """The logs output from the code interpreter.""" type: Required[Literal["logs"]] - """The type of the code interpreter text output. Always `logs`.""" + """The type of the output. Always 'logs'.""" -class ResultFilesFile(TypedDict, total=False): - file_id: Required[str] - """The ID of the file.""" +class OutputImage(TypedDict, total=False): + type: Required[Literal["image"]] + """The type of the output. Always 'image'.""" - mime_type: Required[str] - """The MIME type of the file.""" + url: Required[str] + """The URL of the image output from the code interpreter.""" -class ResultFiles(TypedDict, total=False): - files: Required[Iterable[ResultFilesFile]] - - type: Required[Literal["files"]] - """The type of the code interpreter file output. Always `files`.""" - - -Result: TypeAlias = Union[ResultLogs, ResultFiles] +Output: TypeAlias = Union[OutputLogs, OutputImage] class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): id: Required[str] """The unique ID of the code interpreter tool call.""" - code: Required[str] - """The code to run.""" + code: Required[Optional[str]] + """The code to run, or null if not available.""" + + container_id: Required[str] + """The ID of the container used to run the code.""" + + outputs: Required[Optional[Iterable[Output]]] + """The outputs generated by the code interpreter, such as logs or images. - results: Required[Iterable[Result]] - """The results of the code interpreter tool call.""" + Can be null if no outputs are available. + """ - status: Required[Literal["in_progress", "interpreting", "completed"]] + status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]] """The status of the code interpreter tool call.""" type: Required[Literal["code_interpreter_call"]] """The type of the code interpreter tool call. 
Always `code_interpreter_call`.""" - - container_id: str - """The ID of the container used to run the code.""" diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 1abc2ccb1d..22acd6f653 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -9,6 +9,7 @@ from .response_includable import ResponseIncludable from .tool_choice_options import ToolChoiceOptions from .response_input_param import ResponseInputParam +from .response_prompt_param import ResponsePromptParam from ..shared_params.metadata import Metadata from .tool_choice_types_param import ToolChoiceTypesParam from ..shared_params.reasoning import Reasoning @@ -25,27 +26,6 @@ class ResponseCreateParamsBase(TypedDict, total=False): - input: Required[Union[str, ResponseInputParam]] - """Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Image inputs](https://platform.openai.com/docs/guides/images) - - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - - [Function calling](https://platform.openai.com/docs/guides/function-calling) - """ - - model: Required[ResponsesModel] - """Model ID used to generate the response, like `gpt-4o` or `o3`. - - OpenAI offers a wide range of models with different capabilities, performance - characteristics, and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - """ - background: Optional[bool] """Whether to run the model response in the background. @@ -71,10 +51,20 @@ class ResponseCreateParamsBase(TypedDict, total=False): in code interpreter tool call items. """ - instructions: Optional[str] + input: Union[str, ResponseInputParam] + """Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) """ - Inserts a system (or developer) message as the first item in the model's - context. + + instructions: Optional[str] + """A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to @@ -98,6 +88,15 @@ class ResponseCreateParamsBase(TypedDict, total=False): a maximum length of 512 characters. """ + model: ResponsesModel + """Model ID used to generate the response, like `gpt-4o` or `o3`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + """ + parallel_tool_calls: Optional[bool] """Whether to allow the model to run tool calls in parallel.""" @@ -108,6 +107,12 @@ class ResponseCreateParamsBase(TypedDict, total=False): [conversation state](https://platform.openai.com/docs/guides/conversation-state). 
""" + prompt: Optional[ResponsePromptParam] + """Reference to a prompt template and its variables. + + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + reasoning: Optional[Reasoning] """**o-series models only** @@ -115,7 +120,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): [reasoning models](https://platform.openai.com/docs/guides/reasoning). """ - service_tier: Optional[Literal["auto", "default", "flex"]] + service_tier: Optional[Literal["auto", "default", "flex", "scale"]] """Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: diff --git a/src/openai/types/responses/response_input_item.py b/src/openai/types/responses/response_input_item.py new file mode 100644 index 0000000000..5fbd7c274b --- /dev/null +++ b/src/openai/types/responses/response_input_item.py @@ -0,0 +1,305 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .easy_input_message import EasyInputMessage +from .response_output_message import ResponseOutputMessage +from .response_reasoning_item import ResponseReasoningItem +from .response_computer_tool_call import ResponseComputerToolCall +from .response_function_tool_call import ResponseFunctionToolCall +from .response_function_web_search import ResponseFunctionWebSearch +from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from .response_input_message_content_list import ResponseInputMessageContentList +from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot + +__all__ = [ + "ResponseInputItem", + "Message", + "ComputerCallOutput", + "ComputerCallOutputAcknowledgedSafetyCheck", + "FunctionCallOutput", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "LocalShellCallOutput", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", + "McpApprovalResponse", + "McpCall", + "ItemReference", +] + + +class Message(BaseModel): + content: ResponseInputMessageContentList + """ + A list of one or many input items to the model, containing different content + types. + """ + + role: Literal["user", "system", "developer"] + """The role of the message input. One of `user`, `system`, or `developer`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always set to `message`.""" + + +class ComputerCallOutputAcknowledgedSafetyCheck(BaseModel): + id: str + """The ID of the pending safety check.""" + + code: Optional[str] = None + """The type of the pending safety check.""" + + message: Optional[str] = None + """Details about the pending safety check.""" + + +class ComputerCallOutput(BaseModel): + call_id: str + """The ID of the computer tool call that produced the output.""" + + output: ResponseComputerToolCallOutputScreenshot + """A computer screenshot image used with the computer use tool.""" + + type: Literal["computer_call_output"] + """The type of the computer tool call output. 
Always `computer_call_output`.""" + + id: Optional[str] = None + """The ID of the computer tool call output.""" + + acknowledged_safety_checks: Optional[List[ComputerCallOutputAcknowledgedSafetyCheck]] = None + """ + The safety checks reported by the API that have been acknowledged by the + developer. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the message input. + + One of `in_progress`, `completed`, or `incomplete`. Populated when input items + are returned via API. + """ + + +class FunctionCallOutput(BaseModel): + call_id: str + """The unique ID of the function tool call generated by the model.""" + + output: str + """A JSON string of the output of the function tool call.""" + + type: Literal["function_call_output"] + """The type of the function tool call output. Always `function_call_output`.""" + + id: Optional[str] = None + """The unique ID of the function tool call output. + + Populated when this item is returned via API. + """ + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + +class ImageGenerationCall(BaseModel): + id: str + """The unique ID of the image generation call.""" + + result: Optional[str] = None + """The generated image encoded in base64.""" + + status: Literal["in_progress", "completed", "generating", "failed"] + """The status of the image generation call.""" + + type: Literal["image_generation_call"] + """The type of the image generation call. Always `image_generation_call`.""" + + +class LocalShellCallAction(BaseModel): + command: List[str] + """The command to run.""" + + env: Dict[str, str] + """Environment variables to set for the command.""" + + type: Literal["exec"] + """The type of the local shell action. Always `exec`.""" + + timeout_ms: Optional[int] = None + """Optional timeout in milliseconds for the command.""" + + user: Optional[str] = None + """Optional user to run the command as.""" + + working_directory: Optional[str] = None + """Optional working directory to run the command in.""" + + +class LocalShellCall(BaseModel): + id: str + """The unique ID of the local shell call.""" + + action: LocalShellCallAction + """Execute a shell command on the server.""" + + call_id: str + """The unique ID of the local shell tool call generated by the model.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the local shell call.""" + + type: Literal["local_shell_call"] + """The type of the local shell call. Always `local_shell_call`.""" + + +class LocalShellCallOutput(BaseModel): + id: str + """The unique ID of the local shell tool call generated by the model.""" + + output: str + """A JSON string of the output of the local shell tool call.""" + + type: Literal["local_shell_call_output"] + """The type of the local shell tool call output. Always `local_shell_call_output`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. 
One of `in_progress`, `completed`, or `incomplete`."""
+
+
+class McpListToolsTool(BaseModel):
+    input_schema: object
+    """The JSON schema describing the tool's input."""
+
+    name: str
+    """The name of the tool."""
+
+    annotations: Optional[object] = None
+    """Additional annotations about the tool."""
+
+    description: Optional[str] = None
+    """The description of the tool."""
+
+
+class McpListTools(BaseModel):
+    id: str
+    """The unique ID of the list."""
+
+    server_label: str
+    """The label of the MCP server."""
+
+    tools: List[McpListToolsTool]
+    """The tools available on the server."""
+
+    type: Literal["mcp_list_tools"]
+    """The type of the item. Always `mcp_list_tools`."""
+
+    error: Optional[str] = None
+    """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(BaseModel):
+    id: str
+    """The unique ID of the approval request."""
+
+    arguments: str
+    """A JSON string of arguments for the tool."""
+
+    name: str
+    """The name of the tool to run."""
+
+    server_label: str
+    """The label of the MCP server making the request."""
+
+    type: Literal["mcp_approval_request"]
+    """The type of the item. Always `mcp_approval_request`."""
+
+
+class McpApprovalResponse(BaseModel):
+    approval_request_id: str
+    """The ID of the approval request being answered."""
+
+    approve: bool
+    """Whether the request was approved."""
+
+    type: Literal["mcp_approval_response"]
+    """The type of the item. Always `mcp_approval_response`."""
+
+    id: Optional[str] = None
+    """The unique ID of the approval response."""
+
+    reason: Optional[str] = None
+    """Optional reason for the decision."""
+
+
+class McpCall(BaseModel):
+    id: str
+    """The unique ID of the tool call."""
+
+    arguments: str
+    """A JSON string of the arguments passed to the tool."""
+
+    name: str
+    """The name of the tool that was run."""
+
+    server_label: str
+    """The label of the MCP server running the tool."""
+
+    type: Literal["mcp_call"]
+    """The type of the item. Always `mcp_call`."""
+
+    error: Optional[str] = None
+    """The error from the tool call, if any."""
+
+    output: Optional[str] = None
+    """The output from the tool call."""
+
+
+class ItemReference(BaseModel):
+    id: str
+    """The ID of the item to reference."""
+
+    type: Optional[Literal["item_reference"]] = None
+    """The type of item to reference. 
Always `item_reference`.""" + + +ResponseInputItem: TypeAlias = Annotated[ + Union[ + EasyInputMessage, + Message, + ResponseOutputMessage, + ResponseFileSearchToolCall, + ResponseComputerToolCall, + ComputerCallOutput, + ResponseFunctionWebSearch, + ResponseFunctionToolCall, + FunctionCallOutput, + ResponseReasoningItem, + ImageGenerationCall, + ResponseCodeInterpreterToolCall, + LocalShellCall, + LocalShellCallOutput, + McpListTools, + McpApprovalRequest, + McpApprovalResponse, + McpCall, + ItemReference, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py index 1ea9a4ba93..aa97b629f0 100644 --- a/src/openai/types/responses/response_output_text.py +++ b/src/openai/types/responses/response_output_text.py @@ -22,6 +22,9 @@ class AnnotationFileCitation(BaseModel): file_id: str """The ID of the file.""" + filename: str + """The filename of the file cited.""" + index: int """The index of the file in the list of files.""" @@ -56,6 +59,9 @@ class AnnotationContainerFileCitation(BaseModel): file_id: str """The ID of the file.""" + filename: str + """The filename of the container file cited.""" + start_index: int """The index of the first character of the container file citation in the message.""" diff --git a/src/openai/types/responses/response_output_text_param.py b/src/openai/types/responses/response_output_text_param.py index 207901e8ef..63d2d394a8 100644 --- a/src/openai/types/responses/response_output_text_param.py +++ b/src/openai/types/responses/response_output_text_param.py @@ -21,6 +21,9 @@ class AnnotationFileCitation(TypedDict, total=False): file_id: Required[str] """The ID of the file.""" + filename: Required[str] + """The filename of the file cited.""" + index: Required[int] """The index of the file in the list of files.""" @@ -55,6 +58,9 @@ class AnnotationContainerFileCitation(TypedDict, total=False): file_id: Required[str] """The ID of the file.""" + filename: Required[str] + """The filename of the container file cited.""" + start_index: Required[int] """The index of the first character of the container file citation in the message.""" diff --git a/src/openai/types/responses/response_prompt.py b/src/openai/types/responses/response_prompt.py new file mode 100644 index 0000000000..537c2f8fbc --- /dev/null +++ b/src/openai/types/responses/response_prompt.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Union, Optional +from typing_extensions import TypeAlias + +from ..._models import BaseModel +from .response_input_file import ResponseInputFile +from .response_input_text import ResponseInputText +from .response_input_image import ResponseInputImage + +__all__ = ["ResponsePrompt", "Variables"] + +Variables: TypeAlias = Union[str, ResponseInputText, ResponseInputImage, ResponseInputFile] + + +class ResponsePrompt(BaseModel): + id: str + """The unique identifier of the prompt template to use.""" + + variables: Optional[Dict[str, Variables]] = None + """Optional map of values to substitute in for variables in your prompt. + + The substitution values can either be strings, or other Response input types + like images or files. 
+ """ + + version: Optional[str] = None + """Optional version of the prompt template.""" diff --git a/src/openai/types/responses/response_prompt_param.py b/src/openai/types/responses/response_prompt_param.py new file mode 100644 index 0000000000..d935fa5191 --- /dev/null +++ b/src/openai/types/responses/response_prompt_param.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Required, TypeAlias, TypedDict + +from .response_input_file_param import ResponseInputFileParam +from .response_input_text_param import ResponseInputTextParam +from .response_input_image_param import ResponseInputImageParam + +__all__ = ["ResponsePromptParam", "Variables"] + +Variables: TypeAlias = Union[str, ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam] + + +class ResponsePromptParam(TypedDict, total=False): + id: Required[str] + """The unique identifier of the prompt template to use.""" + + variables: Optional[Dict[str, Variables]] + """Optional map of values to substitute in for variables in your prompt. + + The substitution values can either be strings, or other Response input types + like images or files. + """ + + version: Optional[str] + """Optional version of the prompt template.""" diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py index ce9ed59ce3..2c77f38949 100644 --- a/tests/api_resources/audio/test_speech.py +++ b/tests/api_resources/audio/test_speech.py @@ -44,6 +44,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou instructions="instructions", response_format="mp3", speed=0.25, + stream_format="sse", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} @@ -83,7 +84,9 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) class TestAsyncSpeech: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize @pytest.mark.respx(base_url=base_url) @@ -108,6 +111,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re instructions="instructions", response_format="mp3", speed=0.25, + stream_format="sse", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index 753acdecf6..11cbe2349c 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -121,7 +121,9 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: class TestAsyncTranscriptions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/audio/test_translations.py b/tests/api_resources/audio/test_translations.py index e12ab7e6c0..ead69e9369 100644 --- 
a/tests/api_resources/audio/test_translations.py +++ b/tests/api_resources/audio/test_translations.py @@ -64,7 +64,9 @@ def test_streaming_response_create(self, client: OpenAI) -> None: class TestAsyncTranslations: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index efc52e0d57..3c55abf80c 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -26,7 +26,7 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: session = client.beta.realtime.sessions.create( client_secret={ - "expires_at": { + "expires_after": { "anchor": "created_at", "seconds": 0, } @@ -90,7 +90,9 @@ def test_streaming_response_create(self, client: OpenAI) -> None: class TestAsyncSessions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: @@ -101,7 +103,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: session = await async_client.beta.realtime.sessions.create( client_secret={ - "expires_at": { + "expires_after": { "anchor": "created_at", "seconds": 0, } diff --git a/tests/api_resources/beta/realtime/test_transcription_sessions.py b/tests/api_resources/beta/realtime/test_transcription_sessions.py index 5a6b4f6c92..ac52489e74 100644 --- a/tests/api_resources/beta/realtime/test_transcription_sessions.py +++ b/tests/api_resources/beta/realtime/test_transcription_sessions.py @@ -74,7 +74,9 @@ def test_streaming_response_create(self, client: OpenAI) -> None: class TestAsyncTranscriptionSessions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index 82aaf87b1c..8aeb654e38 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -253,7 +253,9 @@ def test_path_params_delete(self, client: OpenAI) -> None: class TestAsyncAssistants: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/beta/test_realtime.py b/tests/api_resources/beta/test_realtime.py index 537017ffd3..2b0c7f7d8d 100644 --- 
a/tests/api_resources/beta/test_realtime.py +++ b/tests/api_resources/beta/test_realtime.py @@ -14,4 +14,6 @@ class TestRealtime: class TestAsyncRealtime: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index eab94f0f8a..f392c86729 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -420,7 +420,9 @@ def test_streaming_response_create_and_run_overload_2(self, client: OpenAI) -> N class TestAsyncThreads: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/beta/threads/runs/test_steps.py b/tests/api_resources/beta/threads/runs/test_steps.py index 9ca70657ec..ba44eec63d 100644 --- a/tests/api_resources/beta/threads/runs/test_steps.py +++ b/tests/api_resources/beta/threads/runs/test_steps.py @@ -167,7 +167,9 @@ def test_path_params_list(self, client: OpenAI) -> None: class TestAsyncSteps: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py index bf3f22e8a3..7f57002f27 100644 --- a/tests/api_resources/beta/threads/test_messages.py +++ b/tests/api_resources/beta/threads/test_messages.py @@ -321,7 +321,9 @@ def test_path_params_delete(self, client: OpenAI) -> None: class TestAsyncMessages: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index fdef5e40db..86a296627e 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -568,7 +568,9 @@ def test_path_params_submit_tool_outputs_overload_2(self, client: OpenAI) -> Non class TestAsyncRuns: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/chat/completions/test_messages.py b/tests/api_resources/chat/completions/test_messages.py index 5caac9ec6c..4a4267e539 100644 --- a/tests/api_resources/chat/completions/test_messages.py +++ 
@@ -68,7 +68,9 @@ def test_path_params_list(self, client: OpenAI) -> None:
 
 
 class TestAsyncMessages:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_list(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index aaef82e8c5..aa8f58f0e5 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -447,7 +447,9 @@ class MyModel(pydantic.BaseModel):
 
 
 class TestAsyncCompletions:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/containers/files/test_content.py b/tests/api_resources/containers/files/test_content.py
index 402607058f..67fcdca36c 100644
--- a/tests/api_resources/containers/files/test_content.py
+++ b/tests/api_resources/containers/files/test_content.py
@@ -86,7 +86,9 @@ def test_path_params_retrieve(self, client: OpenAI) -> None:
 
 
 class TestAsyncContent:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     @pytest.mark.respx(base_url=base_url)
diff --git a/tests/api_resources/containers/test_files.py b/tests/api_resources/containers/test_files.py
index 6edcc7973a..f9d82d005c 100644
--- a/tests/api_resources/containers/test_files.py
+++ b/tests/api_resources/containers/test_files.py
@@ -215,7 +215,9 @@ def test_path_params_delete(self, client: OpenAI) -> None:
 
 
 class TestAsyncFiles:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/evals/runs/test_output_items.py b/tests/api_resources/evals/runs/test_output_items.py
index f764f0336e..673867ac42 100644
--- a/tests/api_resources/evals/runs/test_output_items.py
+++ b/tests/api_resources/evals/runs/test_output_items.py
@@ -140,7 +140,9 @@ def test_path_params_list(self, client: OpenAI) -> None:
 
 
 class TestAsyncOutputItems:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/evals/test_runs.py b/tests/api_resources/evals/test_runs.py
index cefb1c82ff..1367cb4bab 100644
--- a/tests/api_resources/evals/test_runs.py
+++ b/tests/api_resources/evals/test_runs.py
@@ -306,7 +306,9 @@ def test_path_params_cancel(self, client: OpenAI) -> None:
 
 
 class TestAsyncRuns:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/fine_tuning/alpha/test_graders.py b/tests/api_resources/fine_tuning/alpha/test_graders.py
index c7fe6670f3..4a237114b6 100644
--- a/tests/api_resources/fine_tuning/alpha/test_graders.py
+++ b/tests/api_resources/fine_tuning/alpha/test_graders.py
@@ -151,7 +151,9 @@ def test_streaming_response_validate(self, client: OpenAI) -> None:
 
 
 class TestAsyncGraders:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_run(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
index 6aa0b867d9..9420e3a34c 100644
--- a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
+++ b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
@@ -169,7 +169,9 @@ def test_path_params_delete(self, client: OpenAI) -> None:
 
 
 class TestAsyncPermissions:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py
index 915d5c6f63..bb11529263 100644
--- a/tests/api_resources/fine_tuning/jobs/test_checkpoints.py
+++ b/tests/api_resources/fine_tuning/jobs/test_checkpoints.py
@@ -67,7 +67,9 @@ def test_path_params_list(self, client: OpenAI) -> None:
 
 
 class TestAsyncCheckpoints:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_list(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py
index 4589f12846..8a35255885 100644
--- a/tests/api_resources/fine_tuning/test_jobs.py
+++ b/tests/api_resources/fine_tuning/test_jobs.py
@@ -354,7 +354,9 @@ def test_path_params_resume(self, client: OpenAI) -> None:
 
 
 class TestAsyncJobs:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py
index 2528943c06..b28f5638c5 100644
--- a/tests/api_resources/responses/test_input_items.py
+++ b/tests/api_resources/responses/test_input_items.py
@@ -70,7 +70,9 @@ def test_path_params_list(self, client: OpenAI) -> None:
 
 
 class TestAsyncInputItems:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_list(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py
index a2f8fb48a3..6775094a58 100644
--- a/tests/api_resources/test_batches.py
+++ b/tests/api_resources/test_batches.py
@@ -176,7 +176,9 @@ def test_path_params_cancel(self, client: OpenAI) -> None:
 
 
 class TestAsyncBatches:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py
index 9ec503c1e3..1c5271df75 100644
--- a/tests/api_resources/test_completions.py
+++ b/tests/api_resources/test_completions.py
@@ -137,7 +137,9 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
 
 
 class TestAsyncCompletions:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_containers.py b/tests/api_resources/test_containers.py
index be9787c4d6..c972f6539d 100644
--- a/tests/api_resources/test_containers.py
+++ b/tests/api_resources/test_containers.py
@@ -177,7 +177,9 @@ def test_path_params_delete(self, client: OpenAI) -> None:
 
 
 class TestAsyncContainers:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py
index e75545b4e2..ce6e213d59 100644
--- a/tests/api_resources/test_embeddings.py
+++ b/tests/api_resources/test_embeddings.py
@@ -64,7 +64,9 @@ def test_streaming_response_create(self, client: OpenAI) -> None:
 
 
 class TestAsyncEmbeddings:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_evals.py b/tests/api_resources/test_evals.py
index 4ae2c597dd..473a4711ca 100644
--- a/tests/api_resources/test_evals.py
+++ b/tests/api_resources/test_evals.py
@@ -297,7 +297,9 @@ def test_path_params_delete(self, client: OpenAI) -> None:
 
 
 class TestAsyncEvals:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py
index 7402566d95..fc4bb4a18e 100644
--- a/tests/api_resources/test_files.py
+++ b/tests/api_resources/test_files.py
@@ -260,7 +260,9 @@ def test_path_params_retrieve_content(self, client: OpenAI) -> None:
 
 
 class TestAsyncFiles:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py
index 7c61453bc1..10fc56d685 100644
--- a/tests/api_resources/test_images.py
+++ b/tests/api_resources/test_images.py
@@ -77,6 +77,8 @@ def test_method_edit_with_all_params(self, client: OpenAI) -> None:
             mask=b"raw file contents",
             model="string",
             n=1,
+            output_compression=100,
+            output_format="png",
             quality="high",
             response_format="url",
             size="1024x1024",
@@ -161,7 +163,9 @@ def test_streaming_response_generate(self, client: OpenAI) -> None:
 
 
 class TestAsyncImages:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create_variation(self, async_client: AsyncOpenAI) -> None:
@@ -223,6 +227,8 @@ async def test_method_edit_with_all_params(self, async_client: AsyncOpenAI) -> N
             mask=b"raw file contents",
             model="string",
             n=1,
+            output_compression=100,
+            output_format="png",
             quality="high",
             response_format="url",
             size="1024x1024",
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index 8791507c3e..cf70871ade 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -121,7 +121,9 @@ def test_path_params_delete(self, client: OpenAI) -> None:
 
 
 class TestAsyncModels:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py
index 6df6464110..870c9e342f 100644
--- a/tests/api_resources/test_moderations.py
+++ b/tests/api_resources/test_moderations.py
@@ -58,7 +58,9 @@ def test_streaming_response_create(self, client: OpenAI) -> None:
 
 
 class TestAsyncModerations:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
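The `tests/api_resources/test_images.py` hunks above exercise two new `images.edit` request parameters, `output_compression` and `output_format`. A hedged usage sketch of the corresponding call; the file name and model choice below are assumptions for illustration, not taken from the patch:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

result = client.images.edit(
    image=open("otter.png", "rb"),  # hypothetical local file
    prompt="Add a beret to the otter",
    model="gpt-image-1",  # assumption: a model that supports these output options
    output_compression=100,  # new parameter exercised by the updated tests
    output_format="png",  # new parameter exercised by the updated tests
)
```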
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 7c0f980fbd..5b7559655a 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -9,7 +9,9 @@
 
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
-from openai.types.responses import Response
+from openai.types.responses import (
+    Response,
+)
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -19,24 +21,26 @@ class TestResponses:
 
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
-        response = client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
+        response = client.responses.create()
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         response = client.responses.create(
-            input="string",
-            model="gpt-4o",
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
+            prompt={
+                "id": "id",
+                "variables": {"foo": "string"},
+                "version": "version",
+            },
             reasoning={
                 "effort": "low",
                 "generate_summary": "auto",
@@ -65,10 +69,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
 
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
-        http_response = client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
+        http_response = client.responses.with_raw_response.create()
 
         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -77,10 +78,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
 
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
-        with client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
+        with client.responses.with_streaming_response.create() as http_response:
             assert not http_response.is_closed
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -92,8 +90,6 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
         response_stream = client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
         response_stream.response.close()
@@ -101,16 +97,21 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         response_stream = client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
+            prompt={
+                "id": "id",
+                "variables": {"foo": "string"},
+                "version": "version",
+            },
             reasoning={
                 "effort": "low",
                 "generate_summary": "auto",
@@ -139,8 +140,6 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
 
@@ -151,8 +150,6 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
@@ -340,28 +337,32 @@ def test_path_params_cancel(self, client: OpenAI) -> None:
 
 
 class TestAsyncResponses:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
+        response = await async_client.responses.create()
         assert_matches_type(Response, response, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
+            prompt={
+                "id": "id",
+                "variables": {"foo": "string"},
+                "version": "version",
+            },
             reasoning={
                 "effort": "low",
                 "generate_summary": "auto",
@@ -390,10 +391,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
 
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        http_response = await async_client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
+        http_response = await async_client.responses.with_raw_response.create()
 
         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -402,10 +400,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
 
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
+        async with async_client.responses.with_streaming_response.create() as http_response:
             assert not http_response.is_closed
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -417,8 +412,6 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response_stream = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
         await response_stream.response.aclose()
@@ -426,16 +419,21 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
         response_stream = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
+            prompt={
+                "id": "id",
+                "variables": {"foo": "string"},
+                "version": "version",
+            },
             reasoning={
                 "effort": "low",
                 "generate_summary": "auto",
@@ -464,8 +462,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
 
@@ -476,8 +472,6 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         async with async_client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
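The `tests/api_resources/test_responses.py` diff above now calls `responses.create()` with no arguments, so `model` and `input` are evidently optional, and a new reusable `prompt` parameter is threaded through the all-params tests. A minimal sketch of the resulting call shape; the prompt ID and variables below are placeholders, not real stored-prompt values:

```python
from openai import OpenAI

client = OpenAI()

# A stored prompt can now drive the request on its own; neither
# `model` nor `input` has to be passed explicitly.
response = client.responses.create(
    prompt={
        "id": "pmpt_123",  # placeholder ID of a stored, reusable prompt
        "version": "2",  # pin a specific prompt version (optional)
        "variables": {"name": "Jane"},  # substituted into the prompt template
    },
)
print(response.output_text)
```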
{"foo": "string"}, + "version": "version", + }, reasoning={ "effort": "low", "generate_summary": "auto", @@ -464,8 +462,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.responses.with_raw_response.create( - input="string", - model="gpt-4o", stream=True, ) @@ -476,8 +472,6 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.responses.with_streaming_response.create( - input="string", - model="gpt-4o", stream=True, ) as response: assert not response.is_closed diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py index a14c4f8da2..72a2f6c83d 100644 --- a/tests/api_resources/test_uploads.py +++ b/tests/api_resources/test_uploads.py @@ -148,7 +148,9 @@ def test_path_params_complete(self, client: OpenAI) -> None: class TestAsyncUploads: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py index 54bb75bc1d..5af95fec41 100644 --- a/tests/api_resources/test_vector_stores.py +++ b/tests/api_resources/test_vector_stores.py @@ -286,7 +286,9 @@ def test_path_params_search(self, client: OpenAI) -> None: class TestAsyncVectorStores: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/uploads/test_parts.py b/tests/api_resources/uploads/test_parts.py index 2bba241a6d..191d3a1b04 100644 --- a/tests/api_resources/uploads/test_parts.py +++ b/tests/api_resources/uploads/test_parts.py @@ -61,7 +61,9 @@ def test_path_params_create(self, client: OpenAI) -> None: class TestAsyncParts: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: diff --git a/tests/api_resources/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py index 0587cfc56a..ac678ce912 100644 --- a/tests/api_resources/vector_stores/test_file_batches.py +++ b/tests/api_resources/vector_stores/test_file_batches.py @@ -232,7 +232,9 @@ def test_path_params_list_files(self, client: OpenAI) -> None: class TestAsyncFileBatches: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @parametrize async def test_method_create(self, async_client: 
diff --git a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py
index c13442261e..0778704d5d 100644
--- a/tests/api_resources/vector_stores/test_files.py
+++ b/tests/api_resources/vector_stores/test_files.py
@@ -323,7 +323,9 @@ def test_path_params_content(self, client: OpenAI) -> None:
 
 
 class TestAsyncFiles:
-    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+    parametrize = pytest.mark.parametrize(
+        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+    )
 
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
diff --git a/tests/conftest.py b/tests/conftest.py
index 8b01753e2f..408bcf76c0 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,13 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
 from __future__ import annotations
 
 import os
 import logging
 from typing import TYPE_CHECKING, Iterator, AsyncIterator
 
+import httpx
 import pytest
 from pytest_asyncio import is_async_test
 
-from openai import OpenAI, AsyncOpenAI
+from openai import OpenAI, AsyncOpenAI, DefaultAioHttpClient
+from openai._utils import is_dict
 
 if TYPE_CHECKING:
     from _pytest.fixtures import FixtureRequest  # pyright: ignore[reportPrivateImportUsage]
@@ -25,6 +29,19 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
     for async_test in pytest_asyncio_tests:
         async_test.add_marker(session_scope_marker, append=False)
 
+    # We skip tests that use both the aiohttp client and respx_mock as respx_mock
+    # doesn't support custom transports.
+    for item in items:
+        if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames:
+            continue
+
+        if not hasattr(item, "callspec"):
+            continue
+
+        async_client_param = item.callspec.params.get("async_client")
+        if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp":
+            item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock"))
+
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -43,9 +60,25 @@ def client(request: FixtureRequest) -> Iterator[OpenAI]:
 
 @pytest.fixture(scope="session")
 async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncOpenAI]:
-    strict = getattr(request, "param", True)
-    if not isinstance(strict, bool):
-        raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
-
-    async with AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client:
+    param = getattr(request, "param", True)
+
+    # defaults
+    strict = True
+    http_client: None | httpx.AsyncClient = None
+
+    if isinstance(param, bool):
+        strict = param
+    elif is_dict(param):
+        strict = param.get("strict", True)
+        assert isinstance(strict, bool)
+
+        http_client_type = param.get("http_client", "httpx")
+        if http_client_type == "aiohttp":
+            http_client = DefaultAioHttpClient()
+    else:
+        raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict")
+
+    async with AsyncOpenAI(
+        base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client
+    ) as client:
        yield client
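With the rewritten `async_client` fixture in `tests/conftest.py` above, a parametrized dict may carry optional `"strict"` and `"http_client"` keys, and the fixture yields a fully configured `AsyncOpenAI`. A sketch of a combination no test in this patch actually uses (a hypothetical test, assuming it runs under this repository's `tests/conftest.py`):

```python
import pytest
from openai import AsyncOpenAI


@pytest.mark.parametrize(
    "async_client",
    [{"strict": False, "http_client": "aiohttp"}],  # loose validation over the aiohttp backend
    indirect=True,
    ids=["loose-aiohttp"],
)
async def test_list_models(async_client: AsyncOpenAI) -> None:
    page = await async_client.models.list()
    assert page.data is not None
```

Note also the `pytest_collection_modifyitems` hook above: any test that requests both this fixture's aiohttp variant and `respx_mock` is skipped automatically, since respx cannot intercept a non-httpx transport.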
diff --git a/tests/test_client.py b/tests/test_client.py
index 2b7aeaf946..3d08a0a601 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -23,13 +23,17 @@
 from openai import OpenAI, AsyncOpenAI, APIResponseValidationError
 from openai._types import Omit
-from openai._utils import maybe_transform
 from openai._models import BaseModel, FinalRequestOptions
-from openai._constants import RAW_RESPONSE_HEADER
 from openai._streaming import Stream, AsyncStream
 from openai._exceptions import OpenAIError, APIStatusError, APITimeoutError, APIResponseValidationError
-from openai._base_client import DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, make_request_options
-from openai.types.chat.completion_create_params import CompletionCreateParamsNonStreaming
+from openai._base_client import (
+    DEFAULT_TIMEOUT,
+    HTTPX_DEFAULT_TIMEOUT,
+    BaseClient,
+    DefaultHttpxClient,
+    DefaultAsyncHttpxClient,
+    make_request_options,
+)
 
 from .utils import update_env
@@ -718,60 +722,37 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
 
     @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+    def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: OpenAI) -> None:
         respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
 
         with pytest.raises(APITimeoutError):
-            self.client.post(
-                "/chat/completions",
-                body=cast(
-                    object,
-                    maybe_transform(
-                        dict(
-                            messages=[
-                                {
-                                    "role": "user",
-                                    "content": "Say this is a test",
-                                }
-                            ],
-                            model="gpt-4o",
-                        ),
-                        CompletionCreateParamsNonStreaming,
-                    ),
-                ),
-                cast_to=httpx.Response,
-                options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
-            )
+            client.chat.completions.with_streaming_response.create(
+                messages=[
+                    {
+                        "content": "string",
+                        "role": "developer",
+                    }
+                ],
+                model="gpt-4o",
+            ).__enter__()
 
         assert _get_open_connections(self.client) == 0
 
     @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+    def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: OpenAI) -> None:
         respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
 
         with pytest.raises(APIStatusError):
-            self.client.post(
-                "/chat/completions",
-                body=cast(
-                    object,
-                    maybe_transform(
-                        dict(
-                            messages=[
-                                {
-                                    "role": "user",
-                                    "content": "Say this is a test",
-                                }
-                            ],
-                            model="gpt-4o",
-                        ),
-                        CompletionCreateParamsNonStreaming,
-                    ),
-                ),
-                cast_to=httpx.Response,
-                options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
-            )
-
+            client.chat.completions.with_streaming_response.create(
+                messages=[
+                    {
+                        "content": "string",
+                        "role": "developer",
+                    }
+                ],
+                model="gpt-4o",
+            ).__enter__()
         assert _get_open_connections(self.client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -908,6 +889,28 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         assert response.retries_taken == failures_before_success
         assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
 
+    def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        # Test that the proxy environment variables are set correctly
+        monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+
+        client = DefaultHttpxClient()
+
+        mounts = tuple(client._mounts.items())
+        assert len(mounts) == 1
+        assert mounts[0][0].pattern == "https://"
+
+    @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning")
+    def test_default_client_creation(self) -> None:
+        # Ensure that the client can be initialized without any exceptions
+        DefaultHttpxClient(
+            verify=True,
+            cert=None,
+            trust_env=True,
+            http1=True,
+            http2=False,
+            limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+        )
+
     @pytest.mark.respx(base_url=base_url)
     def test_follow_redirects(self, respx_mock: MockRouter) -> None:
         # Test that the default follow_redirects=True allows following redirects
@@ -1618,60 +1621,37 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
 
     @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+    async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, async_client: AsyncOpenAI) -> None:
         respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
 
         with pytest.raises(APITimeoutError):
-            await self.client.post(
-                "/chat/completions",
-                body=cast(
-                    object,
-                    maybe_transform(
-                        dict(
-                            messages=[
-                                {
-                                    "role": "user",
-                                    "content": "Say this is a test",
-                                }
-                            ],
-                            model="gpt-4o",
-                        ),
-                        CompletionCreateParamsNonStreaming,
-                    ),
-                ),
-                cast_to=httpx.Response,
-                options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
-            )
+            await async_client.chat.completions.with_streaming_response.create(
+                messages=[
+                    {
+                        "content": "string",
+                        "role": "developer",
+                    }
+                ],
+                model="gpt-4o",
+            ).__aenter__()
 
         assert _get_open_connections(self.client) == 0
 
     @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+    async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, async_client: AsyncOpenAI) -> None:
         respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
 
         with pytest.raises(APIStatusError):
-            await self.client.post(
-                "/chat/completions",
-                body=cast(
-                    object,
-                    maybe_transform(
-                        dict(
-                            messages=[
-                                {
-                                    "role": "user",
-                                    "content": "Say this is a test",
-                                }
-                            ],
-                            model="gpt-4o",
-                        ),
-                        CompletionCreateParamsNonStreaming,
-                    ),
-                ),
-                cast_to=httpx.Response,
-                options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
-            )
-
+            await async_client.chat.completions.with_streaming_response.create(
+                messages=[
+                    {
+                        "content": "string",
+                        "role": "developer",
+                    }
+                ],
+                model="gpt-4o",
+            ).__aenter__()
         assert _get_open_connections(self.client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -1857,6 +1837,28 @@ async def test_main() -> None:
 
         time.sleep(0.1)
 
+    async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
+        # Test that the proxy environment variables are set correctly
+        monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+
+        client = DefaultAsyncHttpxClient()
+
+        mounts = tuple(client._mounts.items())
+        assert len(mounts) == 1
+        assert mounts[0][0].pattern == "https://"
+
+    @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning")
+    async def test_default_client_creation(self) -> None:
+        # Ensure that the client can be initialized without any exceptions
+        DefaultAsyncHttpxClient(
+            verify=True,
+            cert=None,
+            trust_env=True,
+            http1=True,
+            http2=False,
+            limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+        )
+
     @pytest.mark.respx(base_url=base_url)
     async def test_follow_redirects(self, respx_mock: MockRouter) -> None:
         # Test that the default follow_redirects=True allows following redirects