"""Retry-behaviour tests for the OpenAI SDK client (sync and async).

Every test stubs the network with ``httpx.MockTransport`` and replaces the
sleep primitive the SDK uses between attempts (``time.sleep`` for the sync
client, ``anyio.sleep`` for the async client) so no real waiting happens.

NOTE(review): the original file accidentally pasted a second copy of the
whole module inside ``test_respects_retry_after_seconds_async``, leaving
those inner definitions unreachable; this is the deduplicated module.
"""

import time

import anyio
import httpx
import pytest

from openai import (
    AsyncOpenAI,
    DefaultAsyncHttpxClient,
    DefaultHttpxClient,
    OpenAI,
    RateLimitError,
)


def _record_sync_sleeps(monkeypatch):
    """Patch ``time.sleep`` to record requested delays; return the recorder list."""
    sleeps = []
    monkeypatch.setattr(time, "sleep", lambda s: sleeps.append(s))
    return sleeps


def _record_async_sleeps(monkeypatch):
    """Patch ``anyio.sleep`` to record requested delays; return the recorder list."""
    sleeps = []

    async def fake_sleep(s):
        sleeps.append(s)

    monkeypatch.setattr(anyio, "sleep", fake_sleep)
    return sleeps


def _sync_client(handler, max_retries):
    """Sync client whose HTTP layer is a MockTransport over *handler*."""
    return OpenAI(
        api_key="sk-test",  # fix: original relied on OPENAI_API_KEY being exported
        max_retries=max_retries,
        http_client=DefaultHttpxClient(transport=httpx.MockTransport(handler)),
    )


def _async_client(handler, max_retries):
    """Async counterpart of :func:`_sync_client`."""
    return AsyncOpenAI(
        api_key="sk-test",
        max_retries=max_retries,
        http_client=DefaultAsyncHttpxClient(transport=httpx.MockTransport(handler)),
    )


def test_respects_retry_after_seconds(monkeypatch):
    """A 429 carrying ``Retry-After: 2`` delays the retry by at least 2s."""
    sleeps = _record_sync_sleeps(monkeypatch)
    attempts = {"n": 0}

    def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        if attempts["n"] == 1:
            return httpx.Response(429, headers={"Retry-After": "2"}, json={"err": "rate"})
        return httpx.Response(200, json={"ok": True})

    client = _sync_client(handler, max_retries=1)
    client.responses.create(model="gpt-4o-mini", input="hi")

    assert sleeps, "expected a sleep before retry"
    assert sleeps[0] >= 2.0  # may exceed 2.0 if the SDK adds jitter


def test_no_retry_on_success(monkeypatch):
    """A 200 on the first attempt triggers neither a retry nor a sleep."""
    sleeps = _record_sync_sleeps(monkeypatch)
    attempts = {"n": 0}

    def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        return httpx.Response(200, json={"ok": True})

    client = _sync_client(handler, max_retries=3)
    client.responses.create(model="gpt-4o-mini", input="hi")

    assert attempts["n"] == 1
    assert not sleeps


def test_max_retries_exceeded(monkeypatch):
    """All attempts return 429: the SDK gives up after ``max_retries`` and raises.

    fix: the SDK surfaces its own ``openai.RateLimitError`` (an
    ``APIStatusError`` subclass), never ``httpx.HTTPStatusError`` — the
    original ``pytest.raises(httpx.HTTPStatusError)`` could not pass.
    """
    sleeps = _record_sync_sleeps(monkeypatch)
    attempts = {"n": 0}

    def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        return httpx.Response(429, headers={"Retry-After": "1"}, json={"err": "rate"})

    client = _sync_client(handler, max_retries=2)

    with pytest.raises(RateLimitError):
        client.responses.create(model="gpt-4o-mini", input="hi")

    assert attempts["n"] == 3  # 1 original + 2 retries
    assert len(sleeps) == 2
    assert all(s >= 1.0 for s in sleeps)


@pytest.mark.asyncio
async def test_respects_retry_after_seconds_async(monkeypatch):
    """Async twin of :func:`test_respects_retry_after_seconds`."""
    sleeps = _record_async_sleeps(monkeypatch)
    attempts = {"n": 0}

    async def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        if attempts["n"] == 1:
            return httpx.Response(429, headers={"Retry-After": "2"}, json={"err": "rate"})
        return httpx.Response(200, json={"ok": True})

    client = _async_client(handler, max_retries=1)
    await client.responses.create(model="gpt-4o-mini", input="hi")

    assert sleeps, "expected a sleep before retry"
    assert sleeps[0] >= 2.0


@pytest.mark.asyncio
async def test_no_retry_on_success_async(monkeypatch):
    """Async twin of :func:`test_no_retry_on_success`."""
    sleeps = _record_async_sleeps(monkeypatch)
    attempts = {"n": 0}

    async def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        return httpx.Response(200, json={"ok": True})

    client = _async_client(handler, max_retries=3)
    await client.responses.create(model="gpt-4o-mini", input="hi")

    assert attempts["n"] == 1
    assert not sleeps


@pytest.mark.asyncio
async def test_max_retries_exceeded_async(monkeypatch):
    """Async twin of :func:`test_max_retries_exceeded` (same exception-type fix)."""
    sleeps = _record_async_sleeps(monkeypatch)
    attempts = {"n": 0}

    async def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        return httpx.Response(429, headers={"Retry-After": "1"}, json={"err": "rate"})

    client = _async_client(handler, max_retries=2)

    with pytest.raises(RateLimitError):
        await client.responses.create(model="gpt-4o-mini", input="hi")

    assert attempts["n"] == 3
    assert len(sleeps) == 2
    assert all(s >= 1.0 for s in sleeps)


def test_retry_with_jitter(monkeypatch):
    """Two 429s then a 200: with ``max_retries=2`` the third attempt succeeds.

    fix: the original wrapped this call in ``pytest.raises`` even though the
    handler returns 200 on attempt 3, which is within the retry budget — the
    call completes normally, so no exception is raised.
    """
    sleeps = _record_sync_sleeps(monkeypatch)
    attempts = {"n": 0}

    def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        if attempts["n"] < 3:
            return httpx.Response(429, headers={"Retry-After": "1"}, json={"err": "rate"})
        return httpx.Response(200, json={"ok": True})

    client = _sync_client(handler, max_retries=2)
    client.responses.create(model="gpt-4o-mini", input="hi")

    assert attempts["n"] == 3
    assert len(sleeps) == 2


def test_retry_on_different_status(monkeypatch):
    """A retryable 500 is retried just like a 429."""
    sleeps = _record_sync_sleeps(monkeypatch)
    attempts = {"n": 0}

    def handler(request: httpx.Request) -> httpx.Response:
        attempts["n"] += 1
        if attempts["n"] == 1:
            return httpx.Response(500, json={"err": "server"})
        return httpx.Response(200, json={"ok": True})

    client = _sync_client(handler, max_retries=1)
    client.responses.create(model="gpt-4o-mini", input="hi")

    assert attempts["n"] == 2
"""Timeout-propagation tests for the OpenAI SDK client (sync and async).

httpx hands the effective timeout to the transport through
``request.extensions["timeout"]``; each test captures that value from a
``MockTransport`` handler and checks what the SDK forwarded.

NOTE(review): the original file accidentally pasted a second copy of the
whole module inside ``test_per_request_timeout_overrides_default_async``;
this is the deduplicated module.
"""

import httpx
import pytest

from openai import AsyncOpenAI, DefaultAsyncHttpxClient, DefaultHttpxClient, OpenAI


def _assert_timeout_equals(t, expected):
    """Assert that captured timeout *t* actually carries *expected* seconds.

    fix: the original check ``getattr(t, "read", None) in (None, expected)``
    was vacuous — a missing/None attribute satisfied it, so the assertion
    could never fail. Also handle the plain-dict form httpx passes to
    transports in ``extensions["timeout"]``.
    """
    if isinstance(t, dict):
        # httpx transports receive {"connect": ..., "read": ..., "write": ..., "pool": ...}
        assert expected in (t.get("read"), t.get("connect"), t.get("write")), (
            f"timeout {t!r} does not carry {expected}"
        )
    elif isinstance(t, (int, float)):
        assert t == expected
    else:
        # presumably an httpx.Timeout instance — TODO confirm against the SDK
        fields = (
            getattr(t, "read", None),
            getattr(t, "connect", None),
            getattr(t, "write", None),
        )
        assert expected in fields, f"timeout {t!r} does not carry {expected}"


def _sync_client(handler, **kwargs):
    """Sync client over a MockTransport; extra kwargs go straight to ``OpenAI``."""
    return OpenAI(
        api_key="sk-test",  # fix: original relied on OPENAI_API_KEY being exported
        http_client=DefaultHttpxClient(transport=httpx.MockTransport(handler)),
        **kwargs,
    )


def _async_client(handler, **kwargs):
    """Async counterpart of :func:`_sync_client`."""
    return AsyncOpenAI(
        api_key="sk-test",
        http_client=DefaultAsyncHttpxClient(transport=httpx.MockTransport(handler)),
        **kwargs,
    )


def test_per_request_timeout_overrides_default(monkeypatch):
    """``with_options(timeout=5.0)`` beats the 60s client default."""
    captured = {}

    def handler(request: httpx.Request) -> httpx.Response:
        captured["timeout"] = request.extensions.get("timeout")
        return httpx.Response(200, json={"ok": True})

    client = _sync_client(handler, timeout=60.0)
    client.with_options(timeout=5.0).responses.create(model="gpt-4o-mini", input="ping")

    assert captured.get("timeout") is not None
    _assert_timeout_equals(captured["timeout"], 5.0)


def test_default_timeout_is_used(monkeypatch):
    """With no per-request override, the client default (42s) is forwarded."""
    captured = {}

    def handler(request: httpx.Request) -> httpx.Response:
        captured["timeout"] = request.extensions.get("timeout")
        return httpx.Response(200, json={"ok": True})

    client = _sync_client(handler, timeout=42.0)
    client.responses.create(model="gpt-4o-mini", input="ping")

    assert captured.get("timeout") is not None
    _assert_timeout_equals(captured["timeout"], 42.0)


def test_no_timeout(monkeypatch):
    """Without an explicit timeout the extension may be None or unset —
    only require that the key was propagated at all."""
    captured = {}

    def handler(request: httpx.Request) -> httpx.Response:
        captured["timeout"] = request.extensions.get("timeout")
        return httpx.Response(200, json={"ok": True})

    client = _sync_client(handler)
    client.responses.create(model="gpt-4o-mini", input="ping")

    assert "timeout" in captured


@pytest.mark.asyncio
async def test_per_request_timeout_overrides_default_async(monkeypatch):
    """Async twin of :func:`test_per_request_timeout_overrides_default`."""
    captured = {}

    async def handler(request: httpx.Request) -> httpx.Response:
        captured["timeout"] = request.extensions.get("timeout")
        return httpx.Response(200, json={"ok": True})

    client = _async_client(handler, timeout=60.0)
    await client.with_options(timeout=5.0).responses.create(model="gpt-4o-mini", input="ping")

    assert captured.get("timeout") is not None
    _assert_timeout_equals(captured["timeout"], 5.0)


@pytest.mark.asyncio
async def test_default_timeout_is_used_async(monkeypatch):
    """Async twin of :func:`test_default_timeout_is_used`."""
    captured = {}

    async def handler(request: httpx.Request) -> httpx.Response:
        captured["timeout"] = request.extensions.get("timeout")
        return httpx.Response(200, json={"ok": True})

    client = _async_client(handler, timeout=42.0)
    await client.responses.create(model="gpt-4o-mini", input="ping")

    assert captured.get("timeout") is not None
    _assert_timeout_equals(captured["timeout"], 42.0)


@pytest.mark.asyncio
async def test_no_timeout_async(monkeypatch):
    """Async twin of :func:`test_no_timeout`."""
    captured = {}

    async def handler(request: httpx.Request) -> httpx.Response:
        captured["timeout"] = request.extensions.get("timeout")
        return httpx.Response(200, json={"ok": True})

    client = _async_client(handler)
    await client.responses.create(model="gpt-4o-mini", input="ping")

    assert "timeout" in captured
"""Placeholder coverage for optional ``content_filter_results`` on images.

NOTE(review): both tests are stubs — the async one asserts nothing about
``content_filter_results`` yet, despite the commit message claiming that
coverage; flesh it out against the real ``Image`` model before relying on
this file.
"""

import pytest


def test_dummy():
    """Smoke test: keeps the module importable and discoverable."""
    assert True


# fix: use the same async marker as the rest of the suite
# (tests/test_retries.py and tests/test_timeouts.py use pytest.mark.asyncio),
# so this test runs under the configured plugin instead of being skipped.
@pytest.mark.asyncio
async def test_images_generate_includes_content_filter_results_async():
    """TODO: assert the generated Image exposes ``content_filter_results``."""
    assert True