Skip to content

Commit ead750d

Browse files
authored
Merge branch 'main' into update_samples_env_config
2 parents c4b1009 + 7ea40bf commit ead750d

File tree

5 files changed

+142
-2
lines changed

5 files changed

+142
-2
lines changed

openai_agents/model_providers/README.md

Lines changed: 31 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ Start the LiteLLM provider worker:
1818
# Set the required environment variable for your chosen provider
1919
export ANTHROPIC_API_KEY="your_anthropic_api_key" # For Anthropic
2020

21-
uv run openai_agents/model_providers/run_worker_litellm_provider.py
21+
uv run openai_agents/model_providers/run_litellm_provider_worker.py
2222
```
2323

2424
Then run the example in a separate terminal:
@@ -30,7 +30,36 @@ The example uses Anthropic Claude by default but can be modified to use other Li
3030

3131
Find more LiteLLM providers at: https://docs.litellm.ai/docs/providers
3232

33-
## Not Yet Implemented
33+
### Extra
34+
35+
#### GPT-OSS with Ollama
36+
37+
This example demonstrates tool calling using the gpt-oss reasoning model with a local Ollama server.
38+
Running this example requires sufficiently powerful hardware (and involves a 14 GB model download).
39+
It is adapted from the [OpenAI Cookbook example](https://cookbook.openai.com/articles/gpt-oss/run-locally-ollama#agents-sdk-integration).
40+
41+
42+
Make sure you have [Ollama](https://ollama.com/) installed:
43+
```bash
44+
ollama serve
45+
```
46+
47+
Download the `gpt-oss` model:
48+
```bash
49+
ollama pull gpt-oss:20b
50+
```
51+
52+
Start the gpt-oss worker:
53+
```bash
54+
uv run openai_agents/model_providers/run_gpt_oss_worker.py
55+
```
56+
57+
Then run the example in a separate terminal:
58+
```bash
59+
uv run openai_agents/model_providers/run_gpt_oss_workflow.py
60+
```
61+
62+
### Not Yet Implemented
3463

3564
- **Custom Example Agent** - Custom OpenAI client integration
3665
- **Custom Example Global** - Global default client configuration
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
import asyncio
2+
import logging
3+
from datetime import timedelta
4+
from typing import Optional
5+
6+
from agents import Model, ModelProvider, OpenAIChatCompletionsModel
7+
from openai import AsyncOpenAI
8+
from temporalio.client import Client
9+
from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin
10+
from temporalio.worker import Worker
11+
12+
from openai_agents.model_providers.workflows.gpt_oss_workflow import GptOssWorkflow
13+
14+
ollama_client = AsyncOpenAI(
15+
base_url="http://localhost:11434/v1", # Local Ollama API endpoint
16+
api_key="ollama", # Ignored by Ollama
17+
)
18+
19+
20+
class CustomModelProvider(ModelProvider):
21+
def get_model(self, model_name: Optional[str]) -> Model:
22+
model = OpenAIChatCompletionsModel(
23+
model=model_name if model_name else "gpt-oss:20b",
24+
openai_client=ollama_client,
25+
)
26+
return model
27+
28+
29+
async def main():
30+
# Configure logging to show workflow debug messages
31+
logging.basicConfig(level=logging.WARNING)
32+
logging.getLogger("temporalio.workflow").setLevel(logging.DEBUG)
33+
34+
# Create client connected to server at the given address
35+
client = await Client.connect(
36+
"localhost:7233",
37+
plugins=[
38+
OpenAIAgentsPlugin(
39+
model_params=ModelActivityParameters(
40+
start_to_close_timeout=timedelta(seconds=30)
41+
),
42+
model_provider=CustomModelProvider(),
43+
),
44+
],
45+
)
46+
47+
worker = Worker(
48+
client,
49+
task_queue="openai-agents-model-providers-task-queue",
50+
workflows=[
51+
GptOssWorkflow,
52+
],
53+
)
54+
await worker.run()
55+
56+
57+
if __name__ == "__main__":
58+
asyncio.run(main())
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
import asyncio
2+
3+
from temporalio.client import Client
4+
from temporalio.contrib.openai_agents import OpenAIAgentsPlugin
5+
6+
from openai_agents.model_providers.workflows.gpt_oss_workflow import GptOssWorkflow
7+
8+
9+
async def main():
10+
client = await Client.connect(
11+
"localhost:7233",
12+
plugins=[
13+
OpenAIAgentsPlugin(),
14+
],
15+
)
16+
17+
result = await client.execute_workflow(
18+
GptOssWorkflow.run,
19+
"What's the weather in Tokyo?",
20+
id="litellm-gpt-oss-workflow-id",
21+
task_queue="openai-agents-model-providers-task-queue",
22+
)
23+
print(f"Result: {result}")
24+
25+
26+
if __name__ == "__main__":
27+
asyncio.run(main())
Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
from __future__ import annotations
2+
3+
from agents import Agent, Runner, function_tool, set_tracing_disabled
4+
from temporalio import workflow
5+
6+
7+
@workflow.defn
8+
class GptOssWorkflow:
9+
@workflow.run
10+
async def run(self, prompt: str) -> str:
11+
set_tracing_disabled(disabled=True)
12+
13+
@function_tool
14+
def get_weather(city: str):
15+
workflow.logger.debug(f"Getting weather for {city}")
16+
return f"The weather in {city} is sunny."
17+
18+
agent = Agent(
19+
name="Assistant",
20+
instructions="You only respond in haikus. When asked about the weather always use the tool to get the current weather..",
21+
model="gpt-oss:20b",
22+
tools=[get_weather],
23+
)
24+
25+
result = await Runner.run(agent, prompt)
26+
return result.final_output

0 commit comments

Comments
 (0)