Skip to content

Update jules_wip branch with latest main and fix Executive Assistant implementation #16

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 14 commits into
base: jules_wip_10459775938551966727
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions docs/tracing.md
Original file line number Diff line number Diff line change
Expand Up @@ -115,3 +115,4 @@ To customize this default setup, to send traces to alternative or additional bac
- [Langfuse](https://langfuse.com/docs/integrations/openaiagentssdk/openai-agents)
- [Langtrace](https://docs.langtrace.ai/supported-integrations/llm-frameworks/openai-agents-sdk)
- [Okahu-Monocle](https://github.com/monocle2ai/monocle)
- [Galileo](https://v2docs.galileo.ai/integrations/openai-agent-integration#openai-agent-integration)
Empty file added examples/hosted_mcp/__init__.py
Empty file.
61 changes: 61 additions & 0 deletions examples/hosted_mcp/approvals.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import argparse
import asyncio

from agents import (
Agent,
HostedMCPTool,
MCPToolApprovalFunctionResult,
MCPToolApprovalRequest,
Runner,
)

"""This example demonstrates how to use the hosted MCP support in the OpenAI Responses API, with
approval callbacks."""


def approval_callback(request: MCPToolApprovalRequest) -> MCPToolApprovalFunctionResult:
    """Ask the user on stdin whether a hosted MCP tool call may run.

    Returns an approval result; a denial carries a reason so the model
    knows why the call was blocked.
    """
    approved = input(f"Approve running the tool `{request.data.name}`? (y/n) ") == "y"
    if approved:
        return {"approve": True}
    return {"approve": False, "reason": "User denied"}


async def main(verbose: bool, stream: bool):
    """Run an agent backed by a hosted MCP tool that requires approval for every call.

    Args:
        verbose: When True, print every new item produced by the run afterwards.
        stream: When True, consume the run as a stream of events instead of awaiting it.
    """
    agent = Agent(
        name="Assistant",
        tools=[
            HostedMCPTool(
                tool_config={
                    "type": "mcp",
                    "server_label": "gitmcp",
                    "server_url": "https://gitmcp.io/openai/codex",
                    "require_approval": "always",
                },
                on_approval_request=approval_callback,
            )
        ],
    )

    if stream:
        result = Runner.run_streamed(agent, "Which language is this repo written in?")
        async for event in result.stream_events():
            if event.type == "run_item_stream_event":
                print(f"Got event of type {event.item.__class__.__name__}")
        print(f"Done streaming; final result: {result.final_output}")
    else:
        # Bug fix: bind to `result` (the original bound `res`), otherwise the
        # `verbose` block below raises NameError on the non-streaming path.
        result = await Runner.run(agent, "Which language is this repo written in?")
        print(result.final_output)

    if verbose:
        for item in result.new_items:
            print(item)


if __name__ == "__main__":
    # Example: python approvals.py --stream --verbose
    cli = argparse.ArgumentParser()
    cli.add_argument("--verbose", action="store_true", default=False)
    cli.add_argument("--stream", action="store_true", default=False)
    opts = cli.parse_args()

    asyncio.run(main(opts.verbose, opts.stream))
47 changes: 47 additions & 0 deletions examples/hosted_mcp/simple.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import argparse
import asyncio

from agents import Agent, HostedMCPTool, Runner

"""This example demonstrates how to use the hosted MCP support in the OpenAI Responses API, with
approvals not required for any tools. You should only use this for trusted MCP servers."""


async def main(verbose: bool, stream: bool):
    """Run an agent backed by a hosted MCP tool with approvals disabled.

    Only use `"require_approval": "never"` with trusted MCP servers.

    Args:
        verbose: When True, print every new item produced by the run afterwards.
        stream: When True, consume the run as a stream of events instead of awaiting it.
    """
    agent = Agent(
        name="Assistant",
        tools=[
            HostedMCPTool(
                tool_config={
                    "type": "mcp",
                    "server_label": "gitmcp",
                    "server_url": "https://gitmcp.io/openai/codex",
                    "require_approval": "never",
                }
            )
        ],
    )

    if stream:
        result = Runner.run_streamed(agent, "Which language is this repo written in?")
        async for event in result.stream_events():
            if event.type == "run_item_stream_event":
                print(f"Got event of type {event.item.__class__.__name__}")
        print(f"Done streaming; final result: {result.final_output}")
    else:
        # Bug fix: bind to `result` (the original bound `res`), otherwise the
        # `verbose` block below raises NameError on the non-streaming path.
        result = await Runner.run(agent, "Which language is this repo written in?")
        print(result.final_output)
        # The repository is primarily written in multiple languages, including Rust and TypeScript...

    if verbose:
        for item in result.new_items:
            print(item)


if __name__ == "__main__":
    # Example: python simple.py --stream --verbose
    cli = argparse.ArgumentParser()
    cli.add_argument("--verbose", action="store_true", default=False)
    cli.add_argument("--stream", action="store_true", default=False)
    opts = cli.parse_args()

    asyncio.run(main(opts.verbose, opts.stream))
34 changes: 34 additions & 0 deletions examples/tools/code_interpreter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import asyncio

from agents import Agent, CodeInterpreterTool, Runner, trace


async def main():
    """Stream a math-solving run that uses the hosted code interpreter tool.

    Prints the code the interpreter executes as it streams, then the final answer.
    """
    agent = Agent(
        name="Code interpreter",
        instructions="You love doing math.",
        tools=[
            CodeInterpreterTool(
                tool_config={"type": "code_interpreter", "container": {"type": "auto"}},
            )
        ],
    )

    with trace("Code interpreter example"):
        print("Solving math problem...")
        # Bug fix: the prompt read "root of273" — a missing space garbled the question.
        result = Runner.run_streamed(agent, "What is the square root of 273 * 312821 plus 1782?")
        async for event in result.stream_events():
            if (
                event.type == "run_item_stream_event"
                and event.item.type == "tool_call_item"
                and event.item.raw_item.type == "code_interpreter_call"
            ):
                # Surface the generated code so the user can see what ran.
                print(f"Code interpreter code:\n```\n{event.item.raw_item.code}\n```\n")
            elif event.type == "run_item_stream_event":
                print(f"Other event: {event.item.type}")

        print(f"Final output: {result.final_output}")


if __name__ == "__main__":
    # Entry point: run the async example to completion.
    asyncio.run(main())
54 changes: 54 additions & 0 deletions examples/tools/image_generator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
import asyncio
import base64
import os
import subprocess
import sys
import tempfile

from agents import Agent, ImageGenerationTool, Runner, trace


def open_file(path: str) -> None:
    """Open *path* with the platform's default application (best effort).

    Checks macOS first because on macOS `os.name` is also "posix".
    """
    if sys.platform.startswith("darwin"):
        subprocess.run(["open", path], check=False)  # macOS
    elif os.name == "nt":  # Windows
        # Bug fix: the original called os.astartfile, which does not exist
        # (hence the original `# type: ignore`); the correct API is os.startfile.
        os.startfile(path)  # type: ignore[attr-defined]  # Windows-only API
    elif os.name == "posix":
        subprocess.run(["xdg-open", path], check=False)  # Linux/Unix
    else:
        print(f"Don't know how to open files on this platform: {sys.platform}")


async def main():
    """Generate an image with the hosted image-generation tool and open it locally."""
    agent = Agent(
        name="Image generator",
        instructions="You are a helpful agent.",
        tools=[
            ImageGenerationTool(
                tool_config={"type": "image_generation", "quality": "low"},
            )
        ],
    )

    with trace("Image generation example"):
        print("Generating image, this may take a while...")
        result = await Runner.run(
            agent, "Create an image of a frog eating a pizza, comic book style."
        )
        print(result.final_output)
        for item in result.new_items:
            # Only image-generation tool calls that actually returned data matter.
            if item.type != "tool_call_item":
                continue
            if item.raw_item.type != "image_generation_call":
                continue
            img_result = item.raw_item.result
            if not img_result:
                continue
            # Persist the base64 payload to a temp PNG, then hand it to the OS viewer.
            with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
                tmp.write(base64.b64decode(img_result))
                temp_path = tmp.name

            # Open the image
            open_file(temp_path)


if __name__ == "__main__":
    # Entry point: run the async example to completion.
    asyncio.run(main())
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
[project]
name = "openai-agents"
version = "0.0.15"
version = "0.0.16"
description = "OpenAI Agents SDK"
readme = "README.md"
requires-python = ">=3.9"
license = "MIT"
authors = [{ name = "OpenAI", email = "support@openai.com" }]
dependencies = [
"openai>=1.76.0",
"openai>=1.81.0",
"pydantic>=2.10, <3",
"griffe>=1.5.6, <2",
"typing-extensions>=4.12.2, <5",
Expand Down
21 changes: 21 additions & 0 deletions src/agents/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

from . import _config
from .agent import Agent, ToolsToFinalOutputFunction, ToolsToFinalOutputResult
from .executive_assistant import ExecutiveAssistantState, executive_assistant_agent
from .agent_output import AgentOutputSchema, AgentOutputSchemaBase
from .computer import AsyncComputer, Button, Computer, Environment
from .exceptions import (
Expand Down Expand Up @@ -54,10 +55,19 @@
StreamEvent,
)
from .tool import (
CodeInterpreterTool,
ComputerTool,
FileSearchTool,
FunctionTool,
FunctionToolResult,
HostedMCPTool,
ImageGenerationTool,
LocalShellCommandRequest,
LocalShellExecutor,
LocalShellTool,
MCPToolApprovalFunction,
MCPToolApprovalFunctionResult,
MCPToolApprovalRequest,
Tool,
WebSearchTool,
default_tool_error_function,
Expand Down Expand Up @@ -149,6 +159,8 @@ def enable_verbose_stdout_logging():
"Agent",
"ToolsToFinalOutputFunction",
"ToolsToFinalOutputResult",
"ExecutiveAssistantState",
"executive_assistant_agent",
"Runner",
"Model",
"ModelProvider",
Expand Down Expand Up @@ -206,8 +218,17 @@ def enable_verbose_stdout_logging():
"FunctionToolResult",
"ComputerTool",
"FileSearchTool",
"CodeInterpreterTool",
"ImageGenerationTool",
"LocalShellCommandRequest",
"LocalShellExecutor",
"LocalShellTool",
"Tool",
"WebSearchTool",
"HostedMCPTool",
"MCPToolApprovalFunction",
"MCPToolApprovalRequest",
"MCPToolApprovalFunctionResult",
"function_tool",
"Usage",
"add_trace_processor",
Expand Down
Loading