From e6b9893e8ce70d2e527f2e87436d65ed6b344558 Mon Sep 17 00:00:00 2001 From: thein Date: Fri, 4 Jul 2025 16:15:59 -0400 Subject: [PATCH 1/6] update mcp server for prompts --- src/agents/mcp/server.py | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/src/agents/mcp/server.py b/src/agents/mcp/server.py index f6c2b58ef..b9a98bd7b 100644 --- a/src/agents/mcp/server.py +++ b/src/agents/mcp/server.py @@ -13,7 +13,7 @@ from mcp.client.sse import sse_client from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client from mcp.shared.message import SessionMessage -from mcp.types import CallToolResult, InitializeResult +from mcp.types import CallToolResult, InitializeResult, GetPromptResult, ListPromptsResult from typing_extensions import NotRequired, TypedDict from ..exceptions import UserError @@ -63,6 +63,22 @@ async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> C """Invoke a tool on the server.""" pass + @abc.abstractmethod + async def list_prompts( + self, + ) -> ListPromptsResult: + """List the prompts available on the server.""" + pass + + @abc.abstractmethod + async def get_prompt( + self, + name: str, + arguments: dict[str, Any] | None = None + ) -> GetPromptResult: + """Get a specific prompt from the server.""" + pass + class _MCPServerWithClientSession(MCPServer, abc.ABC): """Base class for MCP servers that use a `ClientSession` to communicate with the server.""" @@ -261,6 +277,26 @@ async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> C return await self.session.call_tool(tool_name, arguments) + async def list_prompts( + self, + ) -> ListPromptsResult: + """List the prompts available on the server.""" + if not self.session: + raise UserError("Server not initialized. Make sure you call `connect()` first.") + + return await self.session.list_prompts() + + async def get_prompt( + self, + name: str, + arguments: dict[str, Any] | None = None + ) -> GetPromptResult: + """Get a specific prompt from the server.""" + if not self.session: + raise UserError("Server not initialized. Make sure you call `connect()` first.") + + return await self.session.get_prompt(name, arguments) + async def cleanup(self): """Cleanup the server.""" async with self._cleanup_lock: From f5faa690a7e9848de8a958863bd8798e7e69f681 Mon Sep 17 00:00:00 2001 From: thein Date: Fri, 4 Jul 2025 16:17:17 -0400 Subject: [PATCH 2/6] test prompt server --- examples/mcp/prompt_server/README.md | 29 ++++++++ examples/mcp/prompt_server/main.py | 107 +++++++++++++++++++++++++++ examples/mcp/prompt_server/server.py | 36 +++++++++ 3 files changed, 172 insertions(+) create mode 100644 examples/mcp/prompt_server/README.md create mode 100644 examples/mcp/prompt_server/main.py create mode 100644 examples/mcp/prompt_server/server.py diff --git a/examples/mcp/prompt_server/README.md b/examples/mcp/prompt_server/README.md new file mode 100644 index 000000000..c1b1c3b37 --- /dev/null +++ b/examples/mcp/prompt_server/README.md @@ -0,0 +1,29 @@ +# MCP Prompt Server Example + +This example uses a local MCP prompt server in [server.py](server.py). + +Run the example via: + +``` +uv run python examples/mcp/prompt_server/main.py +``` + +## Details + +The example uses the `MCPServerStreamableHttp` class from `agents.mcp`. The server runs in a sub-process at `http://localhost:8000/mcp` and provides user-controlled prompts that generate agent instructions. 
+ +The server exposes prompts like `generate_code_review_instructions` that take parameters such as focus area and programming language. The agent calls these prompts to dynamically generate its system instructions based on user-provided parameters. + +## Workflow + +The example demonstrates two key functions: + +1. **`show_available_prompts`** - Lists all available prompts on the MCP server, showing users what prompts they can select from. This demonstrates the discovery aspect of MCP prompts. + +2. **`demo_code_review`** - Shows the complete user-controlled prompt workflow: + - Calls `generate_code_review_instructions` with specific parameters (focus: "security vulnerabilities", language: "python") + - Uses the generated instructions to create an Agent with specialized code review capabilities + - Runs the agent against vulnerable sample code (command injection via `os.system`) + - The agent analyzes the code and provides security-focused feedback using available tools + +This pattern allows users to dynamically configure agent behavior through MCP prompts rather than hardcoded instructions. \ No newline at end of file diff --git a/examples/mcp/prompt_server/main.py b/examples/mcp/prompt_server/main.py new file mode 100644 index 000000000..189f03874 --- /dev/null +++ b/examples/mcp/prompt_server/main.py @@ -0,0 +1,107 @@ +import asyncio +import os +import shutil +import subprocess +import time +from typing import Any + +from agents import Agent, Runner, gen_trace_id, trace +from agents.mcp import MCPServer, MCPServerStreamableHttp +from agents.model_settings import ModelSettings + + +async def get_instructions_from_prompt(mcp_server: MCPServer, prompt_name: str, **kwargs) -> str: + """Get agent instructions by calling MCP prompt endpoint (user-controlled)""" + print(f"Getting instructions from prompt: {prompt_name}") + + try: + prompt_result = await mcp_server.get_prompt(prompt_name, kwargs) + instructions = prompt_result.messages[0].content.text + print("Generated instructions") + return instructions + except Exception as e: + print(f"Failed to get instructions: {e}") + return f"You are a helpful assistant. Error: {e}" + + +async def demo_code_review(mcp_server: MCPServer): + """Demo: Code review with user-selected prompt""" + print("=== CODE REVIEW DEMO ===") + + # User explicitly selects prompt and parameters + instructions = await get_instructions_from_prompt( + mcp_server, + "generate_code_review_instructions", + focus="security vulnerabilities", + language="python" + ) + + agent = Agent( + name="Code Reviewer Agent", + instructions=instructions, # Instructions from MCP prompt + model_settings=ModelSettings(tool_choice="auto"), + ) + + message = """Please review this code: + +def process_user_input(user_input): + command = f"echo {user_input}" + os.system(command) + return "Command executed" + +""" + + print(f"Running: {message[:60]}...") + result = await Runner.run(starting_agent=agent, input=message) + print(result.final_output) + print("\n" + "="*50 + "\n") + + + +async def show_available_prompts(mcp_server: MCPServer): + """Show available prompts for user selection""" + print("=== AVAILABLE PROMPTS ===") + + prompts_result = await mcp_server.list_prompts() + print("User can select from these prompts:") + for i, prompt in enumerate(prompts_result.prompts, 1): + print(f" {i}. 
{prompt.name} - {prompt.description}") + print() + + +async def main(): + async with MCPServerStreamableHttp( + name="Simple Prompt Server", + params={"url": "http://localhost:8000/mcp"}, + ) as server: + trace_id = gen_trace_id() + with trace(workflow_name="Simple Prompt Demo", trace_id=trace_id): + print(f"Trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n") + + await show_available_prompts(server) + await demo_code_review(server) + + +if __name__ == "__main__": + if not shutil.which("uv"): + raise RuntimeError("uv is not installed") + + process: subprocess.Popen[Any] | None = None + try: + this_dir = os.path.dirname(os.path.abspath(__file__)) + server_file = os.path.join(this_dir, "server.py") + + print("Starting Simple Prompt Server...") + process = subprocess.Popen(["uv", "run", server_file]) + time.sleep(3) + print("Server started\n") + except Exception as e: + print(f"Error starting server: {e}") + exit(1) + + try: + asyncio.run(main()) + finally: + if process: + process.terminate() + print("Server terminated.") \ No newline at end of file diff --git a/examples/mcp/prompt_server/server.py b/examples/mcp/prompt_server/server.py new file mode 100644 index 000000000..9cb7095e7 --- /dev/null +++ b/examples/mcp/prompt_server/server.py @@ -0,0 +1,36 @@ +import json +from datetime import datetime +from typing import Any, Dict +from mcp.server.fastmcp import FastMCP + +# Create server +mcp = FastMCP("Prompt Server") + +# Instruction-generating prompts (user-controlled) +@mcp.prompt() +def generate_code_review_instructions(focus: str = "general code quality", language: str = "python") -> str: + """Generate agent instructions for code review tasks""" + print(f"[debug-server] generate_code_review_instructions({focus}, {language})") + + return f"""You are a senior {language} code review specialist. Your role is to provide comprehensive code analysis with focus on {focus}. + +INSTRUCTIONS: +- Analyze code for quality, security, performance, and best practices +- Provide specific, actionable feedback with examples +- Identify potential bugs, vulnerabilities, and optimization opportunities +- Suggest improvements with code examples when applicable +- Be constructive and educational in your feedback +- Focus particularly on {focus} aspects + +RESPONSE FORMAT: +1. Overall Assessment +2. Specific Issues Found +3. Security Considerations +4. Performance Notes +5. Recommended Improvements +6. 
Best Practices Suggestions + +Use the available tools to check current time if you need timestamps for your analysis.""" + +if __name__ == "__main__": + mcp.run(transport="streamable-http") \ No newline at end of file From 23f5a1424ba076f1a66771fdcd83c0662c5ac1a4 Mon Sep 17 00:00:00 2001 From: thein Date: Fri, 4 Jul 2025 16:37:13 -0400 Subject: [PATCH 3/6] add pytest --- examples/mcp/prompt_server/main.py | 19 +- examples/mcp/prompt_server/server.py | 13 +- tests/mcp/test_prompt_server.py | 299 +++++++++++++++++++++++++++ 3 files changed, 315 insertions(+), 16 deletions(-) create mode 100644 tests/mcp/test_prompt_server.py diff --git a/examples/mcp/prompt_server/main.py b/examples/mcp/prompt_server/main.py index 189f03874..98da93edd 100644 --- a/examples/mcp/prompt_server/main.py +++ b/examples/mcp/prompt_server/main.py @@ -13,7 +13,7 @@ async def get_instructions_from_prompt(mcp_server: MCPServer, prompt_name: str, **kwargs) -> str: """Get agent instructions by calling MCP prompt endpoint (user-controlled)""" print(f"Getting instructions from prompt: {prompt_name}") - + try: prompt_result = await mcp_server.get_prompt(prompt_name, kwargs) instructions = prompt_result.messages[0].content.text @@ -27,15 +27,15 @@ async def get_instructions_from_prompt(mcp_server: MCPServer, prompt_name: str, async def demo_code_review(mcp_server: MCPServer): """Demo: Code review with user-selected prompt""" print("=== CODE REVIEW DEMO ===") - + # User explicitly selects prompt and parameters instructions = await get_instructions_from_prompt( mcp_server, "generate_code_review_instructions", focus="security vulnerabilities", - language="python" + language="python", ) - + agent = Agent( name="Code Reviewer Agent", instructions=instructions, # Instructions from MCP prompt @@ -50,18 +50,17 @@ def process_user_input(user_input): return "Command executed" """ - + print(f"Running: {message[:60]}...") result = await Runner.run(starting_agent=agent, input=message) print(result.final_output) - print("\n" + "="*50 + "\n") - + print("\n" + "=" * 50 + "\n") async def show_available_prompts(mcp_server: MCPServer): """Show available prompts for user selection""" print("=== AVAILABLE PROMPTS ===") - + prompts_result = await mcp_server.list_prompts() print("User can select from these prompts:") for i, prompt in enumerate(prompts_result.prompts, 1): @@ -77,7 +76,7 @@ async def main(): trace_id = gen_trace_id() with trace(workflow_name="Simple Prompt Demo", trace_id=trace_id): print(f"Trace: https://platform.openai.com/traces/trace?trace_id={trace_id}\n") - + await show_available_prompts(server) await demo_code_review(server) @@ -104,4 +103,4 @@ async def main(): finally: if process: process.terminate() - print("Server terminated.") \ No newline at end of file + print("Server terminated.") diff --git a/examples/mcp/prompt_server/server.py b/examples/mcp/prompt_server/server.py index 9cb7095e7..01dcbac34 100644 --- a/examples/mcp/prompt_server/server.py +++ b/examples/mcp/prompt_server/server.py @@ -1,17 +1,17 @@ -import json -from datetime import datetime -from typing import Any, Dict from mcp.server.fastmcp import FastMCP # Create server mcp = FastMCP("Prompt Server") + # Instruction-generating prompts (user-controlled) @mcp.prompt() -def generate_code_review_instructions(focus: str = "general code quality", language: str = "python") -> str: +def generate_code_review_instructions( + focus: str = "general code quality", language: str = "python" +) -> str: """Generate agent instructions for code review tasks""" 
print(f"[debug-server] generate_code_review_instructions({focus}, {language})") - + return f"""You are a senior {language} code review specialist. Your role is to provide comprehensive code analysis with focus on {focus}. INSTRUCTIONS: @@ -32,5 +32,6 @@ def generate_code_review_instructions(focus: str = "general code quality", langu Use the available tools to check current time if you need timestamps for your analysis.""" + if __name__ == "__main__": - mcp.run(transport="streamable-http") \ No newline at end of file + mcp.run(transport="streamable-http") diff --git a/tests/mcp/test_prompt_server.py b/tests/mcp/test_prompt_server.py new file mode 100644 index 000000000..42a132810 --- /dev/null +++ b/tests/mcp/test_prompt_server.py @@ -0,0 +1,299 @@ +import pytest + +from agents import Agent, Runner +from agents.mcp import MCPServer + +from ..fake_model import FakeModel +from ..test_responses import get_text_message + + +class FakeMCPPromptServer(MCPServer): + """Fake MCP server for testing prompt functionality""" + + def __init__(self, server_name: str = "fake_prompt_server"): + self.prompts = [] + self.prompt_results = {} + self._server_name = server_name + + def add_prompt(self, name: str, description: str, arguments: dict = None): + """Add a prompt to the fake server""" + from mcp.types import Prompt + + prompt = Prompt(name=name, description=description, arguments=arguments or []) + self.prompts.append(prompt) + + def set_prompt_result(self, name: str, result: str): + """Set the result that should be returned for a prompt""" + self.prompt_results[name] = result + + async def connect(self): + pass + + async def cleanup(self): + pass + + async def list_prompts(self, run_context=None, agent=None): + """List available prompts""" + from mcp.types import ListPromptsResult + + return ListPromptsResult(prompts=self.prompts) + + async def get_prompt(self, name: str, arguments: dict = None): + """Get a prompt with arguments""" + from mcp.types import GetPromptResult, PromptMessage, TextContent + + if name not in self.prompt_results: + raise ValueError(f"Prompt '{name}' not found") + + content = self.prompt_results[name] + + # If it's a format string, try to format it with arguments + if arguments and "{" in content: + try: + content = content.format(**arguments) + except KeyError: + pass # Use original content if formatting fails + + message = PromptMessage(role="user", content=TextContent(type="text", text=content)) + + return GetPromptResult(description=f"Generated prompt for {name}", messages=[message]) + + async def list_tools(self, run_context=None, agent=None): + return [] + + async def call_tool(self, tool_name: str, arguments: dict = None): + raise NotImplementedError("This fake server doesn't support tools") + + @property + def name(self) -> str: + return self._server_name + + +@pytest.mark.asyncio +async def test_list_prompts(): + """Test listing available prompts""" + server = FakeMCPPromptServer() + server.add_prompt( + "generate_code_review_instructions", "Generate agent instructions for code review tasks" + ) + + result = await server.list_prompts() + + assert len(result.prompts) == 1 + assert result.prompts[0].name == "generate_code_review_instructions" + assert "code review" in result.prompts[0].description + + +@pytest.mark.asyncio +async def test_get_prompt_without_arguments(): + """Test getting a prompt without arguments""" + server = FakeMCPPromptServer() + server.add_prompt("simple_prompt", "A simple prompt") + server.set_prompt_result("simple_prompt", "You are a helpful 
assistant.") + + result = await server.get_prompt("simple_prompt") + + assert len(result.messages) == 1 + assert result.messages[0].content.text == "You are a helpful assistant." + + +@pytest.mark.asyncio +async def test_get_prompt_with_arguments(): + """Test getting a prompt with arguments""" + server = FakeMCPPromptServer() + server.add_prompt( + "generate_code_review_instructions", "Generate agent instructions for code review tasks" + ) + server.set_prompt_result( + "generate_code_review_instructions", + "You are a senior {language} code review specialist. Focus on {focus}.", + ) + + result = await server.get_prompt( + "generate_code_review_instructions", + {"focus": "security vulnerabilities", "language": "python"}, + ) + + assert len(result.messages) == 1 + expected_text = ( + "You are a senior python code review specialist. Focus on security vulnerabilities." + ) + assert result.messages[0].content.text == expected_text + + +@pytest.mark.asyncio +async def test_get_prompt_not_found(): + """Test getting a prompt that doesn't exist""" + server = FakeMCPPromptServer() + + with pytest.raises(ValueError, match="Prompt 'nonexistent' not found"): + await server.get_prompt("nonexistent") + + +@pytest.mark.asyncio +async def test_agent_with_prompt_instructions(): + """Test using prompt-generated instructions with an agent""" + server = FakeMCPPromptServer() + server.add_prompt( + "generate_code_review_instructions", "Generate agent instructions for code review tasks" + ) + server.set_prompt_result( + "generate_code_review_instructions", + "You are a code reviewer. Analyze the provided code for security issues.", + ) + + # Get instructions from prompt + prompt_result = await server.get_prompt("generate_code_review_instructions") + instructions = prompt_result.messages[0].content.text + + # Create agent with prompt-generated instructions + model = FakeModel() + agent = Agent(name="prompt_agent", instructions=instructions, model=model, mcp_servers=[server]) + + # Mock model response + model.add_multiple_turn_outputs( + [[get_text_message("Code analysis complete. Found security vulnerability.")]] + ) + + # Run the agent + result = await Runner.run(agent, input="Review this code: def unsafe_exec(cmd): os.system(cmd)") + + assert "Code analysis complete" in result.final_output + assert ( + agent.instructions + == "You are a code reviewer. Analyze the provided code for security issues." 
+ ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("streaming", [False, True]) +async def test_agent_with_prompt_instructions_streaming(streaming: bool): + """Test using prompt-generated instructions with streaming and non-streaming""" + server = FakeMCPPromptServer() + server.add_prompt( + "generate_code_review_instructions", "Generate agent instructions for code review tasks" + ) + server.set_prompt_result( + "generate_code_review_instructions", + "You are a {language} code reviewer focusing on {focus}.", + ) + + # Get instructions from prompt with arguments + prompt_result = await server.get_prompt( + "generate_code_review_instructions", {"language": "Python", "focus": "security"} + ) + instructions = prompt_result.messages[0].content.text + + # Create agent + model = FakeModel() + agent = Agent( + name="streaming_prompt_agent", instructions=instructions, model=model, mcp_servers=[server] + ) + + model.add_multiple_turn_outputs([[get_text_message("Security analysis complete.")]]) + + if streaming: + result = Runner.run_streamed(agent, input="Review code") + async for _ in result.stream_events(): + pass + final_result = result.final_output + else: + result = await Runner.run(agent, input="Review code") + final_result = result.final_output + + assert "Security analysis complete" in final_result + assert agent.instructions == "You are a Python code reviewer focusing on security." + + +@pytest.mark.asyncio +async def test_multiple_prompts(): + """Test server with multiple prompts""" + server = FakeMCPPromptServer() + + # Add multiple prompts + server.add_prompt( + "generate_code_review_instructions", "Generate agent instructions for code review tasks" + ) + server.add_prompt( + "generate_testing_instructions", "Generate agent instructions for testing tasks" + ) + + server.set_prompt_result("generate_code_review_instructions", "You are a code reviewer.") + server.set_prompt_result("generate_testing_instructions", "You are a test engineer.") + + # Test listing prompts + prompts_result = await server.list_prompts() + assert len(prompts_result.prompts) == 2 + + prompt_names = [p.name for p in prompts_result.prompts] + assert "generate_code_review_instructions" in prompt_names + assert "generate_testing_instructions" in prompt_names + + # Test getting each prompt + review_result = await server.get_prompt("generate_code_review_instructions") + assert review_result.messages[0].content.text == "You are a code reviewer." + + testing_result = await server.get_prompt("generate_testing_instructions") + assert testing_result.messages[0].content.text == "You are a test engineer." + + +@pytest.mark.asyncio +async def test_prompt_with_complex_arguments(): + """Test prompt with complex argument formatting""" + server = FakeMCPPromptServer() + server.add_prompt( + "generate_detailed_instructions", "Generate detailed instructions with multiple parameters" + ) + server.set_prompt_result( + "generate_detailed_instructions", + "You are a {role} specialist. Your focus is on {focus}. " + + "You work with {language} code. Your experience level is {level}.", + ) + + arguments = { + "role": "security", + "focus": "vulnerability detection", + "language": "Python", + "level": "senior", + } + + result = await server.get_prompt("generate_detailed_instructions", arguments) + + expected = ( + "You are a security specialist. Your focus is on vulnerability detection. " + "You work with Python code. Your experience level is senior." 
+ ) + assert result.messages[0].content.text == expected + + +@pytest.mark.asyncio +async def test_prompt_with_missing_arguments(): + """Test prompt with missing arguments in format string""" + server = FakeMCPPromptServer() + server.add_prompt("incomplete_prompt", "Prompt with missing arguments") + server.set_prompt_result("incomplete_prompt", "You are a {role} working on {task}.") + + # Only provide one of the required arguments + result = await server.get_prompt("incomplete_prompt", {"role": "developer"}) + + # Should return the original string since formatting fails + assert result.messages[0].content.text == "You are a {role} working on {task}." + + +@pytest.mark.asyncio +async def test_prompt_server_cleanup(): + """Test that prompt server cleanup works correctly""" + server = FakeMCPPromptServer() + server.add_prompt("test_prompt", "Test prompt") + server.set_prompt_result("test_prompt", "Test result") + + # Test that server works before cleanup + result = await server.get_prompt("test_prompt") + assert result.messages[0].content.text == "Test result" + + # Cleanup should not raise any errors + await server.cleanup() + + # Server should still work after cleanup (in this fake implementation) + result = await server.get_prompt("test_prompt") + assert result.messages[0].content.text == "Test result" From c7fa5662de3c0909d1c9bed47bf601c00479f8e2 Mon Sep 17 00:00:00 2001 From: thein Date: Fri, 4 Jul 2025 17:13:58 -0400 Subject: [PATCH 4/6] update docs --- docs/mcp.md | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/docs/mcp.md b/docs/mcp.md index d30a916ac..eef61a047 100644 --- a/docs/mcp.md +++ b/docs/mcp.md @@ -4,7 +4,7 @@ The [Model context protocol](https://modelcontextprotocol.io/introduction) (aka > MCP is an open protocol that standardizes how applications provide context to LLMs. Think of MCP like a USB-C port for AI applications. Just as USB-C provides a standardized way to connect your devices to various peripherals and accessories, MCP provides a standardized way to connect AI models to different data sources and tools. -The Agents SDK has support for MCP. This enables you to use a wide range of MCP servers to provide tools to your Agents. +The Agents SDK has support for MCP. This enables you to use a wide range of MCP servers to provide tools and prompts to your Agents. ## MCP servers @@ -135,6 +135,38 @@ The `ToolFilterContext` provides access to: - `agent`: The agent requesting the tools - `server_name`: The name of the MCP server +## Prompts + +MCP servers can also provide prompts that can be used to dynamically generate agent instructions. This allows you to create reusable instruction templates that can be customized with parameters. 
+ +### Using prompts + +MCP servers that support prompts provide two key methods: + +- `list_prompts()`: Lists all available prompts on the server +- `get_prompt(name, arguments)`: Gets a specific prompt with optional parameters + +```python +# List available prompts +prompts_result = await server.list_prompts() +for prompt in prompts_result.prompts: + print(f"Prompt: {prompt.name} - {prompt.description}") + +# Get a specific prompt with parameters +prompt_result = await server.get_prompt( + "generate_code_review_instructions", + {"focus": "security vulnerabilities", "language": "python"} +) +instructions = prompt_result.messages[0].content.text + +# Use the prompt-generated instructions with an Agent +agent = Agent( + name="Code Reviewer", + instructions=instructions, # Instructions from MCP prompt + mcp_servers=[server] +) +``` + ## Caching Every time an Agent runs, it calls `list_tools()` on the MCP server. This can be a latency hit, especially if the server is a remote server. To automatically cache the list of tools, you can pass `cache_tools_list=True` to [`MCPServerStdio`][agents.mcp.server.MCPServerStdio], [`MCPServerSse`][agents.mcp.server.MCPServerSse], and [`MCPServerStreamableHttp`][agents.mcp.server.MCPServerStreamableHttp]. You should only do this if you're certain the tool list will not change. From 6b5af0678686eecf696169b34567a4b80b15ac85 Mon Sep 17 00:00:00 2001 From: thein Date: Fri, 4 Jul 2025 17:41:19 -0400 Subject: [PATCH 5/6] formatted files --- examples/mcp/prompt_server/main.py | 6 +++++- src/agents/mcp/server.py | 20 +++++++------------- tests/mcp/test_prompt_server.py | 20 +++++++++++--------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/examples/mcp/prompt_server/main.py b/examples/mcp/prompt_server/main.py index 98da93edd..8f2991fc0 100644 --- a/examples/mcp/prompt_server/main.py +++ b/examples/mcp/prompt_server/main.py @@ -16,7 +16,11 @@ async def get_instructions_from_prompt(mcp_server: MCPServer, prompt_name: str, try: prompt_result = await mcp_server.get_prompt(prompt_name, kwargs) - instructions = prompt_result.messages[0].content.text + content = prompt_result.messages[0].content + if hasattr(content, 'text'): + instructions = content.text + else: + instructions = str(content) print("Generated instructions") return instructions except Exception as e: diff --git a/src/agents/mcp/server.py b/src/agents/mcp/server.py index b9a98bd7b..b7e41c91d 100644 --- a/src/agents/mcp/server.py +++ b/src/agents/mcp/server.py @@ -13,7 +13,7 @@ from mcp.client.sse import sse_client from mcp.client.streamable_http import GetSessionIdCallback, streamablehttp_client from mcp.shared.message import SessionMessage -from mcp.types import CallToolResult, InitializeResult, GetPromptResult, ListPromptsResult +from mcp.types import CallToolResult, GetPromptResult, InitializeResult, ListPromptsResult from typing_extensions import NotRequired, TypedDict from ..exceptions import UserError @@ -72,13 +72,11 @@ async def list_prompts( @abc.abstractmethod async def get_prompt( - self, - name: str, - arguments: dict[str, Any] | None = None + self, name: str, arguments: dict[str, Any] | None = None ) -> GetPromptResult: """Get a specific prompt from the server.""" pass - + class _MCPServerWithClientSession(MCPServer, abc.ABC): """Base class for MCP servers that use a `ClientSession` to communicate with the server.""" @@ -134,9 +132,7 @@ async def _apply_tool_filter( return await self._apply_dynamic_tool_filter(tools, run_context, agent) def _apply_static_tool_filter( - self, 
- tools: list[MCPTool], - static_filter: ToolFilterStatic + self, tools: list[MCPTool], static_filter: ToolFilterStatic ) -> list[MCPTool]: """Apply static tool filtering based on allowlist and blocklist.""" filtered_tools = tools @@ -283,18 +279,16 @@ async def list_prompts( """List the prompts available on the server.""" if not self.session: raise UserError("Server not initialized. Make sure you call `connect()` first.") - + return await self.session.list_prompts() async def get_prompt( - self, - name: str, - arguments: dict[str, Any] | None = None + self, name: str, arguments: dict[str, Any] | None = None ) -> GetPromptResult: """Get a specific prompt from the server.""" if not self.session: raise UserError("Server not initialized. Make sure you call `connect()` first.") - + return await self.session.get_prompt(name, arguments) async def cleanup(self): diff --git a/tests/mcp/test_prompt_server.py b/tests/mcp/test_prompt_server.py index 42a132810..15afe28e4 100644 --- a/tests/mcp/test_prompt_server.py +++ b/tests/mcp/test_prompt_server.py @@ -1,3 +1,5 @@ +from typing import Any + import pytest from agents import Agent, Runner @@ -11,15 +13,15 @@ class FakeMCPPromptServer(MCPServer): """Fake MCP server for testing prompt functionality""" def __init__(self, server_name: str = "fake_prompt_server"): - self.prompts = [] - self.prompt_results = {} + self.prompts: list[Any] = [] + self.prompt_results: dict[str, str] = {} self._server_name = server_name - def add_prompt(self, name: str, description: str, arguments: dict = None): + def add_prompt(self, name: str, description: str, arguments: dict[str, Any] | None = None): """Add a prompt to the fake server""" from mcp.types import Prompt - prompt = Prompt(name=name, description=description, arguments=arguments or []) + prompt = Prompt(name=name, description=description, arguments=[]) self.prompts.append(prompt) def set_prompt_result(self, name: str, result: str): @@ -38,7 +40,7 @@ async def list_prompts(self, run_context=None, agent=None): return ListPromptsResult(prompts=self.prompts) - async def get_prompt(self, name: str, arguments: dict = None): + async def get_prompt(self, name: str, arguments: dict[str, Any] | None = None): """Get a prompt with arguments""" from mcp.types import GetPromptResult, PromptMessage, TextContent @@ -61,7 +63,7 @@ async def get_prompt(self, name: str, arguments: dict = None): async def list_tools(self, run_context=None, agent=None): return [] - async def call_tool(self, tool_name: str, arguments: dict = None): + async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None = None): raise NotImplementedError("This fake server doesn't support tools") @property @@ -193,10 +195,10 @@ async def test_agent_with_prompt_instructions_streaming(streaming: bool): model.add_multiple_turn_outputs([[get_text_message("Security analysis complete.")]]) if streaming: - result = Runner.run_streamed(agent, input="Review code") - async for _ in result.stream_events(): + streaming_result = Runner.run_streamed(agent, input="Review code") + async for _ in streaming_result.stream_events(): pass - final_result = result.final_output + final_result = streaming_result.final_output else: result = await Runner.run(agent, input="Review code") final_result = result.final_output From dd7b5fd93d5b68ea16b38af7aa36504bc0a83e91 Mon Sep 17 00:00:00 2001 From: thein Date: Fri, 4 Jul 2025 17:48:01 -0400 Subject: [PATCH 6/6] add make get/list prompt --- tests/mcp/helpers.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) 
diff --git a/tests/mcp/helpers.py b/tests/mcp/helpers.py index e0d8a813d..31d43c228 100644 --- a/tests/mcp/helpers.py +++ b/tests/mcp/helpers.py @@ -4,7 +4,7 @@ from typing import Any from mcp import Tool as MCPTool -from mcp.types import CallToolResult, TextContent +from mcp.types import CallToolResult, GetPromptResult, ListPromptsResult, PromptMessage, TextContent from agents.mcp import MCPServer from agents.mcp.server import _MCPServerWithClientSession @@ -94,6 +94,18 @@ async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> C content=[TextContent(text=self.tool_results[-1], type="text")], ) + async def list_prompts(self, run_context=None, agent=None) -> ListPromptsResult: + """Return empty list of prompts for fake server""" + return ListPromptsResult(prompts=[]) + + async def get_prompt( + self, name: str, arguments: dict[str, Any] | None = None + ) -> GetPromptResult: + """Return a simple prompt result for fake server""" + content = f"Fake prompt content for {name}" + message = PromptMessage(role="user", content=TextContent(type="text", text=content)) + return GetPromptResult(description=f"Fake prompt: {name}", messages=[message]) + @property def name(self) -> str: return self._server_name
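
A minimal usage sketch (illustrative, not part of the patches above): it combines the new `list_prompts()` and `get_prompt()` methods against the example server from `examples/mcp/prompt_server`, printing each prompt's declared arguments before requesting one. The `inspect_and_fetch` helper and its hard-coded prompt name are assumptions for illustration; the sketch presumes `mcp.types.Prompt.arguments` entries expose `name` and `required` fields, and the text extraction mirrors the `hasattr` fallback used in `main.py`.

import asyncio

from agents.mcp import MCPServerStreamableHttp


async def inspect_and_fetch(server: MCPServerStreamableHttp) -> str:
    # Discover available prompts and the arguments each one declares.
    listing = await server.list_prompts()
    for prompt in listing.prompts:
        declared = prompt.arguments or []
        required = [arg.name for arg in declared if arg.required]
        print(f"{prompt.name}: required arguments {required or 'none'}")

    # Request a specific prompt with explicit arguments.
    result = await server.get_prompt(
        "generate_code_review_instructions",
        {"focus": "security vulnerabilities", "language": "python"},
    )
    content = result.messages[0].content
    # Content is typically TextContent; fall back to str() for other content types.
    return content.text if hasattr(content, "text") else str(content)


async def main() -> None:
    # Assumes the example server from examples/mcp/prompt_server/server.py is
    # already running at this URL (see examples/mcp/prompt_server/main.py).
    async with MCPServerStreamableHttp(
        name="Simple Prompt Server",
        params={"url": "http://localhost:8000/mcp"},
    ) as server:
        print(await inspect_and_fetch(server))


if __name__ == "__main__":
    asyncio.run(main())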