"""
Interactive MCP Host Capabilities Demo

This example demonstrates advanced MCP host capabilities including:
- Sampling: language model text generation requested by the MCP server from the host
- Elicitation: interactive user input collected via the command line
- Roots: file system root listing

The demo runs a series of example tasks and lets you respond directly to the
MCP server's elicitation requests through the command line interface.
"""

import argparse
import asyncio
import json
import logging
import sys
from pathlib import Path

import yaml
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.conditions import MaxMessageTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_agentchat.ui import Console
from autogen_core.models import ChatCompletionClient
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.tools.mcp import (
    ChatCompletionClientSampler,
    McpSessionHost,
    McpWorkbench,
    StaticRootsProvider,
    StdioElicitor,
    StdioServerParams,
)
from mcp.types import Root
from pydantic import FileUrl

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(message)s",  # Clean format for demo output
)
logger = logging.getLogger(__name__)
logging.getLogger("autogen_core").setLevel(logging.WARNING)


def load_model_client_from_config(config_path: str) -> ChatCompletionClient:
    """Load a ChatCompletionClient from a JSON or YAML config file.

    Args:
        config_path: Path to the JSON or YAML config file

    Returns:
        ChatCompletionClient: Loaded model client

    Raises:
        FileNotFoundError: If config file doesn't exist
        ValueError: If config format is invalid or unsupported file type
    """
    config_file = Path(config_path)
    if not config_file.exists():
        raise FileNotFoundError(f"Config file not found: {config_path}")

    # Load config based on file extension
    if config_file.suffix.lower() == ".json":
        with open(config_file, "r") as f:
            config_data = json.load(f)
    elif config_file.suffix.lower() in [".yml", ".yaml"]:
        with open(config_file, "r") as f:
            config_data = yaml.safe_load(f)
    else:
        raise ValueError(f"Unsupported config file type: {config_file.suffix}. Use .json, .yml, or .yaml")

    if not isinstance(config_data, dict):
        raise ValueError("Config file must contain a JSON/YAML object")

    logger.info(f"📄 Loading ChatCompletionClient from config: {config_path}")
    return ChatCompletionClient.load_component(config_data)

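
# A sketch of a config file this loader can consume. This assumes AutoGen's
# component config format ("provider" plus "config"); adjust to your installed
# version. YAML equivalent of the JSON example shown in --help:
#
#   provider: autogen_ext.models.openai.OpenAIChatCompletionClient
#   config:
#     model: gpt-4o-mini
#     api_key: your-api-key
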
async def interactive_mcp_demo(config_path: str | None = None) -> None:
    """Interactive MCP host capabilities demo with a command-line interface."""
    logger.info("🌟 Interactive MCP Host Capabilities Demo")
    logger.info("=" * 60)
    logger.info("This demo showcases MCP server-to-host communication:")
    logger.info("• Sampling: MCP server requests language model generation")
    logger.info("• Elicitation: MCP server requests user input via StdioElicitor")
    logger.info("• Roots: MCP server lists available file system roots")
    logger.info("=" * 60)

    # Set up the model client used for sampling
    if config_path:
        logger.info("⚙️ Loading model client from config file...")
        model_client = load_model_client_from_config(config_path)
    else:
        logger.info("⚙️ Setting up default OpenAI model client (gpt-4o-mini)...")
        model_client = OpenAIChatCompletionClient(model="gpt-4o-mini")

    other_assistant = AssistantAgent(
        "booking_assistant",
        model_client=model_client,
        description="An AI assistant who helps a user book 5pm reservations.",
    )

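    # The sampler answers the server's sampling/createMessage requests by
    # forwarding them to the model client above, letting the MCP server ask
    # the host to generate text on its behalf.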
    sampler = ChatCompletionClientSampler(model_client)

    # Create a StdioElicitor that prompts the user for input on the command line
    logger.info("🎯 Creating StdioElicitor...")
    elicitor = StdioElicitor()

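    # StaticRootsProvider advertises a fixed set of file system roots; the
    # server can issue a roots/list request to discover which directories
    # the host exposes.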
    roots = StaticRootsProvider(
        [Root(uri=FileUrl("file:///home"), name="Home"), Root(uri=FileUrl("file:///tmp"), name="Tmp")]
    )

    # Create a host with all capabilities, including elicitation
    logger.info("🏠 Creating MCP session host with sampling, elicitation, and roots support...")
    host = McpSessionHost(
        sampler=sampler,  # Support sampling via the model client
        elicitor=elicitor,  # Support elicitation via command-line prompts
        roots=roots,  # Expose /home and /tmp as roots
    )

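    # The workbench launches mcp_example_server.py as a subprocess and talks
    # to it over stdio; passing the host allows server-initiated requests
    # (sampling, elicitation, roots) to flow back to the handlers above.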
    # Set up the workbench with the host
    logger.info("🔧 Creating MCP Workbench for mcp_example_server...")
    mcp_workbench = McpWorkbench(
        server_params=StdioServerParams(
            command=sys.executable,
            args=[str(Path(__file__).parent / "mcp_example_server.py")],
            read_timeout_seconds=60,
        ),
        host=host,
    )

    # Create assistant with MCP capabilities
    assistant = AssistantAgent(
        "mcp_assistant",
        model_client=model_client,
        workbench=mcp_workbench,
        description="An AI assistant with access to MCP tools that can request sampling and elicitation from the host",
    )

    # Create RoundRobinGroupChat with the agents
    logger.info("🔄 Setting up RoundRobinGroupChat...")
    team = RoundRobinGroupChat(
        [assistant, other_assistant], termination_condition=MaxMessageTermination(max_messages=2)
    )

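    # Note: MaxMessageTermination counts the initial task message toward its
    # limit, so max_messages=2 keeps each run to roughly one agent turn.
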
    # Run the team on a series of example tasks
    tasks = ["Book a table for 2 at 7pm", "Generate a poem about computer protocols.", "ls /home", "ls /bin"]
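    # Each task targets a different host capability: the booking task should
    # trigger elicitation, the poem should trigger sampling, and the ls tasks
    # exercise roots (/bin is deliberately outside the advertised roots, so
    # the server may refuse it).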
    for task in tasks:
        await team.reset()
        result = await Console(team.run_stream(task=task))

        logger.info("💬 Team conversation:")
        for message in result.messages:
            header = f"--- {type(message).__name__.upper()} ---"
            logger.info(header)
            logger.info(message.model_dump_json(indent=2))


def parse_arguments() -> argparse.Namespace:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Interactive MCP Host Capabilities Demo with StdioElicitor",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Run with the default OpenAI gpt-4o-mini client
  python mcp_elicitation_example.py

  # Run with a custom model client from a config file
  python mcp_elicitation_example.py --config model_config.json
  python mcp_elicitation_example.py --config model_config.yaml

Config file format (JSON/YAML component config):
{
  "provider": "autogen_ext.models.openai.OpenAIChatCompletionClient",
  "config": {
    "model": "gpt-4o-mini",
    "api_key": "your-api-key"
  }
}
    """,
    )

    parser.add_argument(
        "--config",
        "-c",
        type=str,
        help="Path to a JSON or YAML config file containing a ChatCompletionClient configuration",
    )

    return parser.parse_args()


async def main():
    """
    Run the interactive MCP host capabilities demonstration.

    This demo allows direct command-line interaction with an MCP-enabled assistant
    that can use tools requiring host-side capabilities like sampling and elicitation.
    """
    args = parse_arguments()

    try:
        await interactive_mcp_demo(config_path=args.config)
    except KeyboardInterrupt:
        logger.info("\n👋 Demo interrupted by user. Goodbye!")
    except Exception as e:
        logger.error(f"❌ Error running demo: {e}")
        logger.info("Troubleshooting tips:")
        logger.info("1. Make sure mcp_example_server.py is in the same directory as this script")
        logger.info("2. Ensure your OpenAI API key is configured")
        logger.info("3. Check that your Python environment has the required packages installed")
        if args.config:
            logger.info("4. Check that your config file exists and contains a valid ChatCompletionClient configuration")


if __name__ == "__main__":
    """
    Interactive MCP Host Capabilities Demo with StdioElicitor

    This demo provides a command-line interface to interact with an MCP-enabled
    assistant that demonstrates advanced host capabilities:

    🔄 Sampling: MCP server can request language model text generation from the host
    ❓ Elicitation: MCP server can request interactive user input via StdioElicitor
    📁 Roots: MCP server can request file system root listings from the host

    Key Features:
    - StdioElicitor routes elicitation requests from the MCP server to command-line prompts
    - Full bidirectional communication between the MCP server and AutoGen agents
    - Command-line interaction for real-time demonstration

    Prerequisites:
    1. Keep mcp_example_server.py alongside this script (it is launched as a subprocess)
    2. Set up your OpenAI API key (required for the sampling capability)

    Usage:
    - Run with default model: python mcp_elicitation_example.py
    - Run with custom model: python mcp_elicitation_example.py --config model.json
    - The demo runs a series of example tasks against the MCP server
    - Respond to elicitation prompts on the command line as they appear
    - Watch sampling, elicitation, and roots requests get handled by the host
    """

    # Run the interactive demo
    asyncio.run(main())