|
4 | 4 | """ |
5 | 5 |
|
6 | 6 | import asyncio |
| 7 | +import re |
| 8 | +from os import getenv |
7 | 9 |
|
8 | | -from microsoft.teams.ai import Agent, ListMemory |
9 | | -from microsoft.teams.api import MessageActivity, TypingActivityInput |
| 10 | +from dotenv import find_dotenv, load_dotenv |
| 11 | +from microsoft.teams.ai import Agent, ChatPrompt, ListMemory |
| 12 | +from microsoft.teams.api import MessageActivity, MessageActivityInput, TypingActivityInput |
10 | 13 | from microsoft.teams.apps import ActivityContext, App |
11 | 14 | from microsoft.teams.devtools import DevToolsPlugin |
12 | | -from microsoft.teams.mcpplugin import McpClientPlugin |
13 | | -from microsoft.teams.openai import OpenAIResponsesAIModel |
| 15 | +from microsoft.teams.mcpplugin import McpClientPlugin, McpClientPluginParams |
| 16 | +from microsoft.teams.openai import OpenAICompletionsAIModel, OpenAIResponsesAIModel |
| 17 | + |
| 18 | +load_dotenv(find_dotenv(usecwd=True)) |
14 | 19 |
|
# Teams application instance; DevToolsPlugin enables the local
# developer tooling / inspection UI while running this sample.
app = App(plugins=[DevToolsPlugin()])
|
17 | | -responses_openai_ai_model = OpenAIResponsesAIModel(stateful=True) |
18 | | -chat_memory = ListMemory() |
| 22 | + |
def get_required_env(key: str) -> str:
    """Look up environment variable *key*, failing loudly when it is absent.

    Args:
        key: Name of the environment variable to read.

    Returns:
        The variable's value (guaranteed non-empty).

    Raises:
        ValueError: If the variable is unset or empty.
    """
    if value := getenv(key):
        return value
    raise ValueError(f"Required environment variable {key} is not set")
| 28 | + |
| 29 | + |
# Deployment/model name for Azure OpenAI; startup fails fast if unset.
AZURE_OPENAI_MODEL = get_required_env("AZURE_OPENAI_MODEL")
| 32 | + |
| 33 | +# GitHub PAT for MCP server (optional) |
| 34 | +def get_optional_env(key: str) -> str | None: |
| 35 | + return getenv(key) |
| 36 | + |
| 37 | + |
# This example uses a PersonalAccessToken, but you may get
# the user's oauth token as well by getting them to sign in
# and then using app.sign_in to get their token.
# When GITHUB_PAT is None, the GitHub MCP server is simply not registered.
GITHUB_PAT = get_optional_env("GITHUB_PAT")

# Set up AI models
# completions_model: stateless chat-completions style calls (used by ChatPrompt).
# responses_model: Responses API; stateful=True keeps conversation state
# server-side between turns (used by the Agent below).
completions_model = OpenAICompletionsAIModel(model=AZURE_OPENAI_MODEL)
responses_model = OpenAIResponsesAIModel(model=AZURE_OPENAI_MODEL, stateful=True)
# Configure MCP Client Plugin with multiple remote servers (as shown in docs)
mcp_plugin = McpClientPlugin()

# Add multiple MCP servers to demonstrate the concept from documentation
mcp_plugin.use_mcp_server("https://learn.microsoft.com/api/mcp")

# Add GitHub MCP server with authentication headers (demonstrates header functionality)
# The Authorization header is sent with every request to this server.
if GITHUB_PAT:
    mcp_plugin.use_mcp_server(
        "https://api.githubcopilot.com/mcp/", McpClientPluginParams(headers={"Authorization": f"Bearer {GITHUB_PAT}"})
    )
    print("✅ GitHub MCP server configured with authentication")
else:
    print("⚠️ GITHUB_PAT not found - GitHub MCP server not configured")
    print(" Set GITHUB_PAT environment variable to enable GitHub MCP integration")
# Example of additional servers (commented out - would need actual working endpoints):
# mcp_plugin.use_mcp_server("https://example.com/mcp/weather")
# mcp_plugin.use_mcp_server("https://example.com/mcp/pokemon")

# Memory for stateful conversations
# NOTE: this is a single in-process memory shared across all users/conversations.
chat_memory = ListMemory()

# Agent using Responses API with MCP tools
responses_agent = Agent(responses_model, memory=chat_memory, plugins=[mcp_plugin])

# ChatPrompt with MCP tools (demonstrating docs example)
# No memory attached: each ChatPrompt.send call is independent.
chat_prompt = ChatPrompt(completions_model, plugins=[mcp_plugin])


# Pattern-based handlers to demonstrate different MCP usage patterns
| 78 | + |
# Compiled once and shared by the routing decorator and the handler body,
# so the command syntax lives in a single place and is not recompiled
# (or duplicated) on every incoming message.
AGENT_COMMAND_PATTERN = re.compile(r"^agent\s+(.+)", re.IGNORECASE)


@app.on_message_pattern(AGENT_COMMAND_PATTERN)
async def handle_agent_chat(ctx: ActivityContext[MessageActivity]):
    """Handle 'agent <query>' command using Agent with MCP tools (stateful).

    Extracts the query after the 'agent' keyword, shows a typing indicator,
    and replies with the Agent's AI-generated response (if any).
    """
    match = AGENT_COMMAND_PATTERN.match(ctx.activity.text)
    if not match:
        # Defensive: the router only dispatches messages that match the pattern.
        return

    query = match.group(1).strip()

    print(f"[AGENT] Processing: {query}")
    await ctx.send(TypingActivityInput())

    # Use Agent with MCP tools (stateful conversation)
    result = await responses_agent.send(query)
    if result.response.content:
        message = MessageActivityInput(text=result.response.content).add_ai_generated()
        await ctx.send(message)
| 94 | + |
| 95 | + |
# Single compiled pattern shared by the router and the body (see AGENT pattern
# rationale): one source of truth, compiled once at import time.
PROMPT_COMMAND_PATTERN = re.compile(r"^prompt\s+(.+)", re.IGNORECASE)


@app.on_message_pattern(PROMPT_COMMAND_PATTERN)
async def handle_prompt_chat(ctx: ActivityContext[MessageActivity]):
    """Handle 'prompt <query>' command using ChatPrompt with MCP tools (stateless).

    Unlike the Agent path, no memory is attached, so each invocation is an
    independent one-shot conversation.
    """
    match = PROMPT_COMMAND_PATTERN.match(ctx.activity.text)
    if not match:
        # Defensive: the router only dispatches messages that match the pattern.
        return

    query = match.group(1).strip()

    print(f"[PROMPT] Processing: {query}")
    await ctx.send(TypingActivityInput())

    # Use ChatPrompt with MCP tools (demonstrates docs pattern).
    # (Fixed missing space after "tools." in the instructions string.)
    result = await chat_prompt.send(
        input=query,
        instructions=(
            "You are a helpful assistant with access to remote MCP tools. Use them to help answer questions."
        ),
    )

    if result.response.content:
        message = MessageActivityInput(text=result.response.content).add_ai_generated()
        await ctx.send(message)
| 117 | + |
| 118 | + |
@app.on_message_pattern(re.compile(r"^mcp\s+info", re.IGNORECASE))
async def handle_mcp_info(ctx: ActivityContext[MessageActivity]):
    """Handle 'mcp info' command to show available MCP servers and tools"""
    # Assemble the server list from what is actually configured at startup.
    server_lines = [
        "**Connected MCP Servers:**\n",
        "• `https://learn.microsoft.com/api/mcp` - Microsoft Learn API\n",
    ]
    if GITHUB_PAT:
        server_lines.append("• `https://api.githubcopilot.com/mcp/` - GitHub Copilot API (authenticated)\n")
    else:
        server_lines.append("• GitHub MCP server (not configured - set GITHUB_PAT env var)\n")
    servers_info = "".join(server_lines)

    # Static help text describing the demo's command surface and mechanics.
    info_text = (
        "🔗 **MCP Client Information**\n\n"
        f"{servers_info}\n"
        "**Authentication Demo:**\n"
        "• GitHub server uses Bearer token authentication via headers\n"
        "• Example: `headers={'Authorization': f'Bearer {GITHUB_PAT}'}`\n\n"
        "**Usage Patterns:**\n"
        "• `agent <query>` - Use stateful Agent with MCP tools\n"
        "• `prompt <query>` - Use stateless ChatPrompt with MCP tools\n"
        "• `mcp info` - Show this information\n\n"
        "**How it works:**\n"
        "1. MCP Client connects to remote servers via SSE protocol\n"
        "2. Headers (like Authorization) are passed with each request\n"
        "3. Remote tools are loaded and integrated with ChatPrompt/Agent\n"
        "4. LLM can call remote tools as needed to answer your questions"
    )
    await ctx.reply(info_text)
23 | 148 |
|
24 | 149 |
|
# Fallback handler for general chat (uses Agent by default)
@app.on_message
async def handle_fallback_message(ctx: ActivityContext[MessageActivity]):
    """Fallback handler using Agent with MCP tools"""
    print(f"[FALLBACK] Message received: {ctx.activity.text}")
    print(f"[FALLBACK] From: {ctx.activity.from_}")
    await ctx.send(TypingActivityInput())

    # Use Agent with MCP tools for general conversation
    result = await responses_agent.send(ctx.activity.text)
    content = result.response.content
    if not content:
        # Nothing to relay back to the user.
        return
    reply = MessageActivityInput(text=content).add_ai_generated()
    await ctx.send(reply)
35 | 163 |
|
36 | 164 |
|
37 | 165 | if __name__ == "__main__": |
|
0 commit comments