Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion packages/apps/src/microsoft/teams/apps/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,9 @@ def router(self) -> ActivityRouter:
@property
def id(self) -> Optional[str]:
    """The app's ID from tokens.

    Prefers the bot token's ``app_id``; falls back to the graph token's
    ``app_id``; returns ``None`` when neither token is present.
    """
    # NOTE: explicit None-checks on the token objects (not on app_id) —
    # a present bot token with a None app_id intentionally returns None
    # rather than falling through to the graph token.
    return (
        self._tokens.bot.app_id if self._tokens.bot else self._tokens.graph.app_id if self._tokens.graph else None
    )

@property
def name(self) -> Optional[str]:
Expand Down
10 changes: 5 additions & 5 deletions tests/ai-test/src/handlers/feedback_management.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,12 +49,12 @@ async def handle_feedback_submission(ctx: ActivityContext[MessageSubmitActionInv
return

# Type-safe access to activity value
value_dict = activity.value.model_dump() if hasattr(activity.value, "model_dump") else {}
action_value: Dict[str, Any] = value_dict.get("actionValue", {})
reaction: str | None = action_value.get("reaction")
feedback_str: str | None = action_value.get("feedback")
assert feedback_str, "No feedback string found in action_value"
invoke_value = activity.value
assert invoke_value.action_name == "feedback"
feedback_str = invoke_value.action_value.feedback
reaction = invoke_value.action_value.reaction
feedback_json: Dict[str, Any] = json.loads(feedback_str)
# { 'feedbackText': 'the ai response was great!' }

if not activity.reply_to_id:
logger.warning(f"No replyToId found for messageId {activity.id}")
Expand Down
12 changes: 12 additions & 0 deletions tests/mcp-client/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Sample: MCP Client


### Available Commands

| Command | Description | Example Usage |
|---------|-------------|---------------|
| `agent <query>` | Use stateful Agent with MCP tools | `agent What's the weather like?` |
| `prompt <query>` | Use stateless ChatPrompt with MCP tools | `prompt Find information about Python` |
| `mcp info` | Show connected MCP servers and usage | `mcp info` |
| `<any message>` | Fallback to Agent with MCP tools | `Hello, can you help me?` |

152 changes: 140 additions & 12 deletions tests/mcp-client/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,34 +4,162 @@
"""

import asyncio
import re
from os import getenv

from microsoft.teams.ai import Agent, ListMemory
from microsoft.teams.api import MessageActivity, TypingActivityInput
from dotenv import find_dotenv, load_dotenv
from microsoft.teams.ai import Agent, ChatPrompt, ListMemory
from microsoft.teams.api import MessageActivity, MessageActivityInput, TypingActivityInput
from microsoft.teams.apps import ActivityContext, App
from microsoft.teams.devtools import DevToolsPlugin
from microsoft.teams.mcpplugin import McpClientPlugin
from microsoft.teams.openai import OpenAIResponsesAIModel
from microsoft.teams.mcpplugin import McpClientPlugin, McpClientPluginParams
from microsoft.teams.openai import OpenAICompletionsAIModel, OpenAIResponsesAIModel

load_dotenv(find_dotenv(usecwd=True))

app = App(plugins=[DevToolsPlugin()])

responses_openai_ai_model = OpenAIResponsesAIModel(stateful=True)
chat_memory = ListMemory()

def get_required_env(key: str) -> str:
    """Look up *key* in the environment, raising if it is unset or empty.

    Raises:
        ValueError: when the variable is missing or has an empty value.
    """
    if value := getenv(key):
        return value
    raise ValueError(f"Required environment variable {key} is not set")


AZURE_OPENAI_MODEL = get_required_env("AZURE_OPENAI_MODEL")


# GitHub PAT for MCP server (optional)
def get_optional_env(key: str) -> str | None:
return getenv(key)


# This example uses a PersonalAccessToken, but you may get
# the user's oauth token as well by getting them to sign in
# and then using app.sign_in to get their token.
GITHUB_PAT = get_optional_env("GITHUB_PAT")

# Set up AI models
completions_model = OpenAICompletionsAIModel(model=AZURE_OPENAI_MODEL)
responses_model = OpenAIResponsesAIModel(model=AZURE_OPENAI_MODEL, stateful=True)

# Configure MCP Client Plugin with multiple remote servers (as shown in docs)
mcp_plugin = McpClientPlugin()

# Add multiple MCP servers to demonstrate the concept from documentation
mcp_plugin.use_mcp_server("https://learn.microsoft.com/api/mcp")

responses_agent = Agent(responses_openai_ai_model, memory=chat_memory, plugins=[mcp_plugin])
# Add GitHub MCP server with authentication headers (demonstrates header functionality)
if GITHUB_PAT:
mcp_plugin.use_mcp_server(
"https://api.githubcopilot.com/mcp/", McpClientPluginParams(headers={"Authorization": f"Bearer {GITHUB_PAT}"})
)
print("✅ GitHub MCP server configured with authentication")
else:
print("⚠️ GITHUB_PAT not found - GitHub MCP server not configured")
print(" Set GITHUB_PAT environment variable to enable GitHub MCP integration")
# Example of additional servers (commented out - would need actual working endpoints):
# mcp_plugin.use_mcp_server("https://example.com/mcp/weather")
# mcp_plugin.use_mcp_server("https://example.com/mcp/pokemon")

# Memory for stateful conversations
chat_memory = ListMemory()

# Agent using Responses API with MCP tools
responses_agent = Agent(responses_model, memory=chat_memory, plugins=[mcp_plugin])

# ChatPrompt with MCP tools (demonstrating docs example)
chat_prompt = ChatPrompt(completions_model, plugins=[mcp_plugin])


# Pattern-based handlers to demonstrate different MCP usage patterns


@app.on_message_pattern(re.compile(r"^agent\s+(.+)", re.IGNORECASE))
async def handle_agent_chat(ctx: ActivityContext[MessageActivity]):
    """Handle 'agent <query>' command using Agent with MCP tools (stateful)"""
    matched = re.match(r"^agent\s+(.+)", ctx.activity.text, re.IGNORECASE)
    if not matched:
        # Router pattern should guarantee a match; bail out defensively.
        return

    query = matched.group(1).strip()
    print(f"[AGENT] Processing: {query}")
    await ctx.send(TypingActivityInput())

    # Stateful Agent call backed by MCP tools; reply only when the
    # model produced text content.
    result = await responses_agent.send(query)
    if result.response.content:
        await ctx.send(MessageActivityInput(text=result.response.content).add_ai_generated())


@app.on_message_pattern(re.compile(r"^prompt\s+(.+)", re.IGNORECASE))
async def handle_prompt_chat(ctx: ActivityContext[MessageActivity]):
    """Handle 'prompt <query>' command using ChatPrompt with MCP tools (stateless).

    Extracts the query after the 'prompt' keyword, sends a typing
    indicator, then forwards the query to the stateless ChatPrompt and
    replies with the AI-generated content, if any.
    """
    match = re.match(r"^prompt\s+(.+)", ctx.activity.text, re.IGNORECASE)
    if not match:
        # Router pattern should guarantee a match; bail out defensively.
        return

    query = match.group(1).strip()
    print(f"[PROMPT] Processing: {query}")
    await ctx.send(TypingActivityInput())

    # Use ChatPrompt with MCP tools (demonstrates docs pattern).
    # Fixed: missing space after the sentence ("tools.Use" -> "tools. Use")
    # in the instructions sent to the model.
    result = await chat_prompt.send(
        input=query,
        instructions=(
            "You are a helpful assistant with access to remote MCP tools. Use them to help answer questions."
        ),
    )

    if result.response.content:
        message = MessageActivityInput(text=result.response.content).add_ai_generated()
        await ctx.send(message)


@app.on_message_pattern(re.compile(r"^mcp\s+info", re.IGNORECASE))
async def handle_mcp_info(ctx: ActivityContext[MessageActivity]):
    """Handle 'mcp info' command to show available MCP servers and tools"""
    # The GitHub line depends on whether a PAT was configured at startup.
    github_line = (
        "• `https://api.githubcopilot.com/mcp/` - GitHub Copilot API (authenticated)\n"
        if GITHUB_PAT
        else "• GitHub MCP server (not configured - set GITHUB_PAT env var)\n"
    )
    servers_info = (
        "**Connected MCP Servers:**\n"
        "• `https://learn.microsoft.com/api/mcp` - Microsoft Learn API\n" + github_line
    )

    info_text = (
        "🔗 **MCP Client Information**\n\n"
        f"{servers_info}\n"
        "**Authentication Demo:**\n"
        "• GitHub server uses Bearer token authentication via headers\n"
        "• Example: `headers={'Authorization': f'Bearer {GITHUB_PAT}'}`\n\n"
        "**Usage Patterns:**\n"
        "• `agent <query>` - Use stateful Agent with MCP tools\n"
        "• `prompt <query>` - Use stateless ChatPrompt with MCP tools\n"
        "• `mcp info` - Show this information\n\n"
        "**How it works:**\n"
        "1. MCP Client connects to remote servers via SSE protocol\n"
        "2. Headers (like Authorization) are passed with each request\n"
        "3. Remote tools are loaded and integrated with ChatPrompt/Agent\n"
        "4. LLM can call remote tools as needed to answer your questions"
    )
    await ctx.reply(info_text)


# Fallback handler for general chat (uses Agent by default)
@app.on_message
async def handle_fallback_message(ctx: ActivityContext[MessageActivity]):
    """Fallback handler using Agent with MCP tools.

    Fixed: the pasted diff interleaved the deleted `handle_message`
    lines with the new handler, leaving duplicate definitions and a
    duplicate reply; only the new handler is kept.
    """
    print(f"[FALLBACK] Message received: {ctx.activity.text}")
    print(f"[FALLBACK] From: {ctx.activity.from_}")
    await ctx.send(TypingActivityInput())

    # Use Agent with MCP tools for general conversation
    result = await responses_agent.send(ctx.activity.text)
    if result.response.content:
        message = MessageActivityInput(text=result.response.content).add_ai_generated()
        await ctx.send(message)


if __name__ == "__main__":
Expand Down
11 changes: 11 additions & 0 deletions tests/mcp-server/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Sample: MCP Server

### Available Tools

| Tool | Description | Parameters | Example Usage |
|------|-------------|------------|---------------|
| `echo` | Echo back input text | `input: str` | Echo functionality from docs |
| `get_weather` | Get weather for a location | `location: str` | Always returns "sunny" |
| `calculate` | Basic arithmetic operations | `operation: str, a: float, b: float` | add, subtract, multiply, divide |
| `alert` | Send proactive message to Teams user | `user_id: str, message: str` | Human-in-the-loop notifications |

86 changes: 82 additions & 4 deletions tests/mcp-server/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
"""

import asyncio
from typing import Dict

from microsoft.teams.ai import Function
from microsoft.teams.api.activities.message.message import MessageActivity
Expand All @@ -13,9 +14,25 @@
from microsoft.teams.mcpplugin import McpServerPlugin
from pydantic import BaseModel

mcp_server_plugin = McpServerPlugin()
# Configure MCP server with custom name (as shown in docs)
mcp_server_plugin = McpServerPlugin(
name="test-mcp",
)

# Storage for conversation IDs (for proactive messaging)
conversation_storage: Dict[str, str] = {}


# Echo tool from documentation example
class EchoParams(BaseModel):
    """Input schema for the `echo` MCP tool."""

    # `input` shadows the builtin but matches the tool's wire parameter name
    input: str


async def echo_handler(params: EchoParams) -> str:
    """Echo the caller's text back, prefixed with 'You said'."""
    text = params.input
    return f"You said {text}"


# Weather tool (existing)
class GetWeatherParams(BaseModel):
location: str

Expand Down Expand Up @@ -44,7 +61,42 @@ async def calculate_handler(params: CalculateParams) -> str:
return "Unknown operation"


# Direct function call usage
# Alert tool for proactive messaging (as mentioned in docs)
class AlertParams(BaseModel):
    """Input schema for the `alert` proactive-messaging MCP tool."""

    # Teams user ID whose stored conversation should receive the alert
    user_id: str
    # Message text to deliver to that conversation
    message: str


async def alert_handler(params: AlertParams) -> str:
    """Send a proactive Teams message to a previously seen user.

    Demonstrates the docs' "piping messages to user" feature: the
    conversation ID captured when the user last messaged the bot is
    looked up and used to deliver *params.message* out-of-band.
    """
    # Reject calls that are missing either required field.
    if not (params.user_id and params.message):
        return "Invalid parameters: user_id and message are required"

    # The user must have messaged the bot at least once so that a
    # conversation ID was stored for them.
    conversation_id = conversation_storage.get(params.user_id)
    if not conversation_id:
        return f"No conversation found for user {params.user_id}. User needs to message the bot first."

    # Simplified proactive send — a production bot would use the proper
    # proactive-messaging flow.
    await app.send(conversation_id=conversation_id, activity=params.message)
    return f"Alert sent to user {params.user_id}: {params.message} (conversation: {conversation_id})"


# Register echo tool (from documentation)
mcp_server_plugin.use_tool(
Function(
name="echo",
description="echo back whatever you said",
parameter_schema=EchoParams,
handler=echo_handler,
)
)

# Register weather tool
mcp_server_plugin.use_tool(
Function(
name="get_weather",
Expand All @@ -54,7 +106,7 @@ async def calculate_handler(params: CalculateParams) -> str:
)
)

# Second tool registration
# Register calculator tool
mcp_server_plugin.use_tool(
Function(
name="calculate",
Expand All @@ -64,12 +116,38 @@ async def calculate_handler(params: CalculateParams) -> str:
)
)

# Register alert tool for proactive messaging
mcp_server_plugin.use_tool(
Function(
name="alert",
description="Send proactive message to a Teams user",
parameter_schema=AlertParams,
handler=alert_handler,
)
)

app = App(plugins=[mcp_server_plugin, DevToolsPlugin()])


@app.on_message
async def handle_message(ctx: ActivityContext[MessageActivity]):
    """Echo incoming messages and record the sender's conversation ID.

    Storing the conversation ID per user is what later lets the MCP
    `alert` tool send this user a proactive message.
    """
    sender = ctx.activity.from_
    user_id = sender.id
    conversation_id = ctx.activity.conversation.id
    # Remember where to reach this user for proactive messaging.
    conversation_storage[user_id] = conversation_id

    print(f"User {sender} just sent a message!")

    # Echo back the message with info about stored conversation
    await ctx.reply(
        f"You said: {ctx.activity.text}\n\n"
        f"📝 Stored conversation ID `{conversation_id}` for user `{user_id}` "
        f"(for proactive messaging via MCP alert tool)"
    )


if __name__ == "__main__":
Expand Down