lint docs examples #286

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account


Merged (2 commits) on Mar 14, 2025

README.md: 129 changes (92 additions, 37 deletions)

@@ -99,12 +99,14 @@ from mcp.server.fastmcp import FastMCP

```python
# Create an MCP server
mcp = FastMCP("Demo")


# Add an addition tool
@mcp.tool()
def add(a: int, b: int) -> int:
"""Add two numbers"""
return a + b


# Add a dynamic greeting resource
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
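    # The body of this example is collapsed in the hunk; a minimal sketch:
    return f"Hello, {name}!"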
```

@@ -139,34 +141,42 @@ The FastMCP server is your core interface to the MCP protocol. It handles connec

```python
# Add lifespan support for startup/shutdown with strong typing
from contextlib import asynccontextmanager
from dataclasses import dataclass
from typing import AsyncIterator
from mcp.server.fastmcp import FastMCP

from fake_database import Database # Replace with your actual DB type

from mcp.server.fastmcp import Context, FastMCP

# Create a named server
mcp = FastMCP("My App")

# Specify dependencies for deployment and development
mcp = FastMCP("My App", dependencies=["pandas", "numpy"])


@dataclass
class AppContext:
db: Database


@asynccontextmanager
async def app_lifespan(server: FastMCP) -> AsyncIterator[AppContext]:
"""Manage application lifecycle with type-safe context"""
# Initialize on startup
db = await Database.connect()
try:
# Initialize on startup
await db.connect()
yield AppContext(db=db)
finally:
# Cleanup on shutdown
await db.disconnect()


# Pass lifespan to server
mcp = FastMCP("My App", lifespan=app_lifespan)


# Access type-safe lifespan context in tools
@mcp.tool()
def query_db(ctx: Context) -> str:
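    # The body is collapsed in this hunk; a minimal sketch, assuming the
    # AppContext dataclass and fake Database defined above:
    db = ctx.request_context.lifespan_context.db
    return db.query()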
```

@@ -180,11 +190,17 @@ def query_db(ctx: Context) -> str:
Resources are how you expose data to LLMs. They're similar to GET endpoints in a REST API - they provide data but shouldn't perform significant computation or have side effects:

```python
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("My App")


@mcp.resource("config://app")
def get_config() -> str:
"""Static configuration data"""
return "App configuration here"


@mcp.resource("users://{user_id}/profile")
def get_user_profile(user_id: str) -> str:
"""Dynamic user data"""
```

@@ -196,10 +212,17 @@ def get_user_profile(user_id: str) -> str:
Tools let LLMs take actions through your server. Unlike resources, tools are expected to perform computation and have side effects:

```python
import httpx
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("My App")


@mcp.tool()
def calculate_bmi(weight_kg: float, height_m: float) -> float:
"""Calculate BMI given weight in kg and height in meters"""
return weight_kg / (height_m**2)


@mcp.tool()
async def fetch_weather(city: str) -> str:
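    # The body is collapsed in this hunk; a minimal sketch using httpx's
    # AsyncClient and a placeholder weather endpoint:
    async with httpx.AsyncClient() as client:
        response = await client.get(f"https://api.weather.com/{city}")
        return response.text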
```

@@ -214,16 +237,22 @@ async def fetch_weather(city: str) -> str:
Prompts are reusable templates that help LLMs interact with your server effectively:

```python
from mcp.server.fastmcp import FastMCP, types

mcp = FastMCP("My App")


@mcp.prompt()
def review_code(code: str) -> str:
return f"Please review this code:\n\n{code}"


@mcp.prompt()
def debug_error(error: str) -> list[types.Message]:
return [
UserMessage("I'm seeing this error:"),
UserMessage(error),
AssistantMessage("I'll help debug that. What have you tried so far?")
types.UserMessage("I'm seeing this error:"),
types.UserMessage(error),
types.AssistantMessage("I'll help debug that. What have you tried so far?"),
]
```

@@ -235,6 +264,9 @@ FastMCP provides an `Image` class that automatically handles image data:

```python
from mcp.server.fastmcp import FastMCP, Image
from PIL import Image as PILImage

mcp = FastMCP("My App")


@mcp.tool()
def create_thumbnail(image_path: str) -> Image:
"""Create a thumbnail from an image"""
```

@@ -250,6 +282,9 @@ The Context object gives your tools and resources access to MCP capabilities:
```python
from mcp.server.fastmcp import FastMCP, Context

mcp = FastMCP("My App")


@mcp.tool()
async def long_task(files: list[str], ctx: Context) -> str:
"""Process multiple files with progress tracking"""
```

@@ -322,16 +357,19 @@ from mcp.server.fastmcp import FastMCP

```python

mcp = FastMCP("Echo")


@mcp.resource("echo://{message}")
def echo_resource(message: str) -> str:
"""Echo a message as a resource"""
return f"Resource echo: {message}"


@mcp.tool()
def echo_tool(message: str) -> str:
"""Echo a message as a tool"""
return f"Tool echo: {message}"


@mcp.prompt()
def echo_prompt(message: str) -> str:
"""Create an echo prompt"""
```

@@ -343,20 +381,21 @@ def echo_prompt(message: str) -> str:
A more complex example showing database integration:

```python
import sqlite3

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("SQLite Explorer")


@mcp.resource("schema://main")
def get_schema() -> str:
"""Provide the database schema as a resource"""
conn = sqlite3.connect("database.db")
schema = conn.execute("SELECT sql FROM sqlite_master WHERE type='table'").fetchall()
return "\n".join(sql[0] for sql in schema if sql[0])


@mcp.tool()
def query_data(sql: str) -> str:
"""Execute SQL queries safely"""
```

@@ -378,20 +417,27 @@ For more control, you can use the low-level server implementation directly. This

```python
from contextlib import asynccontextmanager
from typing import AsyncIterator

from fake_database import Database # Replace with your actual DB type

from mcp.server import Server


@asynccontextmanager
async def server_lifespan(server: Server) -> AsyncIterator[dict]:
"""Manage server startup and shutdown lifecycle."""
# Initialize resources on startup
db = await Database.connect()
try:
# Initialize resources on startup
await db.connect()
yield {"db": db}
finally:
# Clean up on shutdown
await db.disconnect()


# Pass lifespan to server
server = Server("example-server", lifespan=server_lifespan)


# Access lifespan context in handlers
@server.call_tool()
async def query_db(name: str, arguments: dict) -> list:
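    # Collapsed in this hunk; a minimal sketch, assuming the dict-based lifespan
    # context yielded above and the fake Database type:
    ctx = server.request_context
    db = ctx.lifespan_context["db"]
    return await db.query()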
```

@@ -406,14 +452,15 @@ The lifespan API provides:
- Type-safe context passing between lifespan and request handlers

```python
import mcp.server.stdio
import mcp.types as types
from mcp.server.lowlevel import NotificationOptions, Server
from mcp.server.models import InitializationOptions

# Create a server instance
server = Server("example-server")


@server.list_prompts()
async def handle_list_prompts() -> list[types.Prompt]:
return [
@@ -422,18 +469,16 @@ async def handle_list_prompts() -> list[types.Prompt]:
description="An example prompt template",
arguments=[
types.PromptArgument(
name="arg1",
description="Example argument",
required=True
name="arg1", description="Example argument", required=True
)
],
)
]


@server.get_prompt()
async def handle_get_prompt(
name: str, arguments: dict[str, str] | None
) -> types.GetPromptResult:
if name != "example-prompt":
raise ValueError(f"Unknown prompt: {name}")
@@ -443,14 +488,12 @@ async def handle_get_prompt(
messages=[
types.PromptMessage(
role="user",
content=types.TextContent(type="text", text="Example prompt text"),
)
],
)


async def run():
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.run(
@@ -462,12 +505,14 @@ async def run():
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)


if __name__ == "__main__":
import asyncio

asyncio.run(run())
```

@@ -476,18 +521,21 @@ if __name__ == "__main__":
The SDK provides a high-level client interface for connecting to MCP servers:

```python
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client

# Create server parameters for stdio connection
server_params = StdioServerParameters(
command="python", # Executable
args=["example_server.py"], # Optional command line arguments
env=None # Optional environment variables
command="python", # Executable
args=["example_server.py"], # Optional command line arguments
env=None, # Optional environment variables
)


# Optional: create a sampling callback
async def handle_sampling_message(
message: types.CreateMessageRequestParams,
) -> types.CreateMessageResult:
return types.CreateMessageResult(
role="assistant",
content=types.TextContent(
@@ -498,17 +546,22 @@ async def handle_sampling_message(message: types.CreateMessageRequestParams) ->
stopReason="endTurn",
)


async def run():
async with stdio_client(server_params) as (read, write):
async with ClientSession(
read, write, sampling_callback=handle_sampling_message
) as session:
# Initialize the connection
await session.initialize()

# List available prompts
prompts = await session.list_prompts()

# Get a prompt
prompt = await session.get_prompt("example-prompt", arguments={"arg1": "value"})
prompt = await session.get_prompt(
"example-prompt", arguments={"arg1": "value"}
)

# List available resources
resources = await session.list_resources()
@@ -522,8 +575,10 @@ async def run():
# Call a tool
result = await session.call_tool("tool-name", arguments={"arg1": "value"})


if __name__ == "__main__":
import asyncio

asyncio.run(run())
```

pyproject.toml: 1 change (1 addition, 0 deletions)

@@ -49,6 +49,7 @@ dev-dependencies = [
"trio>=0.26.2",
"pytest-flakefinder>=1.1.0",
"pytest-xdist>=3.6.1",
"pytest-examples>=0.0.14",
]

[build-system]

tests/test_examples.py: 15 changes (15 additions, 0 deletions)

@@ -1,6 +1,7 @@
"""Tests for example servers"""

import pytest
from pytest_examples import CodeExample, EvalExample, find_examples

from mcp.shared.memory import (
create_connected_server_and_client_session as client_session,
@@ -70,3 +71,17 @@ async def test_desktop(monkeypatch):
assert isinstance(content.text, str)
assert "/fake/path/file1.txt" in content.text
assert "/fake/path/file2.txt" in content.text


@pytest.mark.parametrize("example", find_examples("README.md"), ids=str)
def test_docs_examples(example: CodeExample, eval_example: EvalExample):
ruff_ignore: list[str] = ["F841", "I001"]

eval_example.set_config(
ruff_ignore=ruff_ignore, target_version="py310", line_length=88
)

if eval_example.update_examples: # pragma: no cover
eval_example.format(example)
else:
eval_example.lint(example)
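
The new `test_docs_examples` test collects every Python snippet in README.md with pytest-examples and lints it with ruff (ignoring F841 and I001). When pytest is invoked with pytest-examples' `--update-examples` option (the flag that populates `eval_example.update_examples`), the snippets are reformatted in place instead of just checked; that flag name comes from pytest-examples' usual workflow and is not shown in this diff.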