Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,15 @@ WORKDIR /app
COPY pyproject.toml /app/
COPY README.md /app/
COPY src /app/src
COPY engines /app/engines
COPY build-engines.py /app/build-engines.py

RUN uv sync

ENV PATH="/app/.venv/bin:$PATH"

RUN python /app/build-engines.py

EXPOSE 8000

CMD ["python", "src/server.py"]
7 changes: 6 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ A Model Context Protocol (MCP) server implementation that integrates with [SerpA
## Features

- **Multi-Engine Search**: Google, Bing, Yahoo, DuckDuckGo, YouTube, eBay, and [more](https://serpapi.com/search-engine-apis)
- **Engine Resources**: Per-engine parameter schemas available via MCP resources (see Search Tool)
- **Real-time Weather Data**: Location-based weather with forecasts via search queries
- **Stock Market Data**: Company financials and market data through search integration
- **Dynamic Result Processing**: Automatically detects and formats different result types
Expand Down Expand Up @@ -68,6 +69,7 @@ curl "https://mcp.serpapi.com/mcp" -H "Authorization: Bearer your_key" -d '...'
## Search Tool

The MCP server has one main Search Tool that supports all SerpApi engines and result types. You can find all available parameters on the [SerpApi API reference](https://serpapi.com/search-api).
Engine parameter schemas are also exposed as MCP resources: `serpapi://engines` (index) and `serpapi://engines/<engine>`.

The parameters you can provide are specific for each API engine. Some sample parameters are provided below:

Expand All @@ -87,7 +89,7 @@ The parameters you can provide are specific for each API engine. Some sample par
{"name": "search", "arguments": {"params": {"q": "detailed search"}, "mode": "complete"}}
```

**Supported Engines:** Google, Bing, Yahoo, DuckDuckGo, YouTube, eBay, and more.
**Supported Engines:** Google, Bing, Yahoo, DuckDuckGo, YouTube, eBay, and more (see `serpapi://engines`).

**Result Types:** Answer boxes, organic results, news, images, shopping - automatically detected and formatted.

Expand All @@ -100,6 +102,9 @@ uv sync && uv run src/server.py
# Docker
docker build -t serpapi-mcp . && docker run -p 8000:8000 serpapi-mcp

# Regenerate engine resources (Playground scrape)
python build-engines.py

# Testing with MCP Inspector
npx @modelcontextprotocol/inspector
# Configure: URL mcp.serpapi.com/YOUR_KEY/mcp, Transport "Streamable HTTP transport"
Expand Down
114 changes: 114 additions & 0 deletions build-engines.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
#!/usr/bin/env python3
"""Build SerpApi engine parameter data for MCP usage."""

from __future__ import annotations

import html
import json
from pathlib import Path
from urllib.request import Request, urlopen

from bs4 import BeautifulSoup
from markdownify import markdownify

# Page scraped for the embedded React props that carry per-engine parameter data.
PLAYGROUND_URL = "https://serpapi.com/playground"
# Engines deliberately skipped when writing output files.
# NOTE(review): the exclusion reasons aren't stated here — presumably variants
# or duplicates of other engines; confirm before changing this set.
EXCLUDED_ENGINES = {
    "google_scholar_profiles",
    "google_light_fast",
    "google_lens_image_sources",
}
# Parameter metadata keys retained by normalize_engine(); all other keys are dropped.
PARAM_KEEP_KEYS = {"html", "type", "options", "required"}
# Directory the per-engine JSON files are written into.
OUTPUT_DIR = Path("engines")
# Timeout (seconds) for the playground HTTP fetch.
TIMEOUT_SECONDS = 30
# Minimal User-Agent header sent with the request.
USER_AGENT = "Mozilla/5.0"


def html_to_markdown(value: str) -> str:
    """Render an HTML fragment as single-line markdown.

    Entities are unescaped first, ``<a>`` tags are stripped (link text kept),
    and every run of whitespace collapses to a single space.
    """
    unescaped = html.unescape(value)
    converted = markdownify(unescaped, strip=["a"])
    return " ".join(converted.split())



def normalize_options(options: list[object]) -> list[object]:
    """Simplify option entries, collapsing ``[value, label]`` pairs to bare values.

    A pair survives intact only when it has a label, its value is
    numeric-looking (int, float, or digit-only string), and the label differs
    from the value — the label then carries real information. Everything else
    (non-pair entries, empty lists, redundant labels) is reduced to the value
    or passed through unchanged.
    """
    result: list[object] = []
    for entry in options:
        if not (isinstance(entry, list) and entry):
            # Not a non-empty [value, label] style list: keep as-is.
            result.append(entry)
            continue
        value = entry[0]
        label = entry[1] if len(entry) > 1 else None
        numeric_like = isinstance(value, (int, float)) or (
            isinstance(value, str) and value.isdigit()
        )
        if label is not None and numeric_like and value != label:
            result.append(entry)
        else:
            result.append(value)
    return result


def fetch_props(url: str) -> dict[str, object]:
    """Download the playground page and return its decoded React props.

    Fetches *url* with a minimal User-Agent, locates the element carrying a
    ``data-react-props`` attribute, and JSON-decodes that attribute's value.

    Raises:
        RuntimeError: when no ``data-react-props`` element is found.
    """
    request = Request(url, headers={"User-Agent": USER_AGENT})
    with urlopen(request, timeout=TIMEOUT_SECONDS) as response:
        body = response.read().decode("utf-8", errors="ignore")
    document = BeautifulSoup(body, "html.parser")
    holder = document.find(attrs={"data-react-props": True})
    # Truthiness (not an `is None` check) kept deliberately: matches the
    # original behavior for bs4 tags.
    if not holder:
        raise RuntimeError("Failed to locate data-react-props in playground HTML.")
    return json.loads(html.unescape(holder["data-react-props"]))


def normalize_engine(engine: str, payload: dict[str, object]) -> dict[str, object]:
    """Build a compact parameter description for one engine.

    Walks every parameter group in *payload*, keeps only the keys listed in
    ``PARAM_KEEP_KEYS``, normalizes option lists, converts HTML help text to
    markdown under a ``description`` key, and splits the results into
    SerpApi-wide parameters (``common_params``, group ``serpapi_parameters``)
    versus engine-specific ones (``params``). Parameters that end up with no
    retained metadata are dropped entirely.
    """
    engine_params: dict[str, dict[str, object]] = {}
    shared_params: dict[str, dict[str, object]] = {}
    if isinstance(payload, dict):
        for group_name, group in payload.items():
            if not isinstance(group, dict):
                continue
            parameters = group.get("parameters")
            if not isinstance(parameters, dict):
                continue
            for param_name, param in parameters.items():
                if not isinstance(param, dict):
                    continue
                kept = {key: val for key, val in param.items() if key in PARAM_KEEP_KEYS}
                options = kept.get("options")
                if isinstance(options, list):
                    kept["options"] = normalize_options(options)
                raw_html = kept.pop("html", None)
                if isinstance(raw_html, str):
                    kept["description"] = html_to_markdown(raw_html)
                if not kept:
                    # Nothing useful survived filtering; omit this parameter.
                    continue
                kept["group"] = group_name
                bucket = shared_params if group_name == "serpapi_parameters" else engine_params
                bucket[param_name] = kept

    return {"engine": engine, "params": engine_params, "common_params": shared_params}


def main() -> int:
    """Fetch playground props and write one JSON file per supported engine.

    Skips excluded engines and malformed entries, writes pretty-printed UTF-8
    JSON files into ``OUTPUT_DIR``, prints a summary, and returns 0.

    Raises:
        RuntimeError: when the playground props lack a 'parameters' mapping.
    """
    props = fetch_props(PLAYGROUND_URL)
    parameters = props.get("parameters")
    if not isinstance(parameters, dict):
        raise RuntimeError("Playground props missing 'parameters' map.")

    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    written: list[str] = []

    # Sorted iteration keeps the generated files' order (and diffs) stable.
    for engine, payload in sorted(parameters.items()):
        if not isinstance(engine, str) or engine in EXCLUDED_ENGINES:
            continue
        if not isinstance(payload, dict):
            continue
        normalized = normalize_engine(engine, payload)
        serialized = json.dumps(normalized, indent=2, ensure_ascii=False)
        (OUTPUT_DIR / f"{engine}.json").write_text(serialized, encoding="utf-8")
        written.append(engine)

    print(f"Wrote {len(written)} engine files to {OUTPUT_DIR}")
    return 0


if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
Loading