Skip to content

Commit e0a33f5

Browse files
committed
mcp server
1 parent fb45440 commit e0a33f5

File tree

12 files changed

+929
-26
lines changed

12 files changed

+929
-26
lines changed

.github/workflows/release.yml

Lines changed: 27 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -195,10 +195,35 @@ jobs:
195195
id: get_version
196196
run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT
197197

198-
- name: Download all artifacts
198+
- name: Download Python package
199199
uses: actions/download-artifact@v4
200200
with:
201-
path: ./artifacts
201+
name: python-memora-dist
202+
path: ./artifacts/python-memora-dist
203+
204+
- name: Download Rust CLI (Linux)
205+
uses: actions/download-artifact@v4
206+
with:
207+
name: rust-cli-memora-linux-amd64
208+
path: ./artifacts/rust-cli-memora-linux-amd64
209+
210+
- name: Download Rust CLI (macOS Intel)
211+
uses: actions/download-artifact@v4
212+
with:
213+
name: rust-cli-memora-darwin-amd64
214+
path: ./artifacts/rust-cli-memora-darwin-amd64
215+
216+
- name: Download Rust CLI (macOS ARM)
217+
uses: actions/download-artifact@v4
218+
with:
219+
name: rust-cli-memora-darwin-arm64
220+
path: ./artifacts/rust-cli-memora-darwin-arm64
221+
222+
- name: Download Helm chart
223+
uses: actions/download-artifact@v4
224+
with:
225+
name: helm-chart
226+
path: ./artifacts/helm-chart
202227

203228
- name: Prepare release assets
204229
run: |

.github/workflows/test.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ jobs:
4545
python-version-file: ".python-version"
4646

4747
- name: Install dependencies
48-
run: uv sync --all-extras --dev
48+
run: uv sync --extra test
4949

5050
- name: Run migrations
5151
working-directory: ./memora

docker/api.Dockerfile

Lines changed: 30 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,27 +1,50 @@
1-
FROM python:3.11-slim
1+
FROM python:3.11-slim AS builder
22

3-
# Install system dependencies
3+
# Install build dependencies
44
RUN apt-get update && apt-get install -y \
5-
curl \
65
build-essential \
76
&& rm -rf /var/lib/apt/lists/*
87

98
# Set working directory
109
WORKDIR /app
1110

12-
# Copy project files
13-
COPY memora /app/memora
11+
# Copy only dependency files first for better caching
12+
COPY memora/pyproject.toml memora/README.md /app/memora/
13+
COPY memora/memora /app/memora/memora
14+
15+
# Install uv for faster dependency installation
16+
RUN pip install --no-cache-dir uv
17+
18+
# Install Python dependencies to a virtual environment
19+
WORKDIR /app/memora
20+
RUN uv venv /opt/venv && \
21+
. /opt/venv/bin/activate && \
22+
uv pip install --no-cache -e .
23+
24+
# Production stage
25+
FROM python:3.11-slim
1426

15-
# Install Python dependencies
27+
# Install only runtime dependencies
28+
RUN apt-get update && apt-get install -y \
29+
curl \
30+
libgomp1 \
31+
&& rm -rf /var/lib/apt/lists/*
32+
33+
# Copy virtual environment from builder
34+
COPY --from=builder /opt/venv /opt/venv
35+
COPY --from=builder /app/memora /app/memora
36+
37+
# Set working directory
1638
WORKDIR /app/memora
17-
RUN pip install --no-cache-dir -e .
1839

1940
# Expose API port
2041
EXPOSE 8080
2142

2243
# Set environment variables
2344
ENV PYTHONUNBUFFERED=1
2445
ENV DATABASE_URL=postgresql://memora:memora_dev@postgres:5432/memora
46+
ENV PATH="/opt/venv/bin:$PATH"
47+
ENV PYTHONPATH=/app/memora
2548

2649
# Run the API server
2750
CMD ["python", "-m", "memora.web.server", "--host", "0.0.0.0", "--port", "8080"]

memora-mcp-server/README.md

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# Memora MCP Server
2+
3+
Remote MCP server for integrating Memora memory capabilities with Claude Desktop and other MCP clients.
4+
5+
## Configuration
6+
7+
Environment variables:
8+
- `MEMORA_AGENT_ID`: The agent ID to use for all operations
9+
- `MEMORA_API_URL`: Memora API endpoint (default: http://localhost:8080)
10+
- `MEMORA_API_KEY`: API key for authentication (optional)
11+
12+
## Usage
13+
14+
### Start the HTTP/SSE Server
15+
```bash
16+
export MEMORA_AGENT_ID=your-agent-id
17+
export MEMORA_API_URL=http://localhost:8080
18+
export PORT=8765 # optional, default is 8765
19+
export HOST=127.0.0.1 # optional, default is 127.0.0.1
20+
uv run memora-mcp-server
21+
```
22+
23+
The server will start on `http://127.0.0.1:8765`
24+
25+
### Claude Desktop Integration
26+
27+
Add to your Claude Desktop config (`~/Library/Application Support/Claude/claude_desktop_config.json` on macOS):
28+
29+
```json
30+
{
31+
"mcpServers": {
32+
"memora": {
33+
"url": "http://127.0.0.1:8765/sse"
34+
}
35+
}
36+
}
37+
```
38+
39+
Make sure the Memora MCP server is running before starting Claude Desktop.
40+
41+
## Available Tools
42+
43+
- `memora_put`: Store facts/memories with required context
44+
- `memora_search`: Search through memories using semantic search

memora-mcp-server/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
"""Memora MCP Server - Remote MCP server for Memora memory system."""
2+
3+
__version__ = "0.0.1"

memora-mcp-server/client.py

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
"""Memora API client wrapper."""
2+
3+
import httpx
4+
from typing import Any
5+
6+
7+
class MemoraClient:
8+
"""Client for interacting with Memora API."""
9+
10+
def __init__(self, api_url: str, agent_id: str, api_key: str | None = None):
11+
self.api_url = api_url.rstrip("/")
12+
self.agent_id = agent_id
13+
self.headers = {}
14+
if api_key:
15+
self.headers["Authorization"] = f"Bearer {api_key}"
16+
17+
async def remember(
18+
self, content: str, context: str
19+
) -> dict[str, Any]:
20+
"""Store a memory using batch endpoint."""
21+
async with httpx.AsyncClient() as client:
22+
payload = {
23+
"agent_id": self.agent_id,
24+
"items": [
25+
{
26+
"content": content,
27+
"context": context,
28+
}
29+
]
30+
}
31+
32+
response = await client.post(
33+
f"{self.api_url}/api/memories/batch",
34+
json=payload,
35+
headers=self.headers,
36+
timeout=30.0,
37+
)
38+
response.raise_for_status()
39+
return response.json()
40+
41+
async def search(
42+
self, query: str, max_tokens: int = 4096
43+
) -> dict[str, Any]:
44+
"""Search memories using search endpoint."""
45+
async with httpx.AsyncClient() as client:
46+
payload = {
47+
"agent_id": self.agent_id,
48+
"query": query,
49+
"thinking_budget": 100,
50+
"max_tokens": max_tokens,
51+
"reranker": "heuristic",
52+
"trace": False,
53+
}
54+
55+
response = await client.post(
56+
f"{self.api_url}/api/search",
57+
json=payload,
58+
headers=self.headers,
59+
timeout=30.0,
60+
)
61+
response.raise_for_status()
62+
return response.json()

memora-mcp-server/config.py

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
"""Configuration management for Memora MCP Server."""
2+
3+
import os
4+
from dataclasses import dataclass
5+
6+
7+
@dataclass
8+
class Config:
9+
"""MCP Server configuration."""
10+
11+
agent_id: str
12+
api_url: str = "http://localhost:8080"
13+
api_key: str | None = None
14+
15+
@classmethod
16+
def from_env(cls) -> "Config":
17+
"""Load configuration from environment variables."""
18+
agent_id = os.getenv("MEMORA_AGENT_ID")
19+
if not agent_id:
20+
raise ValueError("MEMORA_AGENT_ID environment variable is required")
21+
22+
return cls(
23+
agent_id=agent_id,
24+
api_url=os.getenv("MEMORA_API_URL", "http://localhost:8080"),
25+
api_key=os.getenv("MEMORA_API_KEY"),
26+
)

memora-mcp-server/pyproject.toml

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
[project]
2+
name = "memora-mcp-server"
3+
version = "0.0.1"
4+
description = "Remote MCP server for Memora memory system"
5+
readme = "README.md"
6+
requires-python = ">=3.11"
7+
dependencies = [
8+
"fastmcp>=0.7.0",
9+
"httpx>=0.28.1",
10+
]
11+
12+
[project.scripts]
13+
memora-mcp-server = "server:main"
14+
15+
[tool.hatch.build.targets.wheel]
16+
packages = ["."]
17+
18+
[build-system]
19+
requires = ["hatchling"]
20+
build-backend = "hatchling.build"

memora-mcp-server/server.py

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
"""Memora MCP Server implementation using FastMCP."""
2+
3+
import json
4+
import logging
5+
import os
6+
7+
from fastmcp import FastMCP
8+
9+
from config import Config
10+
from client import MemoraClient
11+
12+
# Module-level logging setup; the tool functions below log through this logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load config
# NOTE(review): Config.from_env() raises ValueError at import time if
# MEMORA_AGENT_ID is unset, so merely importing this module requires the
# environment to be configured.
config = Config.from_env()
client = MemoraClient(
    api_url=config.api_url,
    agent_id=config.agent_id,
    api_key=config.api_key,
)

# Create FastMCP server
mcp = FastMCP("memora-mcp-server")
25+
26+
27+
@mcp.tool()
async def memora_put(content: str, context: str) -> str:
    """
    **CRITICAL: Store important user information to long-term memory.**

    Use this tool PROACTIVELY whenever the user shares:
    - Personal facts, preferences, or interests (e.g., "I love hiking", "I'm a vegetarian")
    - Important events or milestones (e.g., "I got promoted", "My birthday is June 15")
    - User history, experiences, or background (e.g., "I used to work at Google", "I studied CS at MIT")
    - Decisions, opinions, or stated preferences (e.g., "I prefer Python over JavaScript")
    - Goals, plans, or future intentions (e.g., "I'm planning to visit Japan next year")
    - Relationships or people mentioned (e.g., "My manager Sarah", "My wife Alice")
    - Work context, projects, or responsibilities
    - Any other information the user would want remembered for future conversations

    **When to use**: Immediately after user shares personal information. Don't ask permission - just store it naturally.

    **Context guidelines**: Use descriptive contexts like "personal_preferences", "work_history", "family", "hobbies",
    "career_goals", "project_details", etc. This helps organize and retrieve related memories later.

    Args:
        content: The fact/memory to store (be specific and include relevant details)
        context: Categorize the memory (e.g., 'personal_preferences', 'work_history', 'hobbies', 'family')
    """
    # NOTE: the docstring above is runtime behavior — FastMCP exposes it to MCP
    # clients as the tool description, so edits to it change how models use this tool.
    try:
        result = await client.remember(content=content, context=context)
        # Surface the API's "message" field when present, a generic note otherwise.
        return f"Fact stored successfully: {result.get('message', 'Success')}"
    except Exception as e:
        # Broad catch is deliberate: MCP tools report failures as strings
        # instead of raising into the transport layer.
        logger.error(f"Error storing fact: {e}", exc_info=True)
        return f"Error: {str(e)}"
57+
58+
59+
@mcp.tool()
async def memora_search(query: str, max_tokens: int = 4096) -> str:
    """
    **CRITICAL: Search user's memory to provide personalized, context-aware responses.**

    Use this tool PROACTIVELY at the start of conversations or when making recommendations to:
    - Check user's preferences before making suggestions (e.g., "what foods does the user like?")
    - Recall user's history to provide continuity (e.g., "what projects has the user worked on?")
    - Remember user's goals and context (e.g., "what is the user trying to accomplish?")
    - Avoid repeating information or asking questions you should already know
    - Personalize responses based on user's background, interests, and past interactions
    - Reference past conversations or events the user mentioned

    **When to use**:
    - Start of conversation: Search for relevant context about the user
    - Before recommendations: Check user preferences and past experiences
    - When user asks about something they may have mentioned before
    - To provide continuity across conversations

    **Search tips**: Use natural language queries like "user's programming language preferences",
    "user's work experience", "user's dietary restrictions", "what does the user know about X?"

    Args:
        query: Natural language search query to find relevant memories
        max_tokens: Maximum tokens for search context (default: 4096)
    """
    # NOTE: the docstring above is runtime behavior — FastMCP exposes it to MCP
    # clients as the tool description, so edits to it change how models use this tool.
    try:
        result = await client.search(query=query, max_tokens=max_tokens)
        # Return the raw API response as pretty-printed JSON for the model to read.
        return json.dumps(result, indent=2)
    except Exception as e:
        # Broad catch is deliberate: MCP tools report failures as strings
        # instead of raising into the transport layer.
        logger.error(f"Error searching: {e}", exc_info=True)
        return f"Error: {str(e)}"
91+
92+
93+
def main():
    """Start the SSE transport, binding HOST/PORT from the environment.

    Defaults to 127.0.0.1:8765 when the variables are unset.
    """
    host = os.getenv("HOST", "127.0.0.1")
    port = int(os.getenv("PORT", "8765"))

    logger.info(f"Starting Memora MCP Server for agent: {config.agent_id}")
    logger.info(f"MCP server starting on http://{host}:{port}")

    # Blocks until the server is shut down.
    mcp.run(transport="sse", host=host, port=port)


if __name__ == "__main__":
    main()

memora/pyproject.toml

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,6 @@ dependencies = [
1515
"pydantic>=2.0.0",
1616
"rich>=13.0.0",
1717
"sentence-transformers>=2.2.0",
18-
"pytest>=7.0.0",
19-
"pytest-asyncio>=0.21.0",
2018
"langchain-text-splitters>=0.3.0",
2119
"fastapi[standard]>=0.120.3",
2220
"uvicorn>=0.38.0",
@@ -25,12 +23,18 @@ dependencies = [
2523
"pgvector>=0.4.1",
2624
"greenlet>=3.2.4",
2725
"psycopg2-binary>=2.9.11",
28-
"pytest-timeout>=2.4.0",
2926
"dateparser>=1.2.0",
3027
"tiktoken>=0.12.0",
3128
"httpx>=0.27.0",
3229
]
3330

31+
[project.optional-dependencies]
32+
test = [
33+
"pytest>=7.0.0",
34+
"pytest-asyncio>=0.21.0",
35+
"pytest-timeout>=2.4.0",
36+
]
37+
3438
[tool.hatch.build.targets.wheel]
3539
packages = ["memora"]
3640

0 commit comments

Comments
 (0)