|
| 1 | +"""Memora MCP Server implementation using FastMCP.""" |
| 2 | + |
| 3 | +import json |
| 4 | +import logging |
| 5 | +import os |
| 6 | + |
| 7 | +from fastmcp import FastMCP |
| 8 | + |
| 9 | +from config import Config |
| 10 | +from client import MemoraClient |
| 11 | + |
# Root logging setup: INFO level so server lifecycle and tool errors are visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load config
# Configuration comes entirely from environment variables (see Config.from_env);
# the resulting client is shared by every tool handler below.
config = Config.from_env()
client = MemoraClient(
    api_url=config.api_url,
    agent_id=config.agent_id,
    api_key=config.api_key,
)

# Create FastMCP server
# The server name identifies this MCP server to connecting agents/hosts.
mcp = FastMCP("memora-mcp-server")
| 25 | + |
| 26 | + |
@mcp.tool()
async def memora_put(content: str, context: str) -> str:
    """
    **CRITICAL: Store important user information to long-term memory.**

    Use this tool PROACTIVELY whenever the user shares:
    - Personal facts, preferences, or interests (e.g., "I love hiking", "I'm a vegetarian")
    - Important events or milestones (e.g., "I got promoted", "My birthday is June 15")
    - User history, experiences, or background (e.g., "I used to work at Google", "I studied CS at MIT")
    - Decisions, opinions, or stated preferences (e.g., "I prefer Python over JavaScript")
    - Goals, plans, or future intentions (e.g., "I'm planning to visit Japan next year")
    - Relationships or people mentioned (e.g., "My manager Sarah", "My wife Alice")
    - Work context, projects, or responsibilities
    - Any other information the user would want remembered for future conversations

    **When to use**: Immediately after user shares personal information. Don't ask permission - just store it naturally.

    **Context guidelines**: Use descriptive contexts like "personal_preferences", "work_history", "family", "hobbies",
    "career_goals", "project_details", etc. This helps organize and retrieve related memories later.

    Args:
        content: The fact/memory to store (be specific and include relevant details)
        context: Categorize the memory (e.g., 'personal_preferences', 'work_history', 'hobbies', 'family')
    """
    # NOTE: the docstring above doubles as the tool description shown to the
    # calling model (FastMCP derives tool metadata from it) - keep it verbatim.
    try:
        result = await client.remember(content=content, context=context)
        return f"Fact stored successfully: {result.get('message', 'Success')}"
    except Exception as e:
        # Broad catch is deliberate: MCP tools report failures back to the
        # agent as text rather than crashing the server. logger.exception
        # records the traceback; lazy %-args avoid formatting when disabled.
        logger.exception("Error storing fact: %s", e)
        return f"Error: {e}"
| 57 | + |
| 58 | + |
@mcp.tool()
async def memora_search(query: str, max_tokens: int = 4096) -> str:
    """
    **CRITICAL: Search user's memory to provide personalized, context-aware responses.**

    Use this tool PROACTIVELY at the start of conversations or when making recommendations to:
    - Check user's preferences before making suggestions (e.g., "what foods does the user like?")
    - Recall user's history to provide continuity (e.g., "what projects has the user worked on?")
    - Remember user's goals and context (e.g., "what is the user trying to accomplish?")
    - Avoid repeating information or asking questions you should already know
    - Personalize responses based on user's background, interests, and past interactions
    - Reference past conversations or events the user mentioned

    **When to use**:
    - Start of conversation: Search for relevant context about the user
    - Before recommendations: Check user preferences and past experiences
    - When user asks about something they may have mentioned before
    - To provide continuity across conversations

    **Search tips**: Use natural language queries like "user's programming language preferences",
    "user's work experience", "user's dietary restrictions", "what does the user know about X?"

    Args:
        query: Natural language search query to find relevant memories
        max_tokens: Maximum tokens for search context (default: 4096)
    """
    # NOTE: the docstring above doubles as the tool description shown to the
    # calling model (FastMCP derives tool metadata from it) - keep it verbatim.
    try:
        result = await client.search(query=query, max_tokens=max_tokens)
        # ensure_ascii=False keeps non-ASCII memory content readable instead
        # of \uXXXX-escaping it in the text returned to the agent.
        return json.dumps(result, indent=2, ensure_ascii=False)
    except Exception as e:
        # Broad catch is deliberate: MCP tools report failures back to the
        # agent as text rather than crashing the server. logger.exception
        # records the traceback; lazy %-args avoid formatting when disabled.
        logger.exception("Error searching: %s", e)
        return f"Error: {e}"
| 91 | + |
| 92 | + |
def main():
    """Main entry point.

    Reads HOST/PORT from the environment (defaulting to 127.0.0.1:8765)
    and runs the FastMCP server over the SSE transport. Blocks until the
    server is stopped.
    """
    port = int(os.getenv("PORT", "8765"))
    host = os.getenv("HOST", "127.0.0.1")

    # Lazy %-style args defer string formatting to the logging framework.
    logger.info("Starting Memora MCP Server for agent: %s", config.agent_id)
    logger.info("MCP server starting on http://%s:%s", host, port)

    mcp.run(transport="sse", host=host, port=port)


if __name__ == "__main__":
    main()