275 changes: 79 additions & 196 deletions examples/nanda_agent.py
@@ -1,273 +1,156 @@
# examples/nanda_agent.py
#!/usr/bin/env python3
"""
LLM-Powered Modular NANDA Agent
LLM-Powered NANDA Agent

This agent uses Anthropic Claude for intelligent responses based on configurable personality and expertise.
Simply update the AGENT_CONFIG section to create different agent personalities.
Configurable agent using Anthropic Claude for intelligent responses.
Customize via environment variables or the config dict.
"""
import os
import sys
import time
import asyncio
import uuid
from datetime import datetime
from typing import Dict, List, Any

# Add the parent directory to the path so the nanda_core package can be imported
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from nanda_core.core.adapter import NANDA

# Try to import Anthropic - will fail gracefully if not available
from dotenv import load_dotenv
load_dotenv()

try:
from anthropic import Anthropic
ANTHROPIC_AVAILABLE = True
except ImportError:
ANTHROPIC_AVAILABLE = False
print("⚠️ Warning: anthropic library not available. Install with: pip install anthropic")
print("⚠️ anthropic library not available. Install with: pip install anthropic")

# =============================================================================
# AGENT CONFIGURATION - Customize this section for different agents
# CONFIGURATION
# =============================================================================

# Get configuration from environment variables or use defaults
def get_agent_config():
"""Load agent configuration from environment variables or use defaults"""
def get_config():
"""Load configuration from environment variables"""
base_id = os.getenv("AGENT_ID", "helpful-agent")
agent_id = f"{base_id}-{uuid.uuid4().hex[:6]}" if '-' not in base_id else base_id

# Generate agent_id with hex suffix for uniqueness
base_agent_id = os.getenv("AGENT_ID", "helpful-ubuntu-agent")
if not base_agent_id.endswith('-') and '-' not in base_agent_id.split('-')[-1]:
# Add 6-character hex suffix if not already present
hex_suffix = uuid.uuid4().hex[:6]
agent_id = f"{base_agent_id}-{hex_suffix}"
else:
agent_id = base_agent_id
capabilities = os.getenv("AGENT_CAPABILITIES", "general assistance,conversation")
capabilities_list = [cap.strip() for cap in capabilities.split(",")]

print(f"Generated agent_id: {agent_id}")
agent_name = os.getenv("AGENT_NAME", "Ubuntu Helper")
agent_name = os.getenv("AGENT_NAME", "Helper Agent")
domain = os.getenv("AGENT_DOMAIN", "general assistance")
specialization = os.getenv("AGENT_SPECIALIZATION", "helpful and friendly AI assistant")
description = os.getenv("AGENT_DESCRIPTION", "I am a helpful AI assistant specializing in general tasks and Ubuntu system administration.")
capabilities = os.getenv("AGENT_CAPABILITIES", "general assistance,Ubuntu system administration,Python development,cloud deployment,agent-to-agent communication")
registry_url = os.getenv("REGISTRY_URL", None)
public_url = os.getenv("PUBLIC_URL", None)

# Parse capabilities into a list
expertise_list = [cap.strip() for cap in capabilities.split(",")]
specialization = os.getenv("AGENT_SPECIALIZATION", "helpful AI assistant")
description = os.getenv("AGENT_DESCRIPTION", "I'm a helpful AI assistant.")

# Create dynamic system prompt based on configuration
system_prompt = f"""You are {agent_name}, a {specialization} working in the domain of {domain}.
system_prompt = f"""You are {agent_name}, a {specialization} in {domain}.

{description}

You are part of the NANDA (Network of Autonomous Distributed Agents) system. You can communicate with other agents and help users with various tasks.
You are part of the NANDA agent network and can communicate with other agents using @agent-id syntax.

Your capabilities include:
{chr(10).join([f"- {cap}" for cap in expertise_list])}
Your capabilities: {', '.join(capabilities_list)}

Always be helpful, accurate, and concise in your responses. If you're unsure about something, say so honestly. You can also help with basic calculations, provide time information, and engage in casual conversation.

When someone asks about yourself, mention that you're part of the NANDA agent network and can communicate with other agents using the @agent_name syntax."""
Be helpful, accurate, and concise."""

return {
"agent_id": agent_id,
"agent_name": agent_name,
"domain": domain,
"specialization": specialization,
"description": description,
"expertise": expertise_list,
"registry_url": registry_url,
"public_url": public_url,
"system_prompt": system_prompt,
"capabilities": capabilities_list,
"port": int(os.getenv("PORT", "6000")),
"registry_url": os.getenv("REGISTRY_URL"),
"public_url": os.getenv("PUBLIC_URL") or f"http://localhost:{os.getenv('PORT', '6000')}",
"anthropic_api_key": os.getenv("ANTHROPIC_API_KEY"),
"model": "claude-3-haiku-20240307" # Fast and cost-effective model
"model": os.getenv("ANTHROPIC_MODEL", "claude-3-haiku-20240307"),
"system_prompt": system_prompt
}

# Load configuration
AGENT_CONFIG = get_agent_config()

# Port configuration - use environment variable or default to 6000
PORT = int(os.getenv("PORT", "6000"))

# =============================================================================
# LLM-POWERED AGENT LOGIC - Uses Anthropic Claude for intelligent responses
# AGENT LOGIC
# =============================================================================

def create_llm_agent_logic(config: Dict[str, Any]):
"""
Creates an LLM-powered agent logic function based on the provided configuration.
Uses Anthropic Claude for intelligent, context-aware responses.
"""
def create_agent_logic(config):
"""Create agent logic function with LLM or fallback"""

# Initialize Anthropic client
anthropic_client = None
client = None
if ANTHROPIC_AVAILABLE and config.get("anthropic_api_key"):
try:
anthropic_client = Anthropic(api_key=config["anthropic_api_key"])
print(f"βœ… Anthropic Claude initialized for {config['agent_name']}")
client = Anthropic(api_key=config["anthropic_api_key"])
print(f"βœ… Claude initialized ({config['model']})")
except Exception as e:
print(f"❌ Failed to initialize Anthropic: {e}")
anthropic_client = None

# Prepare system prompt (already formatted in get_agent_config)
system_prompt = config["system_prompt"]
print(f"❌ Claude initialization failed: {e}")

def llm_agent_logic(message: str, conversation_id: str) -> str:
"""LLM-powered agent logic with fallback to basic responses"""
def agent_logic(message: str, conversation_id: str) -> str:
"""Process message with Claude or fallback"""

# If LLM is available, use it for intelligent responses
if anthropic_client:
if client:
try:
# Add current time context if time-related query
context_info = ""
if any(time_word in message.lower() for time_word in ['time', 'date', 'when']):
context_info = f"\n\nCurrent time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
# Add time context if relevant
context = ""
if any(word in message.lower() for word in ['time', 'date', 'when']):
context = f"\n\nCurrent time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"

response = anthropic_client.messages.create(
response = client.messages.create(
model=config["model"],
max_tokens=500,
system=system_prompt + context_info,
messages=[
{
"role": "user",
"content": message
}
]
system=config["system_prompt"] + context,
messages=[{"role": "user", "content": message}]
)

return response.content[0].text.strip()

except Exception as e:
print(f"❌ LLM Error: {e}")
# Fall back to basic response
return f"Sorry, I'm having trouble processing that right now. Error: {str(e)}"
return f"Error: {str(e)}"

# Fallback to basic responses if LLM not available
# Fallback responses
msg = message.lower().strip()
if any(greeting in msg for greeting in ['hello', 'hi', 'hey']):
return f"Hello! I'm {config['agent_name']}. Set ANTHROPIC_API_KEY for full capabilities."
elif 'time' in msg:
return f"Current time: {datetime.now().strftime('%H:%M:%S')}"
else:
return _basic_fallback_response(message, config)

return llm_agent_logic

def _basic_fallback_response(message: str, config: Dict[str, Any]) -> str:
"""Basic fallback responses when LLM is not available"""
msg = message.lower().strip()

# Handle greetings
if any(greeting in msg for greeting in ['hello', 'hi', 'hey']):
return f"Hello! I'm {config['agent_name']}, but I need an Anthropic API key to provide intelligent responses. Please set ANTHROPIC_API_KEY environment variable."
return f"I'm {config['agent_name']}. Set ANTHROPIC_API_KEY to enable LLM responses."

# Handle time requests
elif 'time' in msg:
current_time = datetime.now().strftime("%H:%M:%S")
return f"The current time is {current_time}."

# Handle basic calculations
elif any(op in message for op in ['+', '-', '*', '/', '=']):
try:
calculation = message.replace('x', '*').replace('X', '*').replace('=', '').strip()
result = eval(calculation)
return f"Calculation result: {calculation} = {result}"
except:
return "Sorry, I couldn't calculate that. Please check your expression."

# Default fallback
else:
return f"I'm {config['agent_name']}, but I need an Anthropic API key to provide intelligent responses. Please set ANTHROPIC_API_KEY environment variable and restart me."
return agent_logic

# =============================================================================
# MAIN EXECUTION
# MAIN
# =============================================================================

def main():
"""Main function to start the LLM-powered modular agent"""
print(f"πŸ€– Starting {AGENT_CONFIG['agent_name']}")
print(f"πŸ“ Specialization: {AGENT_CONFIG['specialization']}")
print(f"🎯 Domain: {AGENT_CONFIG['domain']}")
print(f"πŸ› οΈ Capabilities: {', '.join(AGENT_CONFIG['expertise'])}")
if AGENT_CONFIG['registry_url']:
print(f"🌐 Registry: {AGENT_CONFIG['registry_url']}")
async def main():
config = get_config()

# Check for Anthropic API key
if not AGENT_CONFIG.get("anthropic_api_key"):
print("⚠️ Warning: ANTHROPIC_API_KEY not found in environment variables")
print(" The agent will use basic fallback responses only")
print(" Set ANTHROPIC_API_KEY to enable LLM capabilities")
else:
print(f"🧠 LLM Model: {AGENT_CONFIG['model']}")
print(f"πŸ€– {config['agent_name']} ({config['agent_id']})")
print(f"🎯 {config['domain']} - {config['specialization']}")
print(f"πŸ”— {config['public_url']}/a2a")
if config['registry_url']:
print(f"🌐 Registry: {config['registry_url']}")

# Create the LLM-powered agent logic based on configuration
agent_logic = create_llm_agent_logic(AGENT_CONFIG)
agent_logic = create_agent_logic(config)

# Create and start the NANDA agent
nanda = NANDA(
agent_id=AGENT_CONFIG["agent_id"],
agent_id=config["agent_id"],
agent_logic=agent_logic,
port=PORT,
registry_url=AGENT_CONFIG["registry_url"],
public_url=AGENT_CONFIG["public_url"],
enable_telemetry=True
agent_name=config["agent_name"],
domain=config["domain"],
specialization=config["specialization"],
description=config["description"],
capabilities=config["capabilities"],
port=config["port"],
registry_url=config["registry_url"],
public_url=config["public_url"],
enable_telemetry=True,
protocols={"a2a": {"enabled": True}}
)

print(f"πŸš€ Agent URL: http://localhost:{PORT}/a2a")
print("πŸ’‘ Try these messages:")
print(" - 'Hello there'")
print(" - 'Tell me about yourself'")
print(" - 'What time is it?'")
print(" - 'How can you help with Ubuntu?'")
print(" - 'Explain Python virtual environments'")
print(" - '5 + 3'")
print("\nπŸ›‘ Press Ctrl+C to stop")

# Start the agent
nanda.start()

def create_custom_agent(agent_name, specialization, domain, expertise_list, port=6000, anthropic_api_key=None, registry_url=None):
"""
Helper function to quickly create a custom LLM-powered agent with different config

Example usage:
create_custom_agent(
agent_name="Data Scientist",
specialization="analytical and precise AI assistant",
domain="data science",
expertise_list=["data analysis", "statistics", "machine learning", "Python"],
port=6001,
anthropic_api_key="sk-ant-xxxxx"
)
"""
custom_config = AGENT_CONFIG.copy()
custom_config.update({
"agent_id": agent_name.lower().replace(" ", "-"),
"agent_name": agent_name,
"specialization": specialization,
"domain": domain,
"expertise": expertise_list,
"registry_url": registry_url,
"anthropic_api_key": anthropic_api_key or os.getenv("ANTHROPIC_API_KEY"),
"system_prompt": f"""You are {agent_name}, a {specialization} working in the domain of {domain}.

You are part of the NANDA (Network of Autonomous Distributed Agents) system. You can communicate with other agents and help users with various tasks.

Your capabilities include:
{chr(10).join([f"- {expertise}" for expertise in expertise_list])}

Always be helpful, accurate, and concise in your responses. If you're unsure about something, say so honestly.

When someone asks about yourself, mention that you're part of the NANDA agent network and can communicate with other agents using the @agent_name syntax."""
})

agent_logic = create_llm_agent_logic(custom_config)

nanda = NANDA(
agent_id=custom_config["agent_id"],
agent_logic=agent_logic,
port=port,
registry_url=custom_config["registry_url"],
enable_telemetry=True
)
print("\nπŸ’¬ Try: 'Hello', 'What time is it?', '@other-agent Hello!'")
print("πŸ›‘ Press Ctrl+C to stop\n")

print(f"πŸ€– Starting custom LLM agent: {agent_name}")
print(f"πŸš€ Agent URL: http://localhost:{port}/a2a")
nanda.start()
await nanda.start()

if __name__ == "__main__":
main()
asyncio.run(main())
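
Reviewer note: a minimal sketch of how the refactored example could be smoke-tested without starting the server. It assumes the snippet runs from the repository root with examples/ importable as a package, and uses the get_config/create_agent_logic names and environment variables introduced in this diff; everything else (agent id, test messages) is illustrative.

# Hypothetical local smoke test for the refactored examples/nanda_agent.py.
# Assumes the repository root is the working directory and examples/ is
# importable (e.g. it contains an __init__.py); adjust the import otherwise.
import os

# Configuration is read from environment variables by get_config().
os.environ.setdefault("AGENT_ID", "demo-agent")
os.environ.setdefault("AGENT_NAME", "Demo Agent")
os.environ.setdefault("AGENT_CAPABILITIES", "general assistance,conversation")
os.environ.setdefault("PORT", "6000")
# Without ANTHROPIC_API_KEY the agent falls back to the basic canned responses.

from examples.nanda_agent import get_config, create_agent_logic

config = get_config()
logic = create_agent_logic(config)

# Exercise the logic function directly, without the NANDA server or registry.
print(logic("Hello there", conversation_id="local-test"))
print(logic("What time is it?", conversation_id="local-test"))

Messaging another agent would follow the @agent-id convention mentioned in the system prompt, e.g. '@other-agent Hello!'.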
4 changes: 2 additions & 2 deletions nanda_core/__init__.py
@@ -4,9 +4,9 @@
"""

from .core.adapter import NANDA
from .core.agent_bridge import SimpleAgentBridge
from .core.agent_bridge import AgentBridge

__all__ = [
"NANDA",
"SimpleAgentBridge"
"AgentBridge"
]
4 changes: 2 additions & 2 deletions nanda_core/core/__init__.py
@@ -4,9 +4,9 @@
"""

from .adapter import NANDA
from .agent_bridge import SimpleAgentBridge
from .agent_bridge import AgentBridge

__all__ = [
"NANDA",
"SimpleAgentBridge"
"AgentBridge"
]
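
For downstream callers, the practical effect of the SimpleAgentBridge → AgentBridge rename is a one-line import change; a quick sketch, assuming the package is installed with this change applied:

# Both the package root and the core subpackage re-export the renamed class.
from nanda_core import NANDA, AgentBridge
from nanda_core.core import AgentBridge as CoreAgentBridge

# Same class object, re-exported at two levels of the package.
assert AgentBridge is CoreAgentBridge

Any code that still imports SimpleAgentBridge would need the corresponding update.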