diff --git a/Makefile b/Makefile index db10e0cb..21d1a3a3 100644 --- a/Makefile +++ b/Makefile @@ -1,50 +1,49 @@ -.ONESHELL: -.SHELLFLAGS := -c - -.PHONY: run -.PHONY: uninstall -.PHONY: install -.PHONY: install-pre-commit -.PHONY: build -.PHONY: deploy - +.PHONY: setup run deploy stop install uninstall build install-pre-commit +# Conda detection detect_conda_bin := $(shell bash -c 'if [ "${CONDA_EXE} " == " " ]; then \ CONDA_EXE=$$((find /opt/conda/bin/conda || find ~/anaconda3/bin/conda || \ find /usr/local/anaconda3/bin/conda || find ~/miniconda3/bin/conda || \ find /root/miniconda/bin/conda || find ~/Anaconda3/Scripts/conda || \ find $$CONDA/bin/conda) 2>/dev/null); fi; \ - if [ "${CONDA_EXE}_" == "_" ]; then \ - echo "Please install Anaconda w/ Python 3.10+ first"; \ - echo "See: https://www.anaconda.com/distribution/"; \ - exit 1; fi; \ echo $$(dirname $${CONDA_EXE})') - CONDA_BIN := $(detect_conda_bin) +# Setup - create .env file +setup: + @./setup.sh + +# Run locally (dev mode) run: + docker compose up emqx postgres -d uvicorn main:app --reload -uninstall: - conda env remove -n hummingbot-api -y +# Deploy with Docker +deploy: + docker compose up -d +# Stop all services +stop: + docker compose down + +# Install conda environment install: - if conda env list | grep -q '^hummingbot-api '; then \ + @if conda env list | grep -q '^hummingbot-api '; then \ echo "Environment already exists."; \ else \ conda env create -f environment.yml; \ fi - conda activate hummingbot-api $(MAKE) install-pre-commit +uninstall: + conda env remove -n hummingbot-api -y + install-pre-commit: - /bin/bash -c 'source "${CONDA_BIN}/activate" hummingbot-api && \ + @/bin/bash -c 'source "${CONDA_BIN}/activate" hummingbot-api && \ if ! conda list pre-commit | grep pre-commit &> /dev/null; then \ pip install pre-commit; \ fi && pre-commit install' +# Build Docker image build: docker build -t hummingbot/hummingbot-api:latest . - -deploy: - docker compose up -d diff --git a/README.md b/README.md index 7bd41373..9486a201 100644 --- a/README.md +++ b/README.md @@ -1,176 +1,63 @@ # Hummingbot API -**The central hub for running Hummingbot trading bots - now with AI assistant integration via MCP (Model Context Protocol).** +A REST API for managing Hummingbot trading bots across multiple exchanges, with AI assistant integration via MCP. -A comprehensive RESTful API framework for managing trading operations across multiple exchanges. The Hummingbot API provides a centralized platform to aggregate all your trading functionalities, from basic account management to sophisticated automated trading strategies. - -## ๐Ÿš€ Quick Start - -Run the setup script to deploy the Hummingbot API platform: +## Quick Start ```bash git clone https://github.com/hummingbot/hummingbot-api.git cd hummingbot-api -chmod +x setup.sh -./setup.sh +make setup # Creates .env (prompts for passwords) +make deploy # Starts all services ``` -### Setup Process - -The script will prompt you for: - -1. **Credentials** (required): - - Config password (for encrypting bot credentials) - - API username and password - -2. **Optional Services**: - - **Dashboard**: For web-based visual interface - -3. 
**Gateway**: Optional passphrase for DEX trading - -### What Gets Installed - -**Core services** (always installed): -- โœ… **Hummingbot API** (port 8000) - REST API backend -- โœ… **PostgreSQL** - Database for trading data -- โœ… **EMQX** - Message broker for real-time communication -- โœ… **Swagger UI** (port 8000/docs) - API documentation - -**Optional service** (enable during setup): -- ๐Ÿ“Š **Dashboard** (port 8501) - Web interface - -**Note**: MCP (AI assistant integration) is configured separately - see below - -### After Setup - -**1. Access Swagger UI (Default)** - -The API documentation is immediately available: -- URL: http://localhost:8000/docs -- Use the username/password you configured -- Test all API endpoints directly +That's it! The API is now running at http://localhost:8000 -**2. Connect AI Assistant (Optional)** +## Available Commands -To connect an AI assistant via MCP: +| Command | Description | +|---------|-------------| +| `make setup` | Create `.env` file with configuration | +| `make deploy` | Start all services (API, PostgreSQL, EMQX) | +| `make stop` | Stop all services | +| `make run` | Run API locally in dev mode | +| `make install` | Install conda environment for development | +| `make build` | Build Docker image | -**Claude Desktop:** -1. Install from [https://claude.ai/download](https://claude.ai/download) -2. Add to your config file: - - **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json` - - **Windows**: `%APPDATA%\Claude\claude_desktop_config.json` +## Services - ```json - { - "mcpServers": { - "hummingbot": { - "command": "docker", - "args": ["run", "--rm", "-i", "-e", "HUMMINGBOT_API_URL=http://host.docker.internal:8000", "-v", "hummingbot_mcp:/root/.hummingbot_mcp", "hummingbot/hummingbot-mcp:latest"] - } - } - } - ``` -3. Restart Claude Desktop -4. **First-time setup - Add exchange credentials:** - - "Set up my Solana wallet" โ†’ Uses `setup_connector` tool for progressive credential setup - - Or for CEX: "Set up my Binance account" โ†’ Guides you through API key setup -5. **Try trading operations:** - - "What's the current price for swapping SOL to USDC?" - - "Execute a swap: sell 0.01 SOL for USDC with 1% slippage" +After `make deploy`, these services are available: -**3. Access Dashboard (If Enabled)** +| Service | URL | Description | +|---------|-----|-------------| +| **API** | http://localhost:8000 | REST API | +| **Swagger UI** | http://localhost:8000/docs | Interactive API documentation | +| **PostgreSQL** | localhost:5432 | Database | +| **EMQX** | localhost:1883 | MQTT broker | +| **EMQX Dashboard** | http://localhost:18083 | Broker admin (admin/public) | -If you enabled Dashboard during setup: -- URL: http://localhost:8501 -- Use the same username/password from setup +## Connect AI Assistant (MCP) -## What is Hummingbot API? 
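+Before wiring up an assistant, it can help to confirm the API is reachable. A
+quick check, assuming the default port and the example `admin`/`admin`
+credentials from `.env` (all endpoints use HTTP Basic Auth):
+
+```bash
+curl -s -u admin:admin -o /dev/null -w "%{http_code}\n" http://localhost:8000/docs
+```
+
+A `200` response means the API is up.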
+### Claude Code (CLI) -The Hummingbot API is designed to be your central hub for trading operations, offering: - -- **๐Ÿค– AI Assistant Integration**: Control your trading with natural language via MCP (Claude, ChatGPT, Gemini) -- **Multi-Exchange Account Management**: Create and manage multiple trading accounts across different exchanges -- **Portfolio Monitoring**: Real-time balance tracking and portfolio distribution analysis -- **Trade Execution**: Execute trades, manage orders, and monitor positions across all your accounts -- **Automated Trading**: Deploy and control Hummingbot instances with automated strategies -- **Strategy Management**: Add, configure, and manage trading strategies in real-time -- **Complete Flexibility**: Build any trading product on top of this robust API framework - -## ๐ŸŽฏ Ways to Interact with Hummingbot API - -Choose the method that best fits your workflow: - -### 1. ๐Ÿ”ง Swagger UI - API Documentation (Default) -**Interactive REST API documentation and testing** +```bash +claude mcp add --transport stdio hummingbot -- \ + docker run --rm -i \ + -e HUMMINGBOT_API_URL=http://host.docker.internal:8000 \ + -v hummingbot_mcp:/root/.hummingbot_mcp \ + hummingbot/hummingbot-mcp:latest +``` -- **Best for**: Developers and power users who want full control -- **Advantages**: - - Complete API access - all endpoints available - - Direct endpoint testing - - Integration development - - No additional setup required -- **Setup**: Automatically available after running setup -- **Access**: http://localhost:8000/docs +Then use natural language: +- "Show my portfolio balances" +- "Set up my Binance account" +- "Create a market making strategy for ETH-USDT" -### 2. ๐Ÿค– MCP - AI Assistant (Optional) -**Natural language trading commands through Claude, ChatGPT, or Gemini** +### Claude Desktop -- **Best for**: Users who prefer conversational interaction -- **Advantages**: - - Natural language commands - - Full access to all API features - - Contextual help and explanations - - Complex multi-step operations made simple - - Progressive credential setup with `setup_connector` tool -- **Setup**: Answer "y" when prompted during setup, then connect your AI assistant -- **Examples**: - - First-time: "Set up my Solana wallet" โ†’ Guides through credential setup - - Trading: "What's the price to swap 0.01 SOL for USDC? Execute the trade" - -### 3. ๐Ÿ“Š Dashboard - Web Interface (Optional) -**Visual interface for common operations** - -- **Best for**: Users who prefer graphical interfaces -- **Advantages**: - - Intuitive visual workflows - - Real-time charts and graphs - - Quick access to common tasks -- **Limitations**: Not all API functions available (focused on core features) -- **Setup**: Answer "y" when prompted during setup -- **Access**: http://localhost:8501 - -Whether you're building a trading dashboard, implementing algorithmic strategies, or creating a comprehensive trading platform, the Hummingbot API provides all the tools you need. - -## ๐Ÿ”Œ Setting Up MCP with Claude Code - -If you're using Claude Code (the CLI tool), you can connect to the Hummingbot MCP server directly from your development environment. - -### Quick Setup - -1. **Enable MCP during setup** (if not already done): - ```bash - ./setup.sh # Answer "y" to "Enable MCP server for AI assistant usage?" - ``` - -2. 
**Add the MCP server to Claude Code**: - ```bash - claude mcp add --transport stdio hummingbot -- docker run --rm -i -e HUMMINGBOT_API_URL=http://host.docker.internal:8000 -v hummingbot_mcp:/root/.hummingbot_mcp hummingbot/hummingbot-mcp:latest - ``` - - This configures Claude Code to communicate with the Hummingbot MCP server. - -3. **Start using Hummingbot in Claude Code**: - - Open your terminal with Claude Code - - Use natural language commands to interact with your trading operations: - ``` - "What are my current portfolio balances?" - "Show me active trading bots" - "Create a new market making strategy for ETH-USDT" - ``` - -### Configuration File - -The command above automatically creates/updates `.mcp.json` in your project root: +Add to your config file: +- **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json` +- **Windows**: `%APPDATA%\Claude\claude_desktop_config.json` ```json { @@ -183,646 +70,69 @@ The command above automatically creates/updates `.mcp.json` in your project root } ``` -### Managing the Connection - -**List configured MCP servers:** -```bash -claude mcp list -``` - -**View server details:** -```bash -claude mcp get hummingbot -``` - -**Remove the server:** -```bash -claude mcp remove hummingbot -``` - -### Prerequisites - -- Claude Code CLI installed (see [Claude Code documentation](https://docs.claude.com/en/docs/claude-code)) -- MCP service enabled during Hummingbot API setup -- Docker running with `hummingbot-mcp` container active - -### Verify Setup - -Check that the MCP container is running: -```bash -docker ps | grep hummingbot-mcp -``` - -If the container isn't running, re-run setup with MCP enabled: -```bash -./setup.sh # Answer "y" to MCP prompt -``` - -## ๐ŸŒ Gateway Setup (For DEX Trading) - -Gateway is required for decentralized exchange (DEX) trading. The Hummingbot API can manage Gateway containers for you - no separate installation needed! - -### Option 1: Using Swagger UI (API) - -1. **Access Swagger UI**: http://localhost:8000/docs -2. **Navigate to Gateway endpoints**: Look for `/manage-gateway` or similar endpoints -3. 
**Start Gateway**: - ```json - POST /manage-gateway - { - "action": "start", - "passphrase": "your-secure-passphrase", - "dev_mode": true - } - ``` - -The API automatically handles OS-specific networking: -- **macOS/Windows**: Uses `host.docker.internal` to connect to the API -- **Linux**: Uses appropriate network configuration - -### Option 2: Using MCP AI Assistant - -If you enabled MCP during setup, you can manage Gateway with natural language: - -**Example commands:** -- "Start Gateway in development mode with passphrase 'admin'" -- "Check Gateway status" -- "Stop the Gateway container" -- "Restart Gateway with a new passphrase" - -The `manage_gateway_container` MCP tool will: -- Pull the Gateway Docker image if needed -- Start the container with proper configuration -- Configure networking based on your OS -- Report Gateway status and connection info - -### Verify Gateway is Running - -**Check container status:** -```bash -docker ps | grep gateway -``` - -**View Gateway logs:** -```bash -docker logs gateway -f -``` - -**Test Gateway API** (dev mode only): -```bash -curl http://localhost:15888/ -``` - -### Gateway Access - -Once running, Gateway will be available at: -- **Development mode**: `http://localhost:15888` -- **Production mode**: `https://localhost:15888` (requires certificates) -- **API Documentation**: `http://localhost:15888/docs` (dev mode only) - -### Troubleshooting - -**Gateway won't start:** -- Ensure Docker is running -- Check if port 15888 is available -- Review logs: `docker logs gateway` - -**Multiple Gateway containers running:** -If you have multiple Gateway containers (e.g., from previous setups), you may experience connection issues or unexpected behavior. - -```bash -# Check for multiple Gateway containers -docker ps -a | grep gateway - -# If you see multiple containers, stop and remove old ones -docker stop gateway-old-name -docker rm gateway-old-name - -# Keep only the one you want to use -# The Hummingbot API expects the container to be named 'gateway' -docker rename your-container-name gateway -``` - -**Connection issues:** -- Verify Gateway URL in your `.env` file and `docker-compose.yml` -- The API uses `GATEWAY_URL=http://host.docker.internal:15888` (configured in docker-compose.yml) -- Ensure Gateway container is on the same Docker network: `docker network inspect hummingbot-api_emqx-bridge` -- macOS/Windows users: `host.docker.internal` should work automatically -- Linux users: Check that `extra_hosts` is properly configured in docker-compose.yml - -## ๐Ÿณ Docker Compose Architecture - -The Hummingbot API uses Docker Compose to orchestrate multiple services into a complete trading platform: - -### Services Overview - -```yaml -services: - # dashboard: # Optional - Web UI (enable during setup or uncomment manually) - hummingbot-api: # Core FastAPI backend (port 8000) - Always installed - emqx: # MQTT message broker (port 1883) - Always installed - postgres: # PostgreSQL database (port 5432) - Always installed -``` - -### Network Configuration - -All services communicate via the `emqx-bridge` Docker network: -- **Internal communication**: Services reference each other by container name (e.g., `hummingbot-api:8000`) -- **External access**: Exposed ports allow access from your host machine - -### Environment Variables - -The setup script creates a `.env` file with all necessary configuration: - -```bash -# Security -USERNAME=admin # API authentication username -PASSWORD=admin # API authentication password -CONFIG_PASSWORD=admin # Bot credentials 
encryption key - -# Services (auto-configured) -BROKER_HOST=emqx -DATABASE_URL=postgresql+asyncpg://hbot:hummingbot-api@postgres:5432/hummingbot_api -``` - -### Persistent Storage - -Docker volumes ensure data persistence: -- `postgres-data`: Trading data and bot performance -- `emqx-data`, `emqx-log`, `emqx-etc`: Message broker state - -## System Dependencies - -The platform includes these essential services: - -### 1. PostgreSQL Database -Stores all trading data including: -- Orders and trade history -- Account states and balances -- Positions and funding payments -- Performance metrics - -**Note:** The database is automatically initialized using environment variables. The included `init-db.sql` serves as a safety net. - -### 2. EMQX Message Broker -Enables real-time communication with trading bots: -- Receives live updates from running bots -- Sends commands to control bot execution -- Handles real-time data streaming +Restart Claude Desktop after adding. -## Installation & Setup +## Gateway (DEX Trading) -### Prerequisites -- Docker and Docker Compose installed -- Git for cloning the repository +Gateway enables decentralized exchange trading. Start it via MCP: -### Quick Start +> "Start Gateway in development mode with passphrase 'admin'" -1. **Clone the repository** - ```bash - git clone https://github.com/hummingbot/hummingbot-api.git - cd hummingbot-api - ``` +Or via API at http://localhost:8000/docs using the Gateway endpoints. -2. **Make setup script executable and run it** - ```bash - chmod +x setup.sh - ./setup.sh - ``` - -3. **Configure your environment** - During setup, you'll configure several important variables: - - - **Config Password**: Used to encrypt and hash API keys and credentials for security - - **Username & Password**: Basic authentication credentials for API access (used by dashboards and other systems) - - **Additional configurations**: Available in the `.env` file including: - - Broker configuration (EMQX settings) - - Database URL - - Market data cleanup settings - - AWS S3 configuration (experimental) - - Banned tokens list (for delisted tokens) - -4. **Set up monitoring (Production recommended)** - For production deployments, add observability through Logfire: - ```bash - export LOGFIRE_TOKEN=your_token_here - ``` - Learn more: [Logfire Documentation](https://logfire.pydantic.dev/docs/) - -After running `setup.sh`, the required Docker images (EMQX, PostgreSQL, and Hummingbot) will be running and ready. - -## Running the API - -You have two deployment options depending on your use case: - -### For Users (Production/Simple Deployment) -```bash -./run.sh -``` -This runs the API in a Docker container - simple and isolated. - -### For Developers (Development Environment) -1. **Install Conda** (if not already installed) -2. **Set up the development environment** - ```bash - make install - ``` - This creates a Conda environment with all dependencies. - -3. **Run in development mode** - ```bash - ./run.sh --dev - ``` - This starts the API from source with hot-reloading enabled. - -## ๐Ÿค– MCP AI Assistant Integration - -### Claude Desktop (Recommended) - -1. **Install Claude Desktop** - - Download from [https://claude.ai/download](https://claude.ai/download) - -2. **Configure the MCP Server** - - Open (or create) your Claude Desktop config file: - - **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json` - - **Windows**: `%APPDATA%\Claude\claude_desktop_config.json` - -3. 
**Add the Hummingbot MCP configuration:** - ```json - { - "mcpServers": { - "hummingbot": { - "command": "docker", - "args": ["run", "--rm", "-i", "-e", "HUMMINGBOT_API_URL=http://host.docker.internal:8000", "-v", "hummingbot_mcp:/root/.hummingbot_mcp", "hummingbot/hummingbot-mcp:latest"] - } - } - } - ``` - -4. **Restart Claude Desktop** - -5. **Start using Hummingbot with natural language:** - - **First-time setup**: "Set up my Solana wallet" โ†’ Progressive credential setup with `setup_connector` - - **Trading**: "What's the current price to swap 0.01 SOL for USDC? Execute the trade" - - **Portfolio**: "What are my current portfolio balances across all exchanges?" - - **Gateway**: "Start Gateway in development mode with passphrase 'admin'" - - **Strategies**: "Create a PMM strategy for ETH-USDT on Binance" - -### ChatGPT / OpenAI - -1. **Install the OpenAI CLI** (if available in your region) - - Follow OpenAI's official MCP setup guide - -2. **Configure the MCP server** similar to Claude Desktop: - ```json - { - "mcpServers": { - "hummingbot": { - "command": "docker", - "args": ["run", "--rm", "-i", "-e", "HUMMINGBOT_API_URL=http://host.docker.internal:8000", "-v", "hummingbot_mcp:/root/.hummingbot_mcp", "hummingbot/hummingbot-mcp:latest"] - } - } - } - ``` - -### Google Gemini - -1. **Install Gemini CLI** (if available) - - Refer to Google's MCP integration documentation - -2. **Add Hummingbot MCP server** to your Gemini configuration - -### Available MCP Capabilities - -Once connected, your AI assistant can: -- ๐Ÿ“Š **Portfolio Management**: View balances, positions, and P&L across exchanges -- ๐Ÿ“ˆ **Market Data**: Get real-time prices, orderbook depth, and funding rates -- ๐Ÿค– **Bot Control**: Create, start, stop, and monitor trading bots -- ๐Ÿ“‹ **Order Management**: Place, cancel, and track orders -- ๐Ÿ” **Performance Analytics**: Analyze trading performance and statistics -- โš™๏ธ **Strategy Configuration**: Create and modify trading strategies -- ๐ŸŒ **Gateway Management**: Start, stop, and configure the Gateway container for DEX trading - -## Getting Started (Alternative Methods) - -Once the API is running, you can also access it directly: - -### Option 1: Web Dashboard -1. **Access the Dashboard**: Go to `http://localhost:8501` -2. **Login**: Use the username and password you configured during setup -3. **Explore**: Navigate through the visual interface - -### Option 2: Swagger UI (API Documentation) -1. **Visit the API Documentation**: Go to `http://localhost:8000/docs` -2. **Authenticate**: Use the username and password you configured during setup -3. 
**Test endpoints**: Use the Swagger interface to test API functionality - -## API Overview - -The Hummingbot API is organized into several functional routers: - -### ๐Ÿณ Docker Management (`/docker`) -- Check Docker daemon status and health -- Pull new Docker images with async support -- Start, stop, and remove containers -- Monitor active and exited containers -- Clean up exited containers -- Archive container data locally or to S3 -- Track image pull status and progress - -### ๐Ÿ’ณ Account Management (`/accounts`) -- Create and delete trading accounts -- Add/remove exchange credentials -- List available credentials per account -- Basic account configuration - -### ๐Ÿ”Œ Connector Discovery (`/connectors`) -**Provides exchange connector information and configuration** -- List available exchange connectors -- Get connector configuration requirements -- Retrieve trading rules and constraints -- Query supported order types per connector - -### ๐Ÿ“Š Portfolio Management (`/portfolio`) -**Centralized portfolio tracking and analytics** -- **Real-time Portfolio State**: Current balances across all accounts -- **Portfolio History**: Time-series data with cursor-based pagination -- **Token Distribution**: Aggregate holdings by token across exchanges -- **Account Distribution**: Percentage-based portfolio allocation analysis -- **Advanced Filtering**: Filter by account names and connectors - -### ๐Ÿ’น Trading Operations (`/trading`) -**Enhanced with POST-based filtering and comprehensive order/trade management** -- **Order Placement**: Execute trades with advanced order types -- **Order Cancellation**: Cancel specific orders by ID -- **Position Tracking**: Real-time perpetual positions with PnL data -- **Active Orders**: Live order monitoring from connector in-flight orders -- **Order History**: Paginated historical orders with advanced filtering -- **Trade History**: Complete execution records with filtering -- **Funding Payments**: Historical funding payment tracking for perpetuals -- **Position Modes**: Configure HEDGE/ONEWAY modes for perpetual trading -- **Leverage Management**: Set and adjust leverage per trading pair - -### ๐Ÿค– Bot Orchestration (`/bot-orchestration`) -- Monitor bot status and MQTT connectivity -- Deploy V2 scripts and controllers -- Start/stop bots with configurable parameters -- Stop and archive bots with background task support -- Retrieve bot performance history -- Real-time bot status monitoring - -### ๐Ÿ“‹ Strategy Management -- **Controllers** (`/controllers`): Manage V2 strategy controllers - - CRUD operations on controller files - - Controller configuration management - - Bot-specific controller configurations - - Template retrieval for new configs -- **Scripts** (`/scripts`): Handle traditional Hummingbot scripts - - CRUD operations on script files - - Script configuration management - - Configuration templates - -### ๐Ÿ“Š Market Data (`/market-data`) -**Professional market data analysis and real-time feeds** -- **Price Discovery**: Real-time prices, funding rates, mark/index prices -- **Candle Data**: Real-time and historical candles with multiple intervals -- **Order Book Analysis**: - - Live order book snapshots - - Price impact calculations - - Volume queries at specific price levels - - VWAP (Volume-Weighted Average Price) calculations -- **Feed Management**: Active feed monitoring with automatic cleanup - -### ๐Ÿ”„ Backtesting (`/backtesting`) -- Run strategy backtests against historical data -- Support for controller configurations -- Customizable trade 
costs and resolution - -### ๐Ÿ“ˆ Archived Bot Analytics (`/archived-bots`) -**Comprehensive analysis of stopped bot performance** -- List and discover archived bot databases -- Performance metrics and trade analysis -- Historical order and trade retrieval -- Position and executor data extraction -- Controller configuration recovery -- Support for both V1 and V2 bot architectures +Once running, Gateway is available at http://localhost:15888 ## Configuration -### Environment Variables -Key configuration options available in `.env`: +The `.env` file contains all configuration. Key settings: -- **CONFIG_PASSWORD**: Encrypts API keys and credentials -- **USERNAME/PASSWORD**: API authentication credentials -- **BROKER_HOST/PORT**: EMQX message broker settings -- **DATABASE_URL**: PostgreSQL connection string -- **ACCOUNT_UPDATE_INTERVAL**: Balance update frequency (minutes) -- **AWS_API_KEY/AWS_SECRET_KEY**: S3 archiving (optional) -- **BANNED_TOKENS**: Comma-separated list of tokens to exclude -- **LOGFIRE_TOKEN**: Observability and monitoring (production) - -### Bot Instance Structure -Each bot maintains its own isolated environment: -``` -bots/instances/hummingbot-{name}/ -โ”œโ”€โ”€ conf/ # Configuration files -โ”œโ”€โ”€ data/ # Bot databases and state -โ””โ”€โ”€ logs/ # Execution logs -``` - -## Development - -### Code Quality Tools ```bash -# Install pre-commit hooks -make install-pre-commit - -# Format code (runs automatically) -black --line-length 130 . -isort --line-length 130 --profile black . +USERNAME=admin # API username +PASSWORD=admin # API password +CONFIG_PASSWORD=admin # Encrypts bot credentials +DATABASE_URL=... # PostgreSQL connection +GATEWAY_URL=... # Gateway URL (for DEX) ``` -### Testing -The API includes comprehensive backtesting capabilities. Test using: -- Backtesting router for strategy validation -- Swagger UI at `http://localhost:8000/docs` -- Integration testing with live containers +Edit `.env` and restart with `make deploy` to apply changes. -## Architecture +## API Features -### Core Components -1. **FastAPI Application**: HTTP API with Basic Auth -2. **Docker Service**: Container lifecycle management -3. **Bot Orchestrator**: Strategy deployment and monitoring -4. **Accounts Service**: Multi-exchange account management -5. **Market Data Manager**: Real-time feeds and historical data -6. **MQTT Broker**: Real-time bot communication +- **Portfolio**: Balances, positions, P&L across all exchanges +- **Trading**: Place orders, manage positions, track history +- **Bots**: Deploy, monitor, and control trading bots +- **Market Data**: Prices, orderbooks, candles, funding rates +- **Strategies**: Create and manage trading strategies -### Data Models -- Orders and trades with multi-account support -- Portfolio states and balance tracking -- Position management for perpetual trading -- Historical performance analytics +Full API documentation at http://localhost:8000/docs -## Authentication - -All API endpoints require HTTP Basic Authentication. 
Include your configured credentials in all requests: +## Development ```bash -curl -u username:password http://localhost:8000/endpoint +make install # Create conda environment +conda activate hummingbot-api +make run # Run with hot-reload ``` ## Troubleshooting -### Database Connection Issues - -If you encounter PostgreSQL database connection errors (such as "role 'hbot' does not exist" or "database 'hummingbot_api' does not exist"), use the automated fix script: - +**API won't start?** ```bash -chmod +x fix-database.sh -./fix-database.sh +docker compose logs hummingbot-api ``` -This script will: -1. Check if PostgreSQL is running -2. Verify that the `hbot` user and `hummingbot_api` database exist -3. Automatically fix any missing configuration -4. Test the connection to ensure everything works - -#### "role 'postgres' does not exist" Error - -If you see errors like `FATAL: role "postgres" does not exist` in the PostgreSQL logs: - -**Cause**: The PostgreSQL container is configured to create only the `hbot` user (via `POSTGRES_USER=hbot`). The default `postgres` superuser is NOT created. This error occurs when something tries to connect using the default `postgres` username. - -**Solutions**: - -1. **Always specify the correct user** when connecting: - ```bash - # Correct - use hbot user - docker exec -it hummingbot-postgres psql -U hbot -d hummingbot_api - - # Incorrect - tries to use 'postgres' user (doesn't exist) - docker exec -it hummingbot-postgres psql - ``` - -2. **If you need the postgres superuser** (not recommended), you can create it: - ```bash - docker exec -it hummingbot-postgres psql -U hbot -d postgres -c "CREATE ROLE postgres WITH SUPERUSER LOGIN PASSWORD 'your-password';" - ``` - -3. **Complete database reset** (โš ๏ธ deletes all data): - ```bash - docker compose down -v - ./setup.sh - ``` - -#### Manual Database Verification - -If you prefer to check manually: - -```bash -# Check if containers are running -docker ps | grep -E "hummingbot-postgres|hummingbot-broker" - -# Check PostgreSQL logs -docker logs hummingbot-postgres - -# Verify database connection (use hbot user, not postgres) -docker exec -it hummingbot-postgres psql -U hbot -d hummingbot_api - -# List all database users -docker exec -it hummingbot-postgres psql -U hbot -d postgres -c "\du" -``` - -#### "database 'hbot' does not exist" During Setup - -If you see this error during `./setup.sh`: - -``` -โš ๏ธ Database initialization may be incomplete. Running manual initialization... -psql: error: connection to server on socket "/var/run/postgresql/.s.PGSQL.5432" failed: FATAL: database "hbot" does not exist -โŒ Failed to initialize database. -``` - -**Cause**: The setup script tried to connect to a database named `hbot` (the username) instead of `hummingbot_api` (the actual database name). This was a bug in older versions of setup.sh. - -**Solution**: - -1. **Update setup.sh**: Pull the latest version with the fix: - ```bash - git pull origin main - ``` - -2. **Or manually fix the database**: - ```bash - # The database already exists, just verify it - docker exec hummingbot-postgres psql -U hbot -d postgres -c "\l" - - # You should see 'hummingbot_api' in the list - # Test connection - docker exec hummingbot-postgres psql -U hbot -d hummingbot_api -c "SELECT version();" - ``` - -3. **If database doesn't exist**, run the fix script: - ```bash - chmod +x fix-database.sh - ./fix-database.sh - ``` - -**Prevention**: This issue is fixed in the latest version of setup.sh. 
The script now correctly specifies `-d postgres` when running manual initialization. - -#### Complete Database Reset - -If you need to start fresh (โš ๏ธ this will delete all data): - +**Database issues?** ```bash -# Stop all containers and remove volumes -docker compose down -v - -# Restart setup -./setup.sh +docker compose down -v # Reset all data +make deploy # Fresh start ``` -### EMQX Broker Issues - -If bots can't connect to the broker: - +**Check service status:** ```bash -# Check EMQX status -docker logs hummingbot-broker - -# Restart EMQX -docker compose restart emqx - -# Access EMQX dashboard (if needed) -# http://localhost:18083 -# Default credentials: admin/public +docker ps | grep hummingbot ``` -### Common Issues - -**Issue**: API won't start - "Database connection failed" -- **Solution**: Run `./fix-database.sh` to repair the database configuration - -**Issue**: Bot containers won't start -- **Solution**: Check Docker daemon is running and you have sufficient resources - -**Issue**: Can't access API at localhost:8000 -- **Solution**: Verify the API container is running: `docker ps | grep hummingbot-api` - -**Issue**: Authentication fails -- **Solution**: Check your USERNAME and PASSWORD in the `.env` file - -**Issue**: Old bot data causing conflicts -- **Solution**: Clean up old volumes: `docker compose down -v` (โš ๏ธ deletes data) - -## Support & Documentation - -- **API Documentation**: Available at `http://localhost:8000/docs` when running -- **Detailed Examples**: Check the `CLAUDE.md` file for comprehensive API usage examples -- **Issues**: Report bugs and feature requests through the project's issue tracker -- **Database Troubleshooting**: Use `./fix-database.sh` for automated fixes ---- +## Support -Ready to start trading? Deploy your first account and start exploring the powerful capabilities of the Hummingbot API! \ No newline at end of file +- **API Docs**: http://localhost:8000/docs +- **Issues**: https://github.com/hummingbot/hummingbot-api/issues diff --git a/dashboard-credentials.yml b/dashboard-credentials.yml deleted file mode 100644 index 92ad99b5..00000000 --- a/dashboard-credentials.yml +++ /dev/null @@ -1,15 +0,0 @@ -# This only works if you change the env variable in the docker-compose.yml -credentials: - usernames: - admin: - email: admin@gmail.com - name: John Doe - logged_in: False - password: abc -cookie: - expiry_days: 0 - key: some_signature_key # Must be string - name: some_cookie_name -pre-authorized: - emails: - - admin@admin.com diff --git a/database/repositories/gateway_clmm_repository.py b/database/repositories/gateway_clmm_repository.py index 9c69d3da..af11b0df 100644 --- a/database/repositories/gateway_clmm_repository.py +++ b/database/repositories/gateway_clmm_repository.py @@ -1,8 +1,8 @@ from datetime import datetime, timezone -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Set, Tuple from decimal import Decimal -from sqlalchemy import desc, select +from sqlalchemy import desc, select, distinct from sqlalchemy.ext.asyncio import AsyncSession from database.models import GatewayCLMMPosition, GatewayCLMMEvent @@ -90,6 +90,23 @@ async def close_position(self, position_address: str) -> Optional[GatewayCLMMPos await self.session.flush() return position + async def reopen_position(self, position_address: str) -> Optional[GatewayCLMMPosition]: + """ + Reopen a position that was incorrectly marked as closed. 
+ + This is used when autodiscover finds a position that exists on-chain + but was marked as CLOSED in the database (e.g., due to a failed close transaction). + """ + result = await self.session.execute( + select(GatewayCLMMPosition).where(GatewayCLMMPosition.position_address == position_address) + ) + position = result.scalar_one_or_none() + if position and position.status == "CLOSED": + position.status = "OPEN" + position.closed_at = None + await self.session.flush() + return position + async def get_positions( self, network: Optional[str] = None, @@ -138,6 +155,49 @@ async def get_open_positions( limit=1000 ) + async def get_unique_wallet_configs(self) -> List[Dict]: + """ + Get unique combinations of connector/network/wallet from all positions. + + Returns: + List of dicts with keys: connector, network, wallet_address + This is useful for discovering which wallets to poll for positions. + """ + query = select( + distinct(GatewayCLMMPosition.connector), + GatewayCLMMPosition.network, + GatewayCLMMPosition.wallet_address + ).distinct() + + result = await self.session.execute(query) + rows = result.all() + + return [ + { + "connector": row[0], + "network": row[1], + "wallet_address": row[2] + } + for row in rows + ] + + async def get_position_addresses_set(self, status: Optional[str] = None) -> Set[str]: + """ + Get a set of position addresses in the database. + + Args: + status: Optional filter by status ("OPEN" or "CLOSED"). + If None, returns all positions. + + Returns: + Set of position addresses (useful for quick existence checks) + """ + query = select(GatewayCLMMPosition.position_address) + if status: + query = query.where(GatewayCLMMPosition.status == status) + result = await self.session.execute(query) + return {row[0] for row in result.all()} + # ============================================ # Event Management # ============================================ diff --git a/fix-database.sh b/fix-database.sh deleted file mode 100755 index 50a06cf1..00000000 --- a/fix-database.sh +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/bash - -# Database Troubleshooting Script -# This script helps diagnose and fix PostgreSQL database initialization issues - -set -e - -# Colors for better output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -echo "๐Ÿ”ง PostgreSQL Database Troubleshooting Tool" -echo "" - -# Check if PostgreSQL container is running -echo -e "${YELLOW}๐Ÿ” Checking PostgreSQL container status...${NC}" -if ! docker ps | grep -q hummingbot-postgres; then - echo -e "${RED}โŒ PostgreSQL container is not running!${NC}" - echo "" - echo -e "${YELLOW}Starting PostgreSQL container...${NC}" - docker compose up postgres -d - sleep 5 -fi - -# Wait for PostgreSQL to be ready -echo -e "${YELLOW}โณ Waiting for PostgreSQL to be ready...${NC}" -MAX_RETRIES=30 -RETRY_COUNT=0 -DB_READY=false - -while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do - if docker exec hummingbot-postgres pg_isready -U postgres > /dev/null 2>&1; then - DB_READY=true - break - fi - RETRY_COUNT=$((RETRY_COUNT + 1)) - echo -ne "\r${YELLOW}โณ Waiting... ($RETRY_COUNT/$MAX_RETRIES)${NC}" - sleep 2 -done -echo "" - -if [ "$DB_READY" = false ]; then - echo -e "${RED}โŒ PostgreSQL is not responding. 
Check logs:${NC}" - echo "docker logs hummingbot-postgres" - exit 1 -fi - -echo -e "${GREEN}โœ… PostgreSQL is running!${NC}" -echo "" - -# Check current database state -echo -e "${YELLOW}๐Ÿ” Checking database configuration...${NC}" - -# Check if hbot user exists -USER_EXISTS=$(docker exec hummingbot-postgres psql -U postgres -tAc "SELECT 1 FROM pg_roles WHERE rolname='hbot'" 2>/dev/null) - -# Check if database exists -DB_EXISTS=$(docker exec hummingbot-postgres psql -U postgres -tAc "SELECT 1 FROM pg_database WHERE datname='hummingbot_api'" 2>/dev/null) - -echo "" -echo -e "${BLUE}Current Status:${NC}" -if [ "$USER_EXISTS" = "1" ]; then - echo -e " User 'hbot': ${GREEN}โœ“ EXISTS${NC}" -else - echo -e " User 'hbot': ${RED}โœ— MISSING${NC}" -fi - -if [ "$DB_EXISTS" = "1" ]; then - echo -e " Database 'hummingbot_api': ${GREEN}โœ“ EXISTS${NC}" -else - echo -e " Database 'hummingbot_api': ${RED}โœ— MISSING${NC}" -fi -echo "" - -# Fix if needed -if [ "$USER_EXISTS" != "1" ] || [ "$DB_EXISTS" != "1" ]; then - echo -e "${YELLOW}๐Ÿ”ง Fixing database configuration...${NC}" - echo "" - - # Check if init-db.sql exists - if [ ! -f "init-db.sql" ]; then - echo -e "${RED}โŒ init-db.sql file not found!${NC}" - echo "Please ensure you're running this script from the hummingbot-api directory." - exit 1 - fi - - # Run initialization script - echo -e "${YELLOW}Running database initialization...${NC}" - docker exec -i hummingbot-postgres psql -U postgres < init-db.sql - - if [ $? -eq 0 ]; then - echo "" - echo -e "${GREEN}โœ… Database initialized successfully!${NC}" - else - echo "" - echo -e "${RED}โŒ Failed to initialize database${NC}" - exit 1 - fi -else - echo -e "${GREEN}โœ… Database configuration is correct!${NC}" -fi - -# Test connection with hbot user -echo "" -echo -e "${YELLOW}๐Ÿงช Testing connection with hbot user...${NC}" -if docker exec hummingbot-postgres psql -U hbot -d hummingbot_api -c "SELECT version();" > /dev/null 2>&1; then - echo -e "${GREEN}โœ… Connection successful!${NC}" -else - echo -e "${RED}โŒ Connection failed${NC}" - echo "" - echo -e "${YELLOW}Trying to fix permissions...${NC}" - - docker exec -i hummingbot-postgres psql -U postgres << 'EOF' -\c hummingbot_api -GRANT ALL ON SCHEMA public TO hbot; -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO hbot; -GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO hbot; -ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO hbot; -ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO hbot; -EOF - - if docker exec hummingbot-postgres psql -U hbot -d hummingbot_api -c "SELECT version();" > /dev/null 2>&1; then - echo -e "${GREEN}โœ… Permissions fixed! Connection successful!${NC}" - else - echo -e "${RED}โŒ Still unable to connect. 
Manual intervention required.${NC}" - exit 1 - fi -fi - -echo "" -echo -e "${GREEN}๐ŸŽ‰ Database is ready to use!${NC}" -echo "" -echo -e "${BLUE}Connection Details:${NC}" -echo " Host: localhost" -echo " Port: 5432" -echo " Database: hummingbot_api" -echo " User: hbot" -echo " Password: hummingbot-api" -echo "" -echo -e "${YELLOW}You can now start the API with:${NC}" -echo " make run" -echo " or" -echo " docker compose up -d" -echo "" \ No newline at end of file diff --git a/init-db.sql b/init-db.sql index 0c0af9f5..81c3f0e9 100644 --- a/init-db.sql +++ b/init-db.sql @@ -1,43 +1,10 @@ --- Database Initialization Script --- --- IMPORTANT: This script serves as a SAFETY NET for edge cases where PostgreSQL's --- automatic initialization (via POSTGRES_USER/POSTGRES_DB env vars) doesn't complete. --- --- In most cases, PostgreSQL will automatically create the user and database from the --- environment variables. However, this script ensures proper initialization when: --- - Volume data persists from incomplete initialization --- - Container restarts interrupt the init process --- - Manual database operations left the system in an inconsistent state --- --- This script is safe to run multiple times (idempotent) +-- Safety net for PostgreSQL initialization +-- PostgreSQL auto-creates user/db from POSTGRES_USER, POSTGRES_DB env vars +-- This script only runs on first container initialization --- Create the hbot user if it doesn't exist -DO $$ -BEGIN - IF NOT EXISTS (SELECT FROM pg_user WHERE usename = 'hbot') THEN - CREATE ROLE hbot WITH LOGIN PASSWORD 'hummingbot-api'; - RAISE NOTICE 'User hbot created successfully'; - ELSE - RAISE NOTICE 'User hbot already exists'; - END IF; -END -$$; - --- Create the database if it doesn't exist -SELECT 'CREATE DATABASE hummingbot_api OWNER hbot' -WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'hummingbot_api')\gexec - --- Grant all privileges on the database -GRANT ALL PRIVILEGES ON DATABASE hummingbot_api TO hbot; - --- Connect to the database and grant schema privileges -\c hummingbot_api hbot - --- Grant privileges on the public schema +-- Ensure proper permissions on public schema GRANT ALL ON SCHEMA public TO hbot; GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO hbot; GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO hbot; - --- Set default privileges for future objects ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO hbot; -ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO hbot; \ No newline at end of file +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO hbot; diff --git a/routers/gateway_clmm.py b/routers/gateway_clmm.py index 69bfad79..72292459 100644 --- a/routers/gateway_clmm.py +++ b/routers/gateway_clmm.py @@ -2,6 +2,7 @@ Gateway CLMM Router - Handles DEX CLMM liquidity operations via Hummingbot Gateway. Supports CLMM connectors (Meteora, Raydium, Uniswap V3) for concentrated liquidity positions. 
""" +import asyncio import logging from typing import List, Optional from decimal import Decimal @@ -229,9 +230,6 @@ async def _refresh_position_data(position, accounts_service: AccountsService, cl - position status (if closed externally) """ try: - # Parse network to get chain and network name - chain, network = accounts_service.gateway_client.parse_network_id(position.network) - # Get wallet address for the position wallet_address = position.wallet_address @@ -239,7 +237,7 @@ async def _refresh_position_data(position, accounts_service: AccountsService, cl try: positions_list = await accounts_service.gateway_client.clmm_positions_owned( connector=position.connector, - network=network, + chain_network=position.network, # position.network is already in 'chain-network' format wallet_address=wallet_address, pool_address=position.pool_address ) @@ -951,7 +949,7 @@ async def close_clmm_position( try: positions_list = await accounts_service.gateway_client.clmm_positions_owned( connector=request.connector, - network=network, + chain_network=request.network, # request.network is already in 'chain-network' format wallet_address=wallet_address, pool_address=pool_address ) @@ -1034,7 +1032,7 @@ async def close_clmm_position( quote_fee_pending=Decimal("0") ) - # Update current_price with close price before marking as closed + # Update current_price with close price if close_price: await clmm_repo.update_position_liquidity( position_address=request.position_address, @@ -1043,9 +1041,35 @@ async def close_clmm_position( current_price=Decimal(str(close_price)) ) - # Mark position as CLOSED - await clmm_repo.close_position(request.position_address) - logger.info(f"Updated position {request.position_address}: collected fees updated, pending fees reset to 0, status set to CLOSED") + # Verify position is actually closed by checking if it still exists on Gateway + # Gateway returns 500 (or 404) when position doesn't exist + try: + await asyncio.sleep(2) # Wait for transaction to propagate + + verify_result = await accounts_service.gateway_client.clmm_position_info( + connector=request.connector, + chain_network=request.network, + position_address=request.position_address + ) + + # If we get an error response (404 or 500), position is closed + if verify_result and isinstance(verify_result, dict) and "error" in verify_result: + status_code = verify_result.get("status") + if status_code in (404, 500): + await clmm_repo.close_position(request.position_address) + logger.info(f"Position {request.position_address} verified as closed (Gateway returned {status_code})") + else: + logger.warning(f"Unexpected error verifying position close: {verify_result}") + elif verify_result and "address" in verify_result: + # Position still exists - might be a failed close or delayed propagation + logger.warning(f"Position {request.position_address} still exists after close transaction. Will be handled by poller.") + else: + logger.debug(f"Could not verify position close status, will be handled by poller") + + except Exception as verify_error: + logger.warning(f"Error verifying position close: {verify_error}. 
Will be handled by poller.") + + logger.info(f"Updated position {request.position_address}: collected fees updated, pending fees reset to 0.") except Exception as db_error: logger.error(f"Error recording CLOSE event: {db_error}", exc_info=True) @@ -1123,7 +1147,7 @@ async def collect_fees_from_clmm_position( try: positions_list = await accounts_service.gateway_client.clmm_positions_owned( connector=request.connector, - network=network, + chain_network=request.network, # request.network is already in 'chain-network' format wallet_address=wallet_address, pool_address=pool_address ) @@ -1261,7 +1285,7 @@ async def get_clmm_positions_owned( # Get positions for the specified pool result = await accounts_service.gateway_client.clmm_positions_owned( connector=request.connector, - network=network, + chain_network=request.network, # request.network is already in 'chain-network' format wallet_address=wallet_address, pool_address=request.pool_address ) diff --git a/run.sh b/run.sh deleted file mode 100755 index c6b36427..00000000 --- a/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# Run script for Backend API -# Usage: ./run.sh [--dev] -# --dev: Run API from source using uvicorn -# Without --dev: Run using docker compose - -if [[ "$1" == "--dev" ]]; then - echo "Running API from source..." - # Activate conda environment and run with uvicorn - docker compose up emqx postgres -d - source "$(conda info --base)/etc/profile.d/conda.sh" - conda activate hummingbot-api - uvicorn main:app --reload -else - echo "Running with Docker Compose..." - docker compose up -d -fi \ No newline at end of file diff --git a/services/gateway_client.py b/services/gateway_client.py index 9cfac128..f11feb51 100644 --- a/services/gateway_client.py +++ b/services/gateway_client.py @@ -123,6 +123,38 @@ async def get_default_wallet_address(self, chain: str) -> Optional[str]: logger.error(f"Error getting default wallet for chain {chain}: {e}") return None + async def get_all_wallet_addresses(self, chain: Optional[str] = None) -> Dict[str, List[str]]: + """ + Get all wallet addresses, optionally filtered by chain. + + Args: + chain: Optional chain filter (e.g., 'solana', 'ethereum'). + If not provided, returns wallets for all chains. + + Returns: + Dict mapping chain name to list of wallet addresses. + Example: {"solana": ["addr1", "addr2"], "ethereum": ["addr3"]} + """ + try: + wallets = await self.get_wallets() + if wallets is None: + return {} + + result = {} + for wallet in wallets: + wallet_chain = wallet.get("chain") + if chain and wallet_chain != chain: + continue + + addresses = wallet.get("walletAddresses", []) + if addresses and wallet_chain: + result[wallet_chain] = addresses + + return result + except Exception as e: + logger.error(f"Error getting all wallet addresses: {e}") + return {} + async def add_wallet(self, chain: str, private_key: str, set_default: bool = True) -> Dict: """Add a wallet to Gateway""" return await self._request("POST", "wallet/add", json={ @@ -414,33 +446,68 @@ async def clmm_remove_liquidity( async def clmm_position_info( self, connector: str, - network: str, - wallet_address: str, + chain_network: str, position_address: str ) -> Dict: - """Get CLMM position information including pending fees""" + """ + Get CLMM position information including pending fees. + + Note: Gateway returns 500 instead of 404 when position doesn't exist (is closed). + Callers should treat 500 errors as "position not found/closed". 
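+
+        A typical call, mirroring how the routers invoke it (the position
+        address below is a placeholder; 'meteora' and 'solana-mainnet-beta'
+        are example values for a supported connector and network):
+
+            info = await accounts_service.gateway_client.clmm_position_info(
+                connector="meteora",
+                chain_network="solana-mainnet-beta",
+                position_address="<position NFT address>",
+            )
+            if isinstance(info, dict) and "error" in info:
+                # status 404/500 -> position no longer exists on-chain (closed)
+                ...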
+ """ + # Validate required parameters + if not connector: + raise ValueError("connector is required for clmm_position_info") + if not chain_network: + raise ValueError("chain_network is required for clmm_position_info") + if not position_address: + raise ValueError("position_address is required for clmm_position_info") + params = { - "network": network, - "walletAddress": wallet_address, + "connector": connector, + "chainNetwork": chain_network, "positionAddress": position_address } - return await self._request("GET", f"connectors/{connector}/clmm/position-info", params=params) + return await self._request("GET", "trading/clmm/position-info", params=params) async def clmm_positions_owned( self, connector: str, - network: str, + chain_network: str, wallet_address: str, - pool_address: str - ) -> Dict: - """Get all CLMM positions owned by wallet for a specific pool""" + pool_address: Optional[str] = None + ) -> List[Dict]: + """ + Get CLMM positions owned by a wallet. + + Args: + connector: CLMM connector (e.g., 'meteora', 'raydium') + chain_network: Chain and network in format 'chain-network' (e.g., 'solana-mainnet-beta') + wallet_address: Wallet address to query + pool_address: Optional pool address to filter positions. + If not provided, returns ALL positions across all pools. + + Returns: + List of position dictionaries with fields like: + - address: Position NFT address + - poolAddress: Pool address + - baseTokenAddress, quoteTokenAddress + - baseTokenAmount, quoteTokenAmount + - baseFeeAmount, quoteFeeAmount + - lowerBinId, upperBinId + - lowerPrice, upperPrice, price + """ params = { - "network": network, + "connector": connector, + "chainNetwork": chain_network, "walletAddress": wallet_address, - "poolAddress": pool_address } - return await self._request("GET", f"connectors/{connector}/clmm/positions-owned", params=params) + # Only add poolAddress if specified (allows fetching all positions) + if pool_address: + params["poolAddress"] = pool_address + + return await self._request("GET", "trading/clmm/positions-owned", params=params) async def clmm_collect_fees( self, diff --git a/services/gateway_transaction_poller.py b/services/gateway_transaction_poller.py index 9a0b1090..991fa44d 100644 --- a/services/gateway_transaction_poller.py +++ b/services/gateway_transaction_poller.py @@ -373,9 +373,17 @@ async def poll_transaction_once(self, tx_hash: str, network_id: str, wallet_addr return await self._check_transaction_status(chain, network, tx_hash) # ============================================ - # Position State Polling + # Position State Polling & Discovery # ============================================ + # Supported CLMM connectors and their default networks + SUPPORTED_CLMM_CONFIGS = [ + {"connector": "meteora", "chain": "solana", "network": "mainnet-beta"}, + # Add more connectors as they become supported: + # {"connector": "raydium", "chain": "solana", "network": "mainnet-beta"}, + # {"connector": "uniswap", "chain": "ethereum", "network": "mainnet"}, + ] + async def _position_poll_loop(self): """Position state polling loop (runs less frequently).""" while self._running: @@ -384,7 +392,7 @@ async def _position_poll_loop(self): now = datetime.now(timezone.utc) if self._last_position_poll is None or \ (now - self._last_position_poll).total_seconds() >= self.position_poll_interval: - await self._poll_open_positions() + await self._poll_and_discover_positions() self._last_position_poll = now # Sleep for a short time to avoid busy waiting @@ -395,51 +403,281 @@ async def 
_position_poll_loop(self): logger.error(f"Error in position poll loop: {e}", exc_info=True) await asyncio.sleep(10) - async def _poll_open_positions(self): - """Poll all open CLMM positions and update their state.""" + async def _poll_and_discover_positions(self): + """ + Main position polling method that: + 1. Discovers new positions from Gateway (created via UI or other means) + 2. Updates all open positions with latest state + """ try: # Check if Gateway is available if not await self.gateway_client.ping(): logger.debug("Gateway not available, skipping position polling") return + # Step 1: Discover new positions from Gateway + discovered_count = await self._discover_positions_from_gateway() + if discovered_count > 0: + logger.info(f"Discovered {discovered_count} new positions from Gateway") + + # Step 2: Update all open positions + await self._update_all_open_positions() + + except Exception as e: + logger.error(f"Error in position poll and discovery: {e}", exc_info=True) + + async def _discover_positions_from_gateway(self) -> int: + """ + Discover positions from Gateway that aren't tracked in the database, + and reopen positions that were incorrectly marked as closed. + + This allows tracking positions created directly via UI or other means, + not just those created through the API. + + Also corrects data inconsistencies where a position was marked CLOSED + in the database but is still OPEN on-chain (e.g., due to a failed close + transaction). + + Returns: + Number of newly discovered + reopened positions + """ + discovered_count = 0 + reopened_count = 0 + + try: + # Get all wallet addresses for supported chains + wallet_addresses_by_chain = await self.gateway_client.get_all_wallet_addresses() + if not wallet_addresses_by_chain: + logger.debug("No wallets configured in Gateway, skipping position discovery") + return 0 + + # Get existing position addresses from database (for quick existence check) + async with self.db_manager.get_session_context() as session: + clmm_repo = GatewayCLMMRepository(session) + # Get OPEN positions (to skip - already tracked correctly) + open_positions = await clmm_repo.get_position_addresses_set(status="OPEN") + # Get CLOSED positions (to potentially reopen if still on-chain) + closed_positions = await clmm_repo.get_position_addresses_set(status="CLOSED") + + # Poll each supported connector/chain/wallet combination + for config in self.SUPPORTED_CLMM_CONFIGS: + connector = config["connector"] + chain = config["chain"] + network = config["network"] + + # Get wallet addresses for this chain + wallet_addresses = wallet_addresses_by_chain.get(chain, []) + if not wallet_addresses: + continue + + for wallet_address in wallet_addresses: + try: + # Fetch ALL positions for this wallet (no pool filter) + chain_network = f"{chain}-{network}" + gateway_positions = await self.gateway_client.clmm_positions_owned( + connector=connector, + chain_network=chain_network, + wallet_address=wallet_address, + pool_address=None # Get all positions across all pools + ) + + if not gateway_positions or not isinstance(gateway_positions, list): + continue + + # Process each position + for pos_data in gateway_positions: + position_address = pos_data.get("address") + if not position_address: + continue + + # Skip if already tracked as OPEN + if position_address in open_positions: + continue + + # Check if position was incorrectly marked as CLOSED + if position_address in closed_positions: + # Position exists on-chain but is CLOSED in DB โ†’ reopen it + async with 
self.db_manager.get_session_context() as session: + clmm_repo = GatewayCLMMRepository(session) + reopened = await clmm_repo.reopen_position(position_address) + if reopened: + reopened_count += 1 + # Move from closed to open set for this run + closed_positions.discard(position_address) + open_positions.add(position_address) + logger.warning(f"Reopened position {position_address} - " + f"was CLOSED in DB but still exists on-chain") + continue + + # Create new position in database + new_position = await self._create_discovered_position( + pos_data=pos_data, + connector=connector, + chain=chain, + network=network, + wallet_address=wallet_address + ) + + if new_position: + discovered_count += 1 + open_positions.add(position_address) + logger.info(f"Discovered new position: {position_address} " + f"(pool: {pos_data.get('poolAddress', 'unknown')[:16]}...)") + + except Exception as e: + logger.warning(f"Error discovering positions for {connector}/{chain}/{wallet_address}: {e}") + continue + + except Exception as e: + logger.error(f"Error in position discovery: {e}", exc_info=True) + + if reopened_count > 0: + logger.info(f"Position discovery complete: {discovered_count} new, {reopened_count} reopened") + + return discovered_count + reopened_count + + async def _create_discovered_position( + self, + pos_data: Dict, + connector: str, + chain: str, + network: str, + wallet_address: str + ) -> Optional[GatewayCLMMPosition]: + """ + Create a database record for a discovered position. + + These positions were created externally (e.g., via UI) and are being + discovered by the poller. + """ + try: + position_address = pos_data.get("address") + pool_address = pos_data.get("poolAddress", "") + + # Extract token addresses + base_token_address = pos_data.get("baseTokenAddress", "") + quote_token_address = pos_data.get("quoteTokenAddress", "") + + # Use full addresses as tokens (consistent with API-created positions) + base_token = base_token_address if base_token_address else "UNKNOWN" + quote_token = quote_token_address if quote_token_address else "UNKNOWN" + trading_pair = f"{base_token}-{quote_token}" + + # Extract price data + current_price = float(pos_data.get("price", 0)) + lower_price = float(pos_data.get("lowerPrice", 0)) + upper_price = float(pos_data.get("upperPrice", 0)) + + # Extract liquidity amounts + base_token_amount = float(pos_data.get("baseTokenAmount", 0)) + quote_token_amount = float(pos_data.get("quoteTokenAmount", 0)) + + # Extract fee data + base_fee_pending = float(pos_data.get("baseFeeAmount", 0)) + quote_fee_pending = float(pos_data.get("quoteFeeAmount", 0)) + + # Extract bin IDs (for Meteora) + lower_bin_id = pos_data.get("lowerBinId") + upper_bin_id = pos_data.get("upperBinId") + + # Calculate in_range status + in_range = "UNKNOWN" + if current_price > 0 and lower_price > 0 and upper_price > 0: + if lower_price <= current_price <= upper_price: + in_range = "IN_RANGE" + else: + in_range = "OUT_OF_RANGE" + + # Calculate percentage: (upper_price - lower_price) / lower_price + percentage = None + if lower_price > 0: + percentage = (upper_price - lower_price) / lower_price + + # Network in unified format + network_id = f"{chain}-{network}" + + # Create position in database + async with self.db_manager.get_session_context() as session: + clmm_repo = GatewayCLMMRepository(session) + + position_data = { + "position_address": position_address, + "pool_address": pool_address, + "network": network_id, + "connector": connector, + "wallet_address": wallet_address, + "trading_pair": 
trading_pair, + "base_token": base_token, + "quote_token": quote_token, + "status": "OPEN", + "lower_price": lower_price, + "upper_price": upper_price, + "lower_bin_id": lower_bin_id, + "upper_bin_id": upper_bin_id, + "entry_price": current_price, # Best available estimate + "current_price": current_price, + "percentage": percentage, + # For discovered positions, we don't know initial amounts + # Use current amounts as initial (best estimate) + "initial_base_token_amount": base_token_amount, + "initial_quote_token_amount": quote_token_amount, + "base_token_amount": base_token_amount, + "quote_token_amount": quote_token_amount, + "in_range": in_range, + "base_fee_pending": base_fee_pending, + "quote_fee_pending": quote_fee_pending, + "base_fee_collected": 0, + "quote_fee_collected": 0, + } + + position = await clmm_repo.create_position(position_data) + + # Create a DISCOVERED event to mark this position was auto-discovered + event_data = { + "position_id": position.id, + "transaction_hash": f"discovered_{position_address[:16]}", # Synthetic tx hash + "event_type": "DISCOVERED", + "base_token_amount": base_token_amount, + "quote_token_amount": quote_token_amount, + "status": "CONFIRMED" # No actual transaction to confirm + } + await clmm_repo.create_event(event_data) + + return position + + except Exception as e: + logger.error(f"Error creating discovered position {pos_data.get('address')}: {e}", exc_info=True) + return None + + async def _update_all_open_positions(self): + """Update state for all open positions from Gateway.""" + try: async with self.db_manager.get_session_context() as session: clmm_repo = GatewayCLMMRepository(session) # Get all open positions open_positions = await clmm_repo.get_open_positions() if not open_positions: - logger.debug("No open CLMM positions to poll") + logger.debug("No open CLMM positions to update") return - logger.info(f"Polling {len(open_positions)} open CLMM positions") - - # Extract position details before closing session - position_details = [ - { - "position_address": pos.position_address, - "pool_address": pos.pool_address, - "connector": pos.connector, - "network": pos.network, - "wallet_address": pos.wallet_address - } - for pos in open_positions - ] - - # Poll each position in a separate session - for pos_detail in position_details: - try: - async with self.db_manager.get_session_context() as session: - clmm_repo = GatewayCLMMRepository(session) - position = await clmm_repo.get_position_by_address(pos_detail["position_address"]) - if position and position.status == "OPEN": - await self._refresh_position_state(position, clmm_repo) - except Exception as e: - logger.warning(f"Failed to poll position {pos_detail['position_address']}: {e}") - continue + logger.info(f"Updating {len(open_positions)} open CLMM positions") + + # Update each position within the same session + for position in open_positions: + try: + await self._refresh_position_state(position, clmm_repo) + except Exception as e: + logger.warning(f"Failed to update position {position.position_address}: {e}") + continue except Exception as e: - logger.error(f"Error polling open positions: {e}", exc_info=True) + logger.error(f"Error updating open positions: {e}", exc_info=True) + + # Legacy method name for backwards compatibility + async def _poll_open_positions(self): + """Poll all open CLMM positions and update their state. 
(Legacy wrapper)""" + await self._poll_and_discover_positions() async def _refresh_position_state(self, position: GatewayCLMMPosition, clmm_repo: GatewayCLMMRepository): """ @@ -452,20 +690,25 @@ async def _refresh_position_state(self, position: GatewayCLMMPosition, clmm_repo - position status (if closed externally) """ try: - # Parse network to get chain and network name - parts = position.network.split('-', 1) - if len(parts) != 2: - logger.error(f"Invalid network format for position {position.position_address}: {position.network}") + # Validate position has required fields + if not position.position_address: + logger.error(f"Position ID {position.id} has no position_address, skipping refresh") + return + if not position.wallet_address: + logger.error(f"Position {position.position_address} has no wallet_address, skipping refresh") + return + if not position.connector: + logger.error(f"Position {position.position_address} has no connector, skipping refresh") + return + if not position.network: + logger.error(f"Position {position.position_address} has no network, skipping refresh") return - - chain, network = parts # Get individual position info from Gateway (includes pending fees) try: result = await self.gateway_client.clmm_position_info( connector=position.connector, - network=network, - wallet_address=position.wallet_address, + chain_network=position.network, # position.network is already in 'chain-network' format position_address=position.position_address ) @@ -481,12 +724,14 @@ async def _refresh_position_state(self, position: GatewayCLMMPosition, clmm_repo # Check if Gateway returned an error response if "error" in result: status_code = result.get("status") - # 404 means position doesn't exist on-chain โ†’ close it - if status_code == 404: - logger.info(f"Position {position.position_address} not found on Gateway (404), marking as CLOSED") + + # Gateway returns 500 instead of 404 when position doesn't exist (closed) + # Treat any error (404 or 500) on position-info as "position closed" + if status_code in (404, 500): + logger.info(f"Position {position.position_address} not found on Gateway (status: {status_code}), marking as CLOSED") await clmm_repo.close_position(position.position_address) return - # Other errors (500, timeout, etc.) 
โ†’ skip update, don't close + # Other errors โ†’ skip update, don't close logger.debug(f"Gateway error for position {position.position_address}: {result.get('error')} (status: {status_code})") return diff --git a/setup.sh b/setup.sh index ee98935f..51cd2c44 100755 --- a/setup.sh +++ b/setup.sh @@ -1,340 +1,47 @@ #!/bin/bash +# Hummingbot API Setup - Creates .env with sensible defaults -# Backend API Setup Script -# This script creates a comprehensive .env file with all configuration options -# following the Pydantic Settings structure established in config.py +set -e -set -e # Exit on any error - -# Colors for better output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -CYAN='\033[0;36m' -NC='\033[0m' # No Color - -echo "๐Ÿš€ Backend API Setup" +echo "Hummingbot API Setup" echo "" -echo -n "Config password [default: admin]: " -read CONFIG_PASSWORD -CONFIG_PASSWORD=${CONFIG_PASSWORD:-admin} - -echo -n "API username [default: admin]: " -read USERNAME -USERNAME=${USERNAME:-admin} - -echo -n "API password [default: admin]: " -read PASSWORD +# Only prompt for password (most common customization) +read -p "API password [default: admin]: " PASSWORD PASSWORD=${PASSWORD:-admin} -echo "" -echo -e "${YELLOW}Optional Services${NC}" -echo -n "Enable Condor Telegram bot? (y/n) [default: n]: " -read ENABLE_CONDOR -ENABLE_CONDOR=${ENABLE_CONDOR:-n} - -if [[ "$ENABLE_CONDOR" =~ ^[Yy]$ ]]; then - echo -n "Telegram Bot Token: " - read TELEGRAM_TOKEN - echo -n "Telegram Allowed User IDs (comma-separated): " - read TELEGRAM_ALLOWED_IDS - echo -n "Pydantic Gateway Key (optional, press Enter to skip): " - read PYDANTIC_GATEWAY_KEY -fi - -echo "" -echo -e "${YELLOW}Gateway Configuration (Optional)${NC}" -echo -n "Gateway passphrase [default: admin, press Enter to skip]: " -read GATEWAY_PASSPHRASE -GATEWAY_PASSPHRASE=${GATEWAY_PASSPHRASE:-admin} - -# Set paths and defaults -BOTS_PATH=$(pwd) - -# Use sensible defaults for everything else -DEBUG_MODE="false" -BROKER_HOST="localhost" -BROKER_PORT="1883" -BROKER_USERNAME="admin" -BROKER_PASSWORD="password" -DATABASE_URL="postgresql+asyncpg://hbot:hummingbot-api@localhost:5432/hummingbot_api" -CLEANUP_INTERVAL="300" -FEED_TIMEOUT="600" -AWS_API_KEY="" -AWS_SECRET_KEY="" -S3_BUCKET="" -LOGFIRE_ENV="dev" -BANNED_TOKENS='["NAV","ARS","ETHW","ETHF","NEWT"]' - -echo "" -echo -e "${GREEN}โœ… Using sensible defaults for MQTT, Database, and other settings${NC}" - -echo "" -echo -e "${GREEN}๐Ÿ“ Creating .env file...${NC}" +read -p "Config password [default: admin]: " CONFIG_PASSWORD +CONFIG_PASSWORD=${CONFIG_PASSWORD:-admin} -# Create .env file with proper structure and comments +# Create .env with sensible defaults cat > .env << EOF -# ================================================================= -# Backend API Environment Configuration -# Generated on: $(date) -# ================================================================= - -# ================================================================= -# ๐Ÿ” Security Configuration -# ================================================================= -USERNAME=$USERNAME +# Hummingbot API Configuration +USERNAME=admin PASSWORD=$PASSWORD -DEBUG_MODE=$DEBUG_MODE CONFIG_PASSWORD=$CONFIG_PASSWORD +DEBUG_MODE=false -# ================================================================= -# ๐Ÿ”— MQTT Broker Configuration (BROKER_*) -# ================================================================= -BROKER_HOST=$BROKER_HOST -BROKER_PORT=$BROKER_PORT -BROKER_USERNAME=$BROKER_USERNAME 
-BROKER_PASSWORD=$BROKER_PASSWORD - -# ================================================================= -# ๐Ÿ’พ Database Configuration (DATABASE_*) -# ================================================================= -DATABASE_URL=$DATABASE_URL +# MQTT Broker +BROKER_HOST=localhost +BROKER_PORT=1883 +BROKER_USERNAME=admin +BROKER_PASSWORD=password -# ================================================================= -# ๐Ÿ“Š Market Data Feed Manager Configuration (MARKET_DATA_*) -# ================================================================= -MARKET_DATA_CLEANUP_INTERVAL=$CLEANUP_INTERVAL -MARKET_DATA_FEED_TIMEOUT=$FEED_TIMEOUT +# Database (auto-configured by docker-compose) +DATABASE_URL=postgresql+asyncpg://hbot:hummingbot-api@localhost:5432/hummingbot_api -# ================================================================= -# โ˜๏ธ AWS Configuration (AWS_*) - Optional -# ================================================================= -AWS_API_KEY=$AWS_API_KEY -AWS_SECRET_KEY=$AWS_SECRET_KEY -AWS_S3_DEFAULT_BUCKET_NAME=$S3_BUCKET - -# ================================================================= -# โš™๏ธ Application Settings -# ================================================================= -LOGFIRE_ENVIRONMENT=$LOGFIRE_ENV -BANNED_TOKENS=$BANNED_TOKENS - -# ================================================================= -# ๐ŸŒ Gateway Configuration (GATEWAY_*) - Optional -# ================================================================= -GATEWAY_PASSPHRASE=$GATEWAY_PASSPHRASE +# Gateway (optional) GATEWAY_URL=http://localhost:15888 +GATEWAY_PASSPHRASE=admin -# ================================================================= -# ๐Ÿ“ Legacy Settings (maintained for backward compatibility) -# ================================================================= -BOTS_PATH=$BOTS_PATH - +# Paths +BOTS_PATH=$(pwd) EOF -echo -e "${GREEN}โœ… .env file created successfully!${NC}" -echo "" - - -# Enable Condor if requested -if [[ "$ENABLE_CONDOR" =~ ^[Yy]$ ]]; then - echo -e "${GREEN}๐Ÿค– Setting up Condor Telegram bot...${NC}" - - # Create condor directory for config files - mkdir -p condor - - # Create Condor .env file - cat > condor/.env << CONDOR_EOF -TELEGRAM_TOKEN=$TELEGRAM_TOKEN -TELEGRAM_ALLOWED_IDS=$TELEGRAM_ALLOWED_IDS -PYDANTIC_GATEWAY_KEY=$PYDANTIC_GATEWAY_KEY -CONDOR_EOF - - # Create servers.yml for Condor to connect to hummingbot-api - cat > condor/servers.yml << SERVERS_EOF -servers: - local: - host: host.docker.internal - port: 8000 - username: $USERNAME - password: $PASSWORD - default: true -SERVERS_EOF - - echo -e "${GREEN}โœ… Condor configured!${NC}" - echo "" -fi - -# Display configuration summary -echo -e "${BLUE}๐Ÿ“‹ Configuration Summary${NC}" -echo "=======================" -echo -e "${CYAN}Security:${NC} Username: $USERNAME, Debug: $DEBUG_MODE" -echo -e "${CYAN}Broker:${NC} $BROKER_HOST:$BROKER_PORT" -echo -e "${CYAN}Database:${NC} ${DATABASE_URL%%@*}@[hidden]" -echo -e "${CYAN}Market Data:${NC} Cleanup: ${CLEANUP_INTERVAL}s, Timeout: ${FEED_TIMEOUT}s" -echo -e "${CYAN}Environment:${NC} $LOGFIRE_ENV" - -if [ -n "$AWS_API_KEY" ]; then - echo -e "${CYAN}AWS:${NC} Configured with S3 bucket: $S3_BUCKET" -else - echo -e "${CYAN}AWS:${NC} Not configured (optional)" -fi - -echo "" -echo -e "${GREEN}๐ŸŽ‰ Setup Complete!${NC}" -echo "" - -# Check if password verification file exists -if [ ! 
-f "bots/credentials/master_account/.password_verification" ]; then - echo -e "${YELLOW}๐Ÿ“Œ Note:${NC} Password verification file will be created on first startup" - echo -e " Location: ${BLUE}bots/credentials/master_account/.password_verification${NC}" - echo "" -fi - -echo -e "Next steps:" -echo "1. Review the .env file if needed: cat .env" -echo "2. Install dependencies: make install" -echo "3. Start the API: make run" -echo "" -echo -e "${PURPLE}๐Ÿ’ก Pro tip:${NC} You can modify environment variables in .env file anytime" -echo -e "${PURPLE}๐Ÿ“š Documentation:${NC} Check config.py for all available settings" -echo -e "${PURPLE}๐Ÿ”’ Security:${NC} The password verification file secures bot credentials" -echo "" -echo -e "${GREEN}๐Ÿณ Starting services (API, EMQX, PostgreSQL)...${NC}" - -# Start all services -docker compose up -d & -docker pull hummingbot/hummingbot:latest & - -# Wait for both operations to complete -wait - -echo -e "${GREEN}โœ… All Docker containers started!${NC}" -echo "" - -# Wait for PostgreSQL to be ready -echo -e "${YELLOW}โณ Waiting for PostgreSQL to initialize...${NC}" -sleep 5 - -# Check PostgreSQL connection -MAX_RETRIES=30 -RETRY_COUNT=0 -DB_READY=false - -while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do - if docker exec hummingbot-postgres pg_isready -U hbot -d hummingbot_api > /dev/null 2>&1; then - DB_READY=true - break - fi - RETRY_COUNT=$((RETRY_COUNT + 1)) - echo -ne "\r${YELLOW}โณ Waiting for database... ($RETRY_COUNT/$MAX_RETRIES)${NC}" - sleep 2 -done -echo "" - -if [ "$DB_READY" = true ]; then - echo -e "${GREEN}โœ… PostgreSQL is ready!${NC}" - - # Verify database and user exist - echo -e "${YELLOW}๐Ÿ” Verifying database configuration...${NC}" - - # Check if hbot user exists - USER_EXISTS=$(docker exec hummingbot-postgres psql -U hbot -tAc "SELECT 1 FROM pg_roles WHERE rolname='hbot'" 2>/dev/null) - - # Check if database exists - DB_EXISTS=$(docker exec hummingbot-postgres psql -U hbot -d postgres -tAc "SELECT 1 FROM pg_database WHERE datname='hummingbot_api'" 2>/dev/null) - - if [ "$USER_EXISTS" = "1" ] && [ "$DB_EXISTS" = "1" ]; then - echo -e "${GREEN}โœ… Database 'hummingbot_api' and user 'hbot' verified successfully!${NC}" - else - echo -e "${YELLOW}โš ๏ธ Database initialization may be incomplete. Running manual initialization...${NC}" - - # Run the init script manually (connect to postgres database as hbot user) - docker exec -i hummingbot-postgres psql -U hbot -d postgres < init-db.sql - - if [ $? -eq 0 ]; then - echo -e "${GREEN}โœ… Database manually initialized successfully!${NC}" - else - echo -e "${RED}โŒ Failed to initialize database. See troubleshooting below.${NC}" - fi - fi -else - echo -e "${RED}โŒ PostgreSQL failed to start within timeout period${NC}" - echo "" - echo -e "${YELLOW}Troubleshooting steps:${NC}" - echo "1. Check PostgreSQL logs: docker logs hummingbot-postgres" - echo "2. Verify container status: docker ps -a | grep postgres" - echo "3. Try removing old volumes: docker compose down -v && docker compose up emqx postgres -d" - echo "4. 
Manually verify database: docker exec -it hummingbot-postgres psql -U postgres" - echo "" -fi - -echo -e "${GREEN}โœ… Setup completed!${NC}" -echo "" - -# Display services information -echo -e "${BLUE}๐ŸŽ‰ Your Hummingbot API Platform is Running!${NC}" -echo "=========================================" echo "" -echo -e "${CYAN}Available Services:${NC}" -echo -e " ๐Ÿ”ง ${GREEN}API${NC} - http://localhost:8000" -echo -e " ๐Ÿ“š ${GREEN}API Docs${NC} - http://localhost:8000/docs (Swagger UI)" -echo -e " ๐Ÿ“ก ${GREEN}EMQX Broker${NC} - localhost:1883" -echo -e " ๐Ÿ’พ ${GREEN}PostgreSQL${NC} - localhost:5432" - -if [[ "$ENABLE_CONDOR" =~ ^[Yy]$ ]]; then - echo -e " ๐Ÿค– ${GREEN}Condor${NC} - Telegram bot (running in condor/)" -fi - -echo "" - -echo -e "${YELLOW}๐Ÿ“ Next Steps:${NC}" -echo "" -echo "1. ${CYAN}Access the API:${NC}" -echo " โ€ข Swagger UI: http://localhost:8000/docs (full REST API documentation)" - -echo "" -echo "2. ${CYAN}Connect an AI Assistant:${NC}" -echo "" -echo " ${GREEN}Claude Code (CLI) Setup:${NC}" -echo " Add the MCP server with one command:" -echo "" -echo -e " ${BLUE}claude mcp add --transport stdio hummingbot -- docker run --rm -i -e HUMMINGBOT_API_URL=http://host.docker.internal:8000 -v hummingbot_mcp:/root/.hummingbot_mcp hummingbot/hummingbot-mcp:latest${NC}" -echo "" -echo " Then use natural language in your terminal:" -echo ' - "Show me my portfolio balances"' -echo ' - "Create a market making strategy for ETH-USDT on Binance"' +echo ".env created successfully!" echo "" -echo " ${PURPLE}Other AI assistants:${NC} See CLAUDE.md, GEMINI.md, or AGENTS.md for setup" - -if [[ "$ENABLE_CONDOR" =~ ^[Yy]$ ]]; then - echo "" - echo "3. ${CYAN}Start Condor Telegram Bot:${NC}" - echo " โ€ข Run: docker run -d --name condor --env-file condor/.env -v \$(pwd)/condor/servers.yml:/app/servers.yml --add-host=host.docker.internal:host-gateway hummingbot/condor:latest" - echo " โ€ข Find your bot in Telegram and send /start" -fi - -echo "" -echo -e "${CYAN}Available Access Methods:${NC}" -echo " โœ… Swagger UI (http://localhost:8000/docs) - Full REST API" -echo " โœ… MCP - AI Assistant integration (Claude, ChatGPT, Gemini)" - -if [[ "$ENABLE_CONDOR" =~ ^[Yy]$ ]]; then - echo " โœ… Condor - Telegram bot for mobile trading" -else - echo " โšช Condor - Run setup.sh again to enable Telegram bot" -fi - -echo "" - -echo -e "${PURPLE}๐Ÿ’ก Tips:${NC}" -echo " โ€ข View logs: docker compose logs -f" -echo " โ€ข Stop services: docker compose down" -echo " โ€ข Restart services: docker compose restart" -echo "" - -echo -e "${GREEN}Ready to start trading! ๐Ÿค–๐Ÿ’ฐ${NC}" +echo "Next steps:" +echo " make deploy # Start all services" +echo " make run # Run API locally (dev mode)" echo ""
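
A minimal usage sketch of the updated client call shown in the diff above, for reviewers: the method name `clmm_positions_owned`, the `chain_network` format (`'solana-mainnet-beta'`), the `connector="meteora"` value, and the optional `pool_address=None` behavior come from this PR; the import path and `GatewayClient()` constructor are assumptions for illustration only, not the repo's actual wiring.

```python
import asyncio
from typing import Dict, List, Set

# Hypothetical import path / constructor — adjust to the repo's real gateway client.
from services.gateway_client import GatewayClient


async def list_untracked_positions(wallet_address: str, tracked: Set[str]) -> List[Dict]:
    """Fetch all Meteora CLMM positions for a wallet and return those not yet
    tracked in the database (mirrors the poller's discovery step in this PR)."""
    client = GatewayClient()  # assumed constructor

    # Omitting pool_address makes Gateway return positions across all pools.
    positions = await client.clmm_positions_owned(
        connector="meteora",
        chain_network="solana-mainnet-beta",  # 'chain-network' format
        wallet_address=wallet_address,
        pool_address=None,
    )

    return [p for p in positions if p.get("address") not in tracked]


# Example (placeholder addresses):
# untracked = asyncio.run(list_untracked_positions("So1anaWa11et...", tracked=set()))
```

As a design note, passing `pool_address=None` is what lets the poller discover positions created outside the API (e.g., via the UI), since it no longer needs to know the pool in advance.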