From 8b12cbf30ce33cff4484518b7499748780694081 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 18 Mar 2026 14:04:59 -0700 Subject: [PATCH 01/18] feat: update staging compose for demo/preview deployments Update docker-compose.staging.yml to serve as the standard config for staging, demo, and branch preview environments: - Remove local Postgres (DB is always external via DATABASE_IP) - Add RabbitMQ container for Celery task broker - Add NATS container (was present but commented out in depends_on) - Add restart:always to all services - Switch from .envs/.local/.postgres to .envs/.production/.postgres - Remove hardcoded container_name on NATS (allows multiple instances) - Remove awscli service (backups handled by TeamCity) - RabbitMQ credentials configured via .envs/.production/.django, not hardcoded in compose Add compose/staging/docker-compose.db.yml as an optional convenience for running a local PostgreSQL container when no external DB is available (e.g., ood environment, local testing). Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/docker-compose.db.yml | 37 ++++++++++++++ docker-compose.staging.yml | 74 ++++++++++++++------------- 2 files changed, 75 insertions(+), 36 deletions(-) create mode 100644 compose/staging/docker-compose.db.yml diff --git a/compose/staging/docker-compose.db.yml b/compose/staging/docker-compose.db.yml new file mode 100644 index 000000000..f4f1183f1 --- /dev/null +++ b/compose/staging/docker-compose.db.yml @@ -0,0 +1,37 @@ +# Optional local PostgreSQL for staging environments. +# +# Use this when you don't have an external database (e.g., for local testing +# or isolated branch previews). Creates a containerized PostgreSQL instance +# that the staging compose stack connects to via the Docker network. 
+# +# Usage: +# # Start the database first +# docker compose -f compose/staging/docker-compose.db.yml up -d +# +# # Then start the app (DATABASE_IP points to the host's Docker bridge) +# docker compose -f docker-compose.staging.yml --env-file .envs/.production/.compose up -d +# +# The app's .envs/.production/.postgres should use POSTGRES_HOST=db and the +# DATABASE_IP in .envs/.production/.compose should be set to the host IP +# of the machine running this database container (e.g., 172.17.0.1 for the +# default Docker bridge, or the host's LAN IP). +# +# For ood-style setups where the DB runs on the same host, set: +# DATABASE_IP=172.17.0.1 (or use host.docker.internal on Docker Desktop) + +volumes: + staging_postgres_data: {} + +services: + postgres: + build: + context: ../../ + dockerfile: ./compose/local/postgres/Dockerfile + volumes: + - staging_postgres_data:/var/lib/postgresql/data + - ../../data/db/snapshots:/backups + env_file: + - ../../.envs/.production/.postgres + ports: + - "5432:5432" + restart: always diff --git a/docker-compose.staging.yml b/docker-compose.staging.yml index 684e50e67..45b094c88 100644 --- a/docker-compose.staging.yml +++ b/docker-compose.staging.yml @@ -1,79 +1,81 @@ -# Identical to production.yml, but with the following differences: -# Uses the django production settings file, but staging .env file. -# Uses "local" database +# Staging / demo / branch preview deployment. # -# 1. The database is a service in the Docker Compose configuration rather than external as in production. -# 2. Redis is a service in the Docker Compose configuration rather than external as in production. -# 3. Port 5001 is exposed for the Django application. - -volumes: - ami_local_postgres_data: {} +# Like production, but runs Redis, RabbitMQ, and NATS as local containers +# instead of requiring external infrastructure services. +# Database is always external — set DATABASE_IP in .envs/.production/.compose. 
+# +# Usage: +# docker compose -f docker-compose.staging.yml --env-file .envs/.production/.compose up -d +# +# For a local database, see compose/staging/docker-compose.db.yml. +# +# Required env files: +# .envs/.production/.compose — DATABASE_IP +# .envs/.production/.django — Django settings, CELERY_BROKER_URL, NATS_URL, etc. +# .envs/.production/.postgres — POSTGRES_HOST=db, POSTGRES_DB, POSTGRES_USER, POSTGRES_PASSWORD services: django: &django build: context: . - # This is the most important setting to test the production configuration of Django. dockerfile: ./compose/production/django/Dockerfile - image: insectai/ami_backend depends_on: - - postgres - redis - # - nats + - rabbitmq + - nats env_file: - ./.envs/.production/.django - - ./.envs/.local/.postgres + - ./.envs/.production/.postgres volumes: - ./config:/app/config ports: - "5001:5000" + extra_hosts: + - "db:${DATABASE_IP}" command: /start restart: always - postgres: - build: - context: . - # There is not a local/staging version of the Postgres Dockerfile. - dockerfile: ./compose/local/postgres/Dockerfile - # Share the local Postgres image with the staging configuration. - # Production uses an external Postgres service. 
- volumes: - - ami_local_postgres_data:/var/lib/postgresql/data - - ./data/db/snapshots:/backups - env_file: - - ./.envs/.local/.postgres - restart: always - - redis: - image: redis:6 - restart: always - celeryworker: <<: *django scale: 1 ports: [] command: /start-celeryworker + restart: always celerybeat: <<: *django ports: [] command: /start-celerybeat + restart: always flower: <<: *django ports: - "5550:5555" command: /start-flower + restart: always + volumes: + - ./data/flower/:/data/ + + redis: + image: redis:6 + restart: always + + rabbitmq: + image: rabbitmq:3.13-management-alpine + hostname: rabbitmq + ports: + - "15672:15672" + restart: always nats: image: nats:2.10-alpine - container_name: ami_local_nats hostname: nats ports: - - "4222:4222" # Client port - - "8222:8222" # HTTP monitoring port - command: ["-js", "-m", "8222"] # Enable JetStream and monitoring + - "4222:4222" + - "8222:8222" + command: ["-js", "-m", "8222"] healthcheck: test: ["CMD", "wget", "--spider", "-q", "http://localhost:8222/healthz"] interval: 10s From b3d97716d06ac1ce16ce792c7dd2dbaf382c88e4 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 18 Mar 2026 14:50:00 -0700 Subject: [PATCH 02/18] =?UTF-8?q?fix:=20upgrade=20gunicorn=2020.1.0=20?= =?UTF-8?q?=E2=86=92=2023.0.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gunicorn 20.x requires pkg_resources from setuptools, which was removed in setuptools 82+. Fresh Docker image builds fail with ModuleNotFoundError on startup. gunicorn 23 drops the pkg_resources dependency entirely. 
Closes #1180 Co-Authored-By: Claude Opus 4.6 (1M context) --- requirements/base.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/base.txt b/requirements/base.txt index 3b208e9df..037eeea17 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -98,5 +98,5 @@ pytest-django==4.5.2 # https://github.com/pytest-dev/pytest-django # ------------------------------------------------------------------------------ newrelic==9.6.0 -gunicorn==20.1.0 # https://github.com/benoitc/gunicorn +gunicorn==23.0.0 # https://github.com/benoitc/gunicorn # psycopg[c]==3.1.9 # https://github.com/psycopg/psycopg From 1bb26462c46e2af948d7e47db0a66a573b39c125 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 18 Mar 2026 15:09:30 -0700 Subject: [PATCH 03/18] fix: address PR review feedback - Add env_file to rabbitmq service so it picks up RABBITMQ_DEFAULT_USER/RABBITMQ_DEFAULT_PASS from .django env - Use ${DATABASE_IP:?} required-variable syntax for fail-fast on missing config - Bind local Postgres to 127.0.0.1 instead of 0.0.0.0 - Clarify DB compose comments: document host-bridge connectivity via DATABASE_IP, remove ambiguous "Docker network" wording Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/docker-compose.db.yml | 22 ++++++++++++---------- docker-compose.staging.yml | 4 +++- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/compose/staging/docker-compose.db.yml b/compose/staging/docker-compose.db.yml index f4f1183f1..7ba5abbb3 100644 --- a/compose/staging/docker-compose.db.yml +++ b/compose/staging/docker-compose.db.yml @@ -1,23 +1,25 @@ # Optional local PostgreSQL for staging environments. # # Use this when you don't have an external database (e.g., for local testing -# or isolated branch previews). Creates a containerized PostgreSQL instance -# that the staging compose stack connects to via the Docker network. +# or isolated branch previews). Publishes PostgreSQL on localhost:5432. 
# # Usage: # # Start the database first # docker compose -f compose/staging/docker-compose.db.yml up -d # -# # Then start the app (DATABASE_IP points to the host's Docker bridge) +# # Then start the app stack # docker compose -f docker-compose.staging.yml --env-file .envs/.production/.compose up -d # -# The app's .envs/.production/.postgres should use POSTGRES_HOST=db and the -# DATABASE_IP in .envs/.production/.compose should be set to the host IP -# of the machine running this database container (e.g., 172.17.0.1 for the -# default Docker bridge, or the host's LAN IP). +# The app connects to the database via extra_hosts (db → DATABASE_IP). +# Set DATABASE_IP to the Docker bridge gateway so the app container can +# reach the host-published port: # -# For ood-style setups where the DB runs on the same host, set: -# DATABASE_IP=172.17.0.1 (or use host.docker.internal on Docker Desktop) +# .envs/.production/.compose: +# DATABASE_IP=172.17.0.1 # Linux (default Docker bridge gateway) +# DATABASE_IP=host-gateway # Docker Desktop (macOS/Windows) +# +# .envs/.production/.postgres: +# POSTGRES_HOST=db # resolves via extra_hosts to DATABASE_IP volumes: staging_postgres_data: {} @@ -33,5 +35,5 @@ services: env_file: - ../../.envs/.production/.postgres ports: - - "5432:5432" + - "127.0.0.1:5432:5432" restart: always diff --git a/docker-compose.staging.yml b/docker-compose.staging.yml index 45b094c88..052f63d6d 100644 --- a/docker-compose.staging.yml +++ b/docker-compose.staging.yml @@ -32,7 +32,7 @@ services: ports: - "5001:5000" extra_hosts: - - "db:${DATABASE_IP}" + - "db:${DATABASE_IP:?Set DATABASE_IP in .envs/.production/.compose}" command: /start restart: always @@ -65,6 +65,8 @@ services: rabbitmq: image: rabbitmq:3.13-management-alpine hostname: rabbitmq + env_file: + - ./.envs/.production/.django ports: - "15672:15672" restart: always From d5e8f6f1c7128a89512427aa66aa34965984116a Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 18 Mar 2026 15:11:34 -0700 
Subject: [PATCH 04/18] fix: remove host port bindings from internal services MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Internal services (Redis, RabbitMQ, NATS) don't need host port exposure — only the app containers talk to them via the Docker network. Removing host ports means multiple instances (branch previews, worktrees) never conflict on these ports. Django and Flower ports are now configurable via DJANGO_PORT and FLOWER_PORT env vars (default 5001 and 5550). Also use host-gateway (works on all platforms) instead of platform-specific Docker bridge IPs in DB compose docs. Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/docker-compose.db.yml | 3 +-- docker-compose.staging.yml | 21 ++++++++++++++------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/compose/staging/docker-compose.db.yml b/compose/staging/docker-compose.db.yml index 7ba5abbb3..1e94bcfc0 100644 --- a/compose/staging/docker-compose.db.yml +++ b/compose/staging/docker-compose.db.yml @@ -15,8 +15,7 @@ # reach the host-published port: # # .envs/.production/.compose: -# DATABASE_IP=172.17.0.1 # Linux (default Docker bridge gateway) -# DATABASE_IP=host-gateway # Docker Desktop (macOS/Windows) +# DATABASE_IP=host-gateway # Recommended (resolves to host on all platforms) # # .envs/.production/.postgres: # POSTGRES_HOST=db # resolves via extra_hosts to DATABASE_IP diff --git a/docker-compose.staging.yml b/docker-compose.staging.yml index 052f63d6d..ba4935498 100644 --- a/docker-compose.staging.yml +++ b/docker-compose.staging.yml @@ -9,6 +9,18 @@ # # For a local database, see compose/staging/docker-compose.db.yml. 
# +# Multiple instances: This compose file can run multiple instances on the same +# host (e.g., branch previews, worktrees) by setting a unique project name and +# overriding the published ports: +# +# DJANGO_PORT=5002 FLOWER_PORT=5551 \ +# docker compose -p my-preview -f docker-compose.staging.yml \ +# --env-file .envs/.production/.compose up -d +# +# Internal services (Redis, RabbitMQ, NATS) do not publish host ports, so they +# never conflict between instances. Each compose project gets its own isolated +# Docker network. +# # Required env files: # .envs/.production/.compose — DATABASE_IP # .envs/.production/.django — Django settings, CELERY_BROKER_URL, NATS_URL, etc. @@ -30,7 +42,7 @@ services: volumes: - ./config:/app/config ports: - - "5001:5000" + - "${DJANGO_PORT:-5001}:5000" extra_hosts: - "db:${DATABASE_IP:?Set DATABASE_IP in .envs/.production/.compose}" command: /start @@ -52,7 +64,7 @@ services: flower: <<: *django ports: - - "5550:5555" + - "${FLOWER_PORT:-5550}:5555" command: /start-flower restart: always volumes: @@ -67,16 +79,11 @@ services: hostname: rabbitmq env_file: - ./.envs/.production/.django - ports: - - "15672:15672" restart: always nats: image: nats:2.10-alpine hostname: nats - ports: - - "4222:4222" - - "8222:8222" command: ["-js", "-m", "8222"] healthcheck: test: ["CMD", "wget", "--spider", "-q", "http://localhost:8222/healthz"] From 7f75f9826c39ee1c4ce013db8623f4c77a541388 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 18 Mar 2026 15:13:11 -0700 Subject: [PATCH 05/18] docs: add staging deployment guide Setup instructions for single and multi-instance staging deployments, covering environment configuration, database options, migrations, sample data, and port management for running multiple instances on the same host. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/README.md | 170 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 170 insertions(+) create mode 100644 compose/staging/README.md diff --git a/compose/staging/README.md b/compose/staging/README.md new file mode 100644 index 000000000..473639696 --- /dev/null +++ b/compose/staging/README.md @@ -0,0 +1,170 @@ +# Staging Deployment + +Deploy the Antenna platform with local Redis, RabbitMQ, and NATS containers. +The database is always external — either a dedicated server, a managed service, +or the optional local Postgres container included here. + +## Quick Start (single instance) + +### 1. Configure environment files + +Copy the examples and fill in the values: + +```bash +# Django settings +cp .envs/.production/.django-example .envs/.production/.django + +# Database credentials +cat > .envs/.production/.postgres << 'EOF' +POSTGRES_HOST=db +POSTGRES_PORT=5432 +POSTGRES_DB=antenna_staging +POSTGRES_USER=antenna +POSTGRES_PASSWORD= +EOF + +# Database host IP +cat > .envs/.production/.compose << 'EOF' +DATABASE_IP=host-gateway +EOF +``` + +Key settings to configure in `.envs/.production/.django`: + +| Variable | Example | Notes | +|---|---|---| +| `DJANGO_SECRET_KEY` | `` | Generate with `python -c "from django.core.management.utils import get_random_secret_key; print(get_random_secret_key())"` | +| `DJANGO_ALLOWED_HOSTS` | `*` or `api.staging.example.com` | | +| `REDIS_URL` | `redis://redis:6379/0` | Always use `redis` hostname (local container) | +| `CELERY_BROKER_URL` | `amqp://antenna:password@rabbitmq:5672/` | Always use `rabbitmq` hostname | +| `RABBITMQ_DEFAULT_USER` | `antenna` | Must match the user in `CELERY_BROKER_URL` | +| `RABBITMQ_DEFAULT_PASS` | `` | Must match the password in `CELERY_BROKER_URL` | +| `NATS_URL` | `nats://nats:4222` | Always use `nats` hostname | +| `CELERY_FLOWER_USER` | `flower` | Basic auth for the Flower web UI | +| `CELERY_FLOWER_PASSWORD` | `` | | +| 
`SENDGRID_API_KEY` | `placeholder` | Set a real key to enable email, or any non-empty string to skip | +| `DJANGO_AWS_STORAGE_BUCKET_NAME` | `my-bucket` | S3-compatible object storage for media/static files | +| `DJANGO_SUPERUSER_EMAIL` | `admin@example.com` | Used by `create_demo_project` command | +| `DJANGO_SUPERUSER_PASSWORD` | `` | Used by `create_demo_project` command | + +### 2. Start the database + +If you have an external database, set `DATABASE_IP` in `.envs/.production/.compose` +to its IP address and skip this step. + +For a local database container: + +```bash +docker compose -f compose/staging/docker-compose.db.yml up -d + +# Set DATABASE_IP to reach the host-published port from app containers +echo "DATABASE_IP=host-gateway" > .envs/.production/.compose +``` + +Verify the database is ready: + +```bash +docker compose -f compose/staging/docker-compose.db.yml logs +# Should show: "database system is ready to accept connections" +``` + +### 3. Build and start the app + +```bash +docker compose -f docker-compose.staging.yml \ + --env-file .envs/.production/.compose build django + +docker compose -f docker-compose.staging.yml \ + --env-file .envs/.production/.compose up -d +``` + +### 4. Run migrations and create an admin user + +```bash +# Shorthand for the compose command +COMPOSE="docker compose -f docker-compose.staging.yml --env-file .envs/.production/.compose" + +# Apply database migrations +$COMPOSE run --rm django python manage.py migrate + +# Create demo project with sample data and admin user +$COMPOSE run --rm django python manage.py create_demo_project + +# Or just create an admin user without sample data +$COMPOSE run --rm django python manage.py createsuperuser --noinput +``` + +### 5. 
Verify + +```bash +# API root +curl http://localhost:5001/api/v2/ + +# Django admin +# Open http://localhost:5001/admin/ in a browser + +# Flower (Celery monitoring) +# Open http://localhost:5550/ in a browser + +# NATS health (internal, but reachable via docker exec) +docker compose -f docker-compose.staging.yml \ + --env-file .envs/.production/.compose \ + exec nats wget -qO- http://localhost:8222/healthz +``` + +## Multiple Instances on the Same Host + +Internal services (Redis, RabbitMQ, NATS) don't publish host ports, so they +never conflict between instances. Each compose project gets its own isolated +Docker network. + +Only Django and Flower publish host ports. Override them with environment +variables and use a unique project name (`-p`): + +```bash +# Instance A (defaults: Django on 5001, Flower on 5550) +docker compose -p antenna-main \ + -f docker-compose.staging.yml \ + --env-file .envs/.production/.compose up -d + +# Instance B (custom ports) +DJANGO_PORT=5002 FLOWER_PORT=5551 \ + docker compose -p antenna-feature-xyz \ + -f docker-compose.staging.yml \ + --env-file path/to/other/.compose up -d +``` + +Each instance needs its own: +- `.envs/.production/.compose` (can share `DATABASE_IP` if using the same DB server) +- `.envs/.production/.postgres` (use a different `POSTGRES_DB` per instance) +- `.envs/.production/.django` (can share most settings, but use unique `DJANGO_SECRET_KEY`) + +If using the local database container, each instance needs its own DB container +too (or share one by creating multiple databases in it). 
+ +## Stopping and Cleaning Up + +```bash +# Stop the app stack +docker compose -f docker-compose.staging.yml \ + --env-file .envs/.production/.compose down + +# Stop the local database (data is preserved in a Docker volume) +docker compose -f compose/staging/docker-compose.db.yml down + +# Remove everything including database data +docker compose -f compose/staging/docker-compose.db.yml down -v +``` + +## Database Options + +The staging compose supports any PostgreSQL database reachable by IP: + +| Option | `DATABASE_IP` | Notes | +|---|---|---| +| Local container | `host-gateway` | Use `compose/staging/docker-compose.db.yml` | +| Dedicated VM | `` | Best performance for shared environments | +| Managed service | `` | Cloud-hosted PostgreSQL | + +Set `POSTGRES_HOST=db` in `.envs/.production/.postgres` — the `extra_hosts` +directive in the compose file maps `db` to whatever `DATABASE_IP` resolves to. From 6b699b7198321b0a68123aa049c70803b1972f3f Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Mon, 23 Mar 2026 21:16:36 -0700 Subject: [PATCH 06/18] fix: increase DATA_UPLOAD_MAX_MEMORY_SIZE for ML worker results ML workers post classification results with up to 29K categories per image, easily exceeding Django's default 2.5MB request body limit. This caused 413 errors on the demo environment. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- config/settings/base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/config/settings/base.py b/config/settings/base.py index dad65ce21..e498515fa 100644 --- a/config/settings/base.py +++ b/config/settings/base.py @@ -386,6 +386,9 @@ CELERY_BROKER_CONNECTION_MAX_RETRIES = None # Retry forever +# Allow large request bodies from ML workers posting classification results +DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024 # 100MB (default 2.5MB) + # django-rest-framework # ------------------------------------------------------------------------------- # django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/ From 91a7f49c907db36c155ac4442402cd97c887a184 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 25 Mar 2026 14:46:16 -0700 Subject: [PATCH 07/18] fix(celery): use Redis DB 1 for result backend, separate from cache The default CELERY_RESULT_BACKEND was "rpc://" which uses RabbitMQ for results. This caused channel exhaustion (65,535 limit), connection resets, and worker crashes on the demo environment. Changes: - Derive CELERY_RESULT_BACKEND from REDIS_URL using DB 1 instead of the cache DB 0. This keeps cache and task results isolated so they can be flushed and monitored independently. 
- Add maxmemory config to staging Redis (8gb, allkeys-lru) - Falls back to rpc:// only if no REDIS_URL is configured - Env var CELERY_RESULT_BACKEND still overrides if explicitly set Redis DB layout: DB 0: Django cache (disposable, allkeys-lru eviction) DB 1: Celery task result metadata (TTL-based via CELERY_RESULT_EXPIRES) Relates to #1189 Co-Authored-By: Claude Opus 4.6 (1M context) --- config/settings/base.py | 22 ++++++++++++++++++++-- docker-compose.staging.yml | 6 ++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/config/settings/base.py b/config/settings/base.py index e498515fa..54a9ac007 100644 --- a/config/settings/base.py +++ b/config/settings/base.py @@ -2,6 +2,7 @@ Base settings to build other settings files upon. """ +import re import socket from pathlib import Path @@ -263,6 +264,21 @@ } REDIS_URL = env("REDIS_URL", default=None) + +# Derive a separate Redis DB for Celery results (DB 1) from REDIS_URL (DB 0). +# This keeps Django cache (DB 0) and Celery task metadata (DB 1) isolated so they +# can be flushed and monitored independently. +# TODO: consider separate Redis instances with different eviction policies: +# allkeys-lru for cache, volatile-ttl for results. See issue #1189. +def _celery_result_backend_url(redis_url): + if not redis_url: + return None + # Replace the DB number at the end of the URL (e.g. 
/0 -> /1) + return re.sub(r"/\d+$", "/1", redis_url) if "/" in redis_url.split(":")[-1] else redis_url + "/1" + + +CELERY_RESULT_BACKEND_URL = env("CELERY_RESULT_BACKEND", default=None) or _celery_result_backend_url(REDIS_URL) + # NATS # ------------------------------------------------------------------------------ NATS_URL = env("NATS_URL", default="nats://localhost:4222") # type: ignore[no-untyped-call] @@ -310,8 +326,10 @@ # https://docs.celeryq.dev/en/stable/userguide/configuration.html#std:setting-broker_url CELERY_BROKER_URL = env("CELERY_BROKER_URL") # https://docs.celeryq.dev/en/stable/userguide/configuration.html#std:setting-result_backend -# "rpc://" means use RabbitMQ for results backend by default -CELERY_RESULT_BACKEND = env("CELERY_RESULT_BACKEND", default="rpc://") # type: ignore[no-untyped-call] +# Use Redis DB 1 for results (separate from cache on DB 0). +# Falls back to CELERY_RESULT_BACKEND env var if explicitly set, otherwise derives from REDIS_URL. +# See issue #1189 for discussion of result backend architecture. +CELERY_RESULT_BACKEND = CELERY_RESULT_BACKEND_URL or "rpc://" # https://docs.celeryq.dev/en/stable/userguide/configuration.html#result-extended CELERY_RESULT_EXTENDED = True # https://docs.celeryq.dev/en/stable/userguide/configuration.html#result-backend-always-retry diff --git a/docker-compose.staging.yml b/docker-compose.staging.yml index ba4935498..820939d38 100644 --- a/docker-compose.staging.yml +++ b/docker-compose.staging.yml @@ -70,8 +70,14 @@ services: volumes: - ./data/flower/:/data/ + # Redis DB layout: + # DB 0: Django cache (disposable, allkeys-lru eviction) + # DB 1: Celery task result metadata (TTL-based, CELERY_RESULT_EXPIRES) + # TODO: consider separate instances with different eviction policies: + # allkeys-lru for cache, volatile-ttl for results. See issue #1189. 
redis: image: redis:6 + command: redis-server --maxmemory 8gb --maxmemory-policy allkeys-lru restart: always rabbitmq: From c845143e45f8cb05b5586f52880ea9495f6c35ab Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 25 Mar 2026 15:19:34 -0700 Subject: [PATCH 08/18] docs(celery): add comments explaining CELERY_RESULT_EXTENDED impact Document what CELERY_RESULT_EXTENDED does, why it's expensive (~19KB per task vs ~200B), and note that bulk tasks like process_nats_pipeline_result could use ignore_result=True to avoid storing large ML result JSON in the result backend. Relates to #1189 Co-Authored-By: Claude Opus 4.6 (1M context) --- config/settings/base.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/config/settings/base.py b/config/settings/base.py index 54a9ac007..2d1464d11 100644 --- a/config/settings/base.py +++ b/config/settings/base.py @@ -331,6 +331,14 @@ def _celery_result_backend_url(redis_url): # See issue #1189 for discussion of result backend architecture. CELERY_RESULT_BACKEND = CELERY_RESULT_BACKEND_URL or "rpc://" # https://docs.celeryq.dev/en/stable/userguide/configuration.html#result-extended +# Stores full task args/kwargs/name in the result backend alongside status. +# Useful for: inspecting task arguments in Flower, debugging failed tasks, +# post-hoc analysis of what data a task received. +# Cost: ~19KB per result key (vs ~200B without) because process_nats_pipeline_result +# receives the full ML result JSON as args. With thousands of tasks per job this +# adds significant memory pressure on the result backend. +# TODO: consider disabling this or setting ignore_result=True on bulk tasks +# like process_nats_pipeline_result to reduce result backend load. See #1189. 
 CELERY_RESULT_EXTENDED = True
 # https://docs.celeryq.dev/en/stable/userguide/configuration.html#result-backend-always-retry
 # https://github.com/celery/celery/pull/6122

From 14c26f51ad89c5a7303dd809db09c6a34b5a7c09 Mon Sep 17 00:00:00 2001
From: Michael Bunsen
Date: Thu, 26 Mar 2026 11:39:55 -0700
Subject: [PATCH 09/18] fix(redis): add redis.conf, disable bgsave, add CELERY_RESULT_EXPIRES
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Move Redis config to compose/staging/redis.conf for clarity
- Disable RDB persistence (save "") — bgsave of large datasets saturates disk I/O on small volumes, hanging NATS and other services
- Add CELERY_RESULT_EXPIRES=259200 default in base.py to auto-expire task results after 72 hours, preventing unbounded Redis memory growth
- Keep maxmemory 8gb and allkeys-lru eviction policy

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 compose/staging/redis.conf | 38 ++++++++++++++++++++++++++++++++++++++
 config/settings/base.py    | 12 +++++++++---
 docker-compose.staging.yml |  9 +++------
 3 files changed, 50 insertions(+), 9 deletions(-)
 create mode 100644 compose/staging/redis.conf

diff --git a/compose/staging/redis.conf b/compose/staging/redis.conf
new file mode 100644
index 000000000..84cad1343
--- /dev/null
+++ b/compose/staging/redis.conf
@@ -0,0 +1,38 @@
+# Redis configuration for staging/demo environments
+#
+# This is a minimal config for Redis running as a Docker container alongside
+# the app. A production deployment would typically use a separate Redis server
+# with its own config tuned for the available resources.
+#
+# Redis DB layout (configured in Django settings, not here):
+# DB 0: Django cache (disposable, can be flushed anytime)
+# DB 1: Celery task result metadata (auto-expires via CELERY_RESULT_EXPIRES)
+#
+# Celery result key sizes vary widely depending on the task. ML pipeline result
+# tasks (process_nats_pipeline_result) store full detection/classification JSON
+# when CELERY_RESULT_EXTENDED=True. Measured on a demo instance (2026-03-26):
+# Median: 5 KB, Avg: 191 KB, Max: 2.1 MB per key
+# A job processing ~2,500 images can produce ~480 MB of result keys
+#
+# The role of Redis in this stack is still being evaluated — it may be reduced
+# to cache-only or removed for Celery entirely. See issue #1189.
+
+# Memory limit. Adjust based on available RAM. A production server with more
+# memory might use a higher limit.
+maxmemory 8gb
+
+# Eviction policy. allkeys-lru evicts the least-recently-used key from any DB
+# when maxmemory is reached. This works when all data is regenerable (cache)
+# or has TTLs (results). If mixing persistent and ephemeral data, consider
+# volatile-ttl or separate Redis instances per concern.
+maxmemory-policy allkeys-lru
+
+# Disable RDB persistence. Staging/demo data is disposable and bgsave of large
+# datasets can saturate disk I/O on small volumes. A production deployment
+# with adequate disk should consider enabling RDB snapshots for durability.
+save ""
+
+# Network timeouts. Tune based on network conditions — longer keepalive
+# intervals may be needed for connections over unreliable networks.
+tcp-keepalive 60
+timeout 120
diff --git a/config/settings/base.py b/config/settings/base.py
index 2d1464d11..3717bb216 100644
--- a/config/settings/base.py
+++ b/config/settings/base.py
@@ -334,9 +334,11 @@ def _celery_result_backend_url(redis_url):
 # Stores full task args/kwargs/name in the result backend alongside status.
 # Useful for: inspecting task arguments in Flower, debugging failed tasks,
 # post-hoc analysis of what data a task received.
-# Cost: ~19KB per result key (vs ~200B without) because process_nats_pipeline_result
-# receives the full ML result JSON as args. With thousands of tasks per job this
-# adds significant memory pressure on the result backend.
+# Cost: result keys are large because process_nats_pipeline_result receives the +# full ML result JSON as args. Measured on demo (298 keys, 2026-03-26): +# Median: 5 KB, Avg: 191 KB, Max: 2.1 MB per key +# Distribution: 29 <1KB, 195 1-10KB, 52 100KB-1MB, 22 >1MB +# With thousands of tasks per job, this adds significant memory pressure. # TODO: consider disabling this or setting ignore_result=True on bulk tasks # like process_nats_pipeline_result to reduce result backend load. See #1189. CELERY_RESULT_EXTENDED = True @@ -345,6 +347,10 @@ def _celery_result_backend_url(redis_url): CELERY_RESULT_BACKEND_ALWAYS_RETRY = True # https://docs.celeryq.dev/en/stable/userguide/configuration.html#result-backend-max-retries CELERY_RESULT_BACKEND_MAX_RETRIES = 10 +# https://docs.celeryq.dev/en/stable/userguide/configuration.html#std:setting-result_expires +# Auto-expire task results after 72 hours. Keeps results available for inspection +# and troubleshooting while preventing unbounded growth. Override via env var (seconds). +CELERY_RESULT_EXPIRES = int(env("CELERY_RESULT_EXPIRES", default="259200")) # type: ignore[no-untyped-call] # https://docs.celeryq.dev/en/stable/userguide/configuration.html#std:setting-accept_content CELERY_ACCEPT_CONTENT = ["json"] # https://docs.celeryq.dev/en/stable/userguide/configuration.html#std:setting-task_serializer diff --git a/docker-compose.staging.yml b/docker-compose.staging.yml index 820939d38..66ae6c14b 100644 --- a/docker-compose.staging.yml +++ b/docker-compose.staging.yml @@ -70,14 +70,11 @@ services: volumes: - ./data/flower/:/data/ - # Redis DB layout: - # DB 0: Django cache (disposable, allkeys-lru eviction) - # DB 1: Celery task result metadata (TTL-based, CELERY_RESULT_EXPIRES) - # TODO: consider separate instances with different eviction policies: - # allkeys-lru for cache, volatile-ttl for results. See issue #1189. 
redis: image: redis:6 - command: redis-server --maxmemory 8gb --maxmemory-policy allkeys-lru + command: redis-server /usr/local/etc/redis/redis.conf + volumes: + - ./compose/staging/redis.conf:/usr/local/etc/redis/redis.conf:ro restart: always rabbitmq: From f12610fbf8ce925dd84994ad90b84e1606054ca3 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 21:19:42 +0000 Subject: [PATCH 10/18] chore(staging): add .compose-example and deploy.sh script Add .envs/.production/.compose-example documenting required DATABASE_IP variable. Add compose/staging/deploy.sh as the canonical deploy script (fetch, build, migrate). Co-Authored-By: Claude Opus 4.6 (1M context) --- .envs/.production/.compose-example | 4 ++++ compose/staging/deploy.sh | 16 ++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 .envs/.production/.compose-example create mode 100755 compose/staging/deploy.sh diff --git a/.envs/.production/.compose-example b/.envs/.production/.compose-example new file mode 100644 index 000000000..0baee09b6 --- /dev/null +++ b/.envs/.production/.compose-example @@ -0,0 +1,4 @@ +# IP address of the external PostgreSQL server. +# Used by docker-compose.staging.yml to map the "db" hostname. +# Set to "host-gateway" if using the local DB container (docker-compose.db.yml). +DATABASE_IP= diff --git a/compose/staging/deploy.sh b/compose/staging/deploy.sh new file mode 100755 index 000000000..adc276b0a --- /dev/null +++ b/compose/staging/deploy.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Deploy the staging stack: fetch latest code, rebuild, migrate. +# Usage: ./deploy.sh + +set -o errexit +set -o xtrace + +cd "$(dirname "$0")/../.." 
+ +git fetch origin + +docker compose -f docker-compose.staging.yml \ + --env-file .envs/.production/.compose up -d --build + +docker compose -f docker-compose.staging.yml \ + --env-file .envs/.production/.compose run --rm django python manage.py migrate From fea3af2afa9279ad945eaec0021d6e4551a2958f Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 21:22:53 +0000 Subject: [PATCH 11/18] fix(settings): use urllib.parse for Redis DB URL rewriting The regex-based approach failed for URLs with query strings or trailing slashes. Use urlparse/urlunparse to properly handle the path component. Also clarifies the Redis DB numbering convention in comments. Co-Authored-By: Claude Opus 4.6 (1M context) --- config/settings/base.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/config/settings/base.py b/config/settings/base.py index 3717bb216..2a9d71ad4 100644 --- a/config/settings/base.py +++ b/config/settings/base.py @@ -4,6 +4,7 @@ import re import socket +from urllib.parse import urlparse, urlunparse from pathlib import Path import django_stubs_ext @@ -265,16 +266,24 @@ REDIS_URL = env("REDIS_URL", default=None) -# Derive a separate Redis DB for Celery results (DB 1) from REDIS_URL (DB 0). -# This keeps Django cache (DB 0) and Celery task metadata (DB 1) isolated so they -# can be flushed and monitored independently. +# Redis DB numbering convention: +# DB 0 = Django cache (REDIS_URL, used by django-redis CACHES above) +# DB 1 = Celery result backend (derived automatically below) +# Separating DBs lets us flush cache without losing pending task results, +# and monitor each independently. The function below rewrites the path +# component of REDIS_URL to point at DB 1. # TODO: consider separate Redis instances with different eviction policies: # allkeys-lru for cache, volatile-ttl for results. See issue #1189. 
def _celery_result_backend_url(redis_url): if not redis_url: return None - # Replace the DB number at the end of the URL (e.g. /0 -> /1) - return re.sub(r"/\d+$", "/1", redis_url) if "/" in redis_url.split(":")[-1] else redis_url + "/1" + parsed = urlparse(redis_url) + parts = [s for s in parsed.path.split("/") if s] + if parts and parts[-1].isdigit(): + parts[-1] = "1" + else: + parts.append("1") + return urlunparse(parsed._replace(path="/" + "/".join(parts))) CELERY_RESULT_BACKEND_URL = env("CELERY_RESULT_BACKEND", default=None) or _celery_result_backend_url(REDIS_URL) From 87b8c12c9397edaddf05237e9c98715f6b94d8f0 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 22:08:09 +0000 Subject: [PATCH 12/18] docs(staging): add reverse proxy section with nginx example Document client_max_body_size 100M requirement for ML worker payloads, proxy_read_timeout for long API operations, and example nginx config for SSL termination. Also fix deploy.sh symlink resolution. Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/README.md | 55 +++++++++++++++++++++++++++++++++++++++ compose/staging/deploy.sh | 3 ++- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/compose/staging/README.md b/compose/staging/README.md index 473639696..9333722e0 100644 --- a/compose/staging/README.md +++ b/compose/staging/README.md @@ -168,3 +168,58 @@ The staging compose supports any PostgreSQL database reachable by IP: Set `POSTGRES_HOST=db` in `.envs/.production/.postgres` — the `extra_hosts` directive in the compose file maps `db` to whatever `DATABASE_IP` resolves to. + +## Reverse Proxy + +The staging compose exposes Django on port 5001 (configurable via `DJANGO_PORT`) +and Flower on port 5550 (`FLOWER_PORT`). For production-like deployments, put a +reverse proxy in front to handle SSL termination and domain routing. 
+ +### Example nginx config + +```nginx +server { + listen 443 ssl; + server_name api.staging.example.com; + + ssl_certificate /etc/letsencrypt/live/staging.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/staging.example.com/privkey.pem; + + # ML workers POST large result payloads (detections + classifications + # for hundreds of images per batch). 10M is too small and causes 413. + client_max_body_size 100M; + + # Long-running requests (ML job submission, large exports) + proxy_read_timeout 1200; + + location / { + proxy_pass http://127.0.0.1:5001; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} + +server { + listen 443 ssl; + server_name celery.staging.example.com; + + ssl_certificate /etc/letsencrypt/live/staging.example.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/staging.example.com/privkey.pem; + + location / { + proxy_pass http://127.0.0.1:5550; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} +``` + +Key settings: +- **`client_max_body_size 100M`** — required for ML worker result payloads. Without this, workers get 413 errors when posting detection/classification results. +- **`proxy_read_timeout 1200`** — some API operations (job submission, exports) take longer than the default 60s. +- Set `DJANGO_ALLOWED_HOSTS` in `.envs/.production/.django` to include your domain. +- Set `DJANGO_SECURE_SSL_REDIRECT=True` if all traffic goes through SSL. diff --git a/compose/staging/deploy.sh b/compose/staging/deploy.sh index adc276b0a..14d51b6f6 100755 --- a/compose/staging/deploy.sh +++ b/compose/staging/deploy.sh @@ -5,7 +5,8 @@ set -o errexit set -o xtrace -cd "$(dirname "$0")/../.." 
+SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "$0")")" && pwd)" +cd "$SCRIPT_DIR/../.." git fetch origin From 1f2db69b104f7139eefd3b7218a0a32ad0be4328 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 22:15:42 +0000 Subject: [PATCH 13/18] fix(staging): deploy.sh should pull, not just fetch git fetch updates remote refs but does not update the working tree, so the Docker build was using stale code. Use git pull --ff-only to actually update the checked-out branch. Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/deploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/compose/staging/deploy.sh b/compose/staging/deploy.sh index 14d51b6f6..4d40356ca 100755 --- a/compose/staging/deploy.sh +++ b/compose/staging/deploy.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Deploy the staging stack: fetch latest code, rebuild, migrate. +# Deploy the staging stack: pull latest code, rebuild, migrate. # Usage: ./deploy.sh set -o errexit @@ -8,7 +8,7 @@ set -o xtrace SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "$0")")" && pwd)" cd "$SCRIPT_DIR/../.." -git fetch origin +git pull --ff-only docker compose -f docker-compose.staging.yml \ --env-file .envs/.production/.compose up -d --build From 47e9fdda1cbd5e309528c96ad3e8f32cb2a205c5 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 22:17:30 +0000 Subject: [PATCH 14/18] fix(staging): add branch/host echo to deploy.sh before deploying Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/deploy.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/compose/staging/deploy.sh b/compose/staging/deploy.sh index 4d40356ca..765203ebe 100755 --- a/compose/staging/deploy.sh +++ b/compose/staging/deploy.sh @@ -8,6 +8,9 @@ set -o xtrace SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "$0")")" && pwd)" cd "$SCRIPT_DIR/../.." 
+echo "Deploying branch: $(git branch --show-current) on $(hostname)" +sleep 2 + git pull --ff-only docker compose -f docker-compose.staging.yml \ From 095c6faa85d52046e9d6e4c81dfd570636901735 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 22:20:41 +0000 Subject: [PATCH 15/18] docs(staging): clarify staging vs production, explain .envs/.production convention Staging means single-box deployment (demo, preview, testing), not a pre-production environment. The .envs/.production/ directory is a cookiecutter-django convention for non-local-dev config. Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/README.md | 11 ++++++++++- compose/staging/deploy.sh | 14 ++++++++++---- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/compose/staging/README.md b/compose/staging/README.md index 9333722e0..7b9a89ab8 100644 --- a/compose/staging/README.md +++ b/compose/staging/README.md @@ -1,9 +1,18 @@ # Staging Deployment -Deploy the Antenna platform with local Redis, RabbitMQ, and NATS containers. +Single-box deployment of the Antenna platform — all services (Django, Celery, +Redis, RabbitMQ, NATS) run on one host. Suitable for demos, previews, and +testing. Not the same as a production setup, which distributes services across +multiple servers. + The database is always external — either a dedicated server, a managed service, or the optional local Postgres container included here. +> **Note on `.envs/.production/`**: The `.production` directory name is a +> [cookiecutter-django](https://github.com/cookiecutter/cookiecutter-django) +> convention meaning "not local dev." Both staging and production deployments +> use these env files for real secrets and external service configuration. + ## Quick Start (single instance) ### 1. 
Configure environment files diff --git a/compose/staging/deploy.sh b/compose/staging/deploy.sh index 765203ebe..b6f87d4d5 100755 --- a/compose/staging/deploy.sh +++ b/compose/staging/deploy.sh @@ -1,5 +1,10 @@ #!/bin/bash # Deploy the staging stack: pull latest code, rebuild, migrate. +# +# "staging" here means a single-box deployment (all services on one host), +# as opposed to production which splits services across multiple servers. +# Used for demo instances, previews, and testing. +# # Usage: ./deploy.sh set -o errexit @@ -13,8 +18,9 @@ sleep 2 git pull --ff-only -docker compose -f docker-compose.staging.yml \ - --env-file .envs/.production/.compose up -d --build +# .envs/.production/ is a cookiecutter-django convention meaning "not local dev" — +# both staging and production deployments use it for real secrets and external services. +COMPOSE="docker compose -f docker-compose.staging.yml --env-file .envs/.production/.compose" -docker compose -f docker-compose.staging.yml \ - --env-file .envs/.production/.compose run --rm django python manage.py migrate +$COMPOSE up -d --build +$COMPOSE run --rm django python manage.py migrate From 9a3b020b986e31fbd788b6c50d444a66fc5dc16b Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 22:24:29 +0000 Subject: [PATCH 16/18] docs(staging): note potential rename to demo in future release Co-Authored-By: Claude Opus 4.6 (1M context) --- compose/staging/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/compose/staging/README.md b/compose/staging/README.md index 7b9a89ab8..857bff89a 100644 --- a/compose/staging/README.md +++ b/compose/staging/README.md @@ -13,6 +13,11 @@ or the optional local Postgres container included here. > convention meaning "not local dev." Both staging and production deployments > use these env files for real secrets and external service configuration. 
+> **Future rename**: This compose config may be renamed from "staging" to "demo" +> in a future release, since it describes a single-box deployment rather than a +> true staging/pre-production environment. File paths and compose filenames may +> change accordingly. + ## Quick Start (single instance) ### 1. Configure environment files From 076258f993550f8faa5aa1ca69c569abcc2abfb5 Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 22:25:12 +0000 Subject: [PATCH 17/18] fix: sort stdlib imports in base.py (isort) Co-Authored-By: Claude Opus 4.6 (1M context) --- config/settings/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/settings/base.py b/config/settings/base.py index 2a9d71ad4..58f320905 100644 --- a/config/settings/base.py +++ b/config/settings/base.py @@ -4,8 +4,8 @@ import re import socket -from urllib.parse import urlparse, urlunparse from pathlib import Path +from urllib.parse import urlparse, urlunparse import django_stubs_ext import environ From d0a8f1d989a1fbbe7a65a88d6e0282e9524b4b3a Mon Sep 17 00:00:00 2001 From: Michael Bunsen Date: Wed, 1 Apr 2026 15:34:13 -0700 Subject: [PATCH 18/18] fix: unused import --- config/settings/base.py | 1 - 1 file changed, 1 deletion(-) diff --git a/config/settings/base.py b/config/settings/base.py index 58f320905..c3a8750dc 100644 --- a/config/settings/base.py +++ b/config/settings/base.py @@ -2,7 +2,6 @@ Base settings to build other settings files upon. """ -import re import socket from pathlib import Path from urllib.parse import urlparse, urlunparse