services:
  db:
    image: pgvector/pgvector:pg17
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    # Note: Port not exposed in production for security.
    # Access via internal network only.
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: unless-stopped

  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    command: redis-server --appendonly yes
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: unless-stopped

  # ==========================================================================
  # MCP Servers - Model Context Protocol servers for AI agent capabilities
  # ==========================================================================
  mcp-llm-gateway:
    build:
      context: ./mcp-servers/llm-gateway
      dockerfile: Dockerfile
    env_file:
      - .env
    environment:
      - LLM_GATEWAY_HOST=0.0.0.0
      - LLM_GATEWAY_PORT=8001
      - REDIS_URL=redis://redis:6379/1
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ENVIRONMENT=production
    depends_on:
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:8001/health').raise_for_status()"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    networks:
      - app-network
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 2G
        reservations:
          cpus: '0.5'
          memory: 512M

  mcp-knowledge-base:
    build:
      context: ./mcp-servers/knowledge-base
      dockerfile: Dockerfile
    env_file:
      - .env
    environment:
      # KB_ prefix required by pydantic-settings config
      - KB_HOST=0.0.0.0
      - KB_PORT=8002
      - KB_DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
      - KB_REDIS_URL=redis://redis:6379/2
      - KB_LLM_GATEWAY_URL=http://mcp-llm-gateway:8001
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ENVIRONMENT=production
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:8002/health').raise_for_status()"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    networks:
      - app-network
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M

  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: production
    ports:
      - "8000:8000"
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - SECRET_KEY=${SECRET_KEY}
      - ENVIRONMENT=production
      - DEBUG=false
      - BACKEND_CORS_ORIGINS=${BACKEND_CORS_ORIGINS}
      - REDIS_URL=redis://redis:6379/0
      # MCP Server URLs
      - LLM_GATEWAY_URL=http://mcp-llm-gateway:8001
      - KNOWLEDGE_BASE_URL=http://mcp-knowledge-base:8002
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
      mcp-llm-gateway:
        condition: service_healthy
      mcp-knowledge-base:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Celery workers for background task processing (per ADR-003)
  celery-agent:
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: production
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
      - CELERY_QUEUE=agent
      # MCP Server URLs (agents need access to MCP)
      - LLM_GATEWAY_URL=http://mcp-llm-gateway:8001
      - KNOWLEDGE_BASE_URL=http://mcp-knowledge-base:8002
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
      mcp-llm-gateway:
        condition: service_healthy
      mcp-knowledge-base:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "worker", "-Q", "agent", "-l", "info", "-c", "4"]
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '0.5'
          memory: 512M

  celery-git:
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: production
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
      - CELERY_QUEUE=git
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "worker", "-Q", "git", "-l", "info", "-c", "2"]
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 2G
        reservations:
          cpus: '0.25'
          memory: 256M

  celery-sync:
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: production
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
      - CELERY_QUEUE=sync
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "worker", "-Q", "sync", "-l", "info", "-c", "2"]
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 2G
        reservations:
          cpus: '0.25'
          memory: 256M

  celery-beat:
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: production
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "beat", "-l", "info"]
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
        reservations:
          cpus: '0.1'
          memory: 128M

  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
      target: runner
      args:
        - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
      - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
    depends_on:
      backend:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped

volumes:
  postgres_data:
  redis_data:

networks:
  app-network:
    driver: bridge
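
# --------------------------------------------------------------------------
# The services above interpolate variables from a sibling .env file. A minimal
# sketch of that file is shown below for orientation only: the variable names
# are taken from the references above, but every value is a placeholder and
# the exact formats (e.g. BACKEND_CORS_ORIGINS, DATABASE_URL) are assumptions
# to be checked against the backend's settings.
#
#   POSTGRES_USER=app
#   POSTGRES_PASSWORD=change-me
#   POSTGRES_DB=app
#   DATABASE_URL=postgresql://app:change-me@db:5432/app   # assumes the internal "db" hostname
#   SECRET_KEY=generate-a-long-random-string
#   BACKEND_CORS_ORIGINS=https://example.com               # format assumed; may need a JSON list
#   ANTHROPIC_API_KEY=...
#   OPENAI_API_KEY=...
#   NEXT_PUBLIC_API_URL=https://example.com
#
# With the .env file in place, a typical bring-up (compose file name assumed)
# would be: docker compose up -d --build
# --------------------------------------------------------------------------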