# Docker Compose configuration for DEPLOYMENT with pre-built images
#
# IMPORTANT: This configuration is designed for deployment scenarios where you have
# already built and pushed your Docker images to a container registry.
#
# Since this is a template project, you'll need to:
# 1. Build your images: docker-compose build
# 2. Tag them appropriately: docker tag <image> <registry>/<image>:<tag>
# 3. Push to your registry: docker push <registry>/<image>:<tag>
# 4. Update the image references below to point to your registry
#
# Example registry paths:
# - Docker Hub: username/project-backend:latest
# - GitHub Container Registry: ghcr.io/username/project-backend:latest
# - GitLab Registry: registry.gitlab.com/username/project/backend:latest
# - Private Registry: registry.example.com/project-backend:latest

services:
  db:
    image: pgvector/pgvector:pg17
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    # Note: Port not exposed in production for security
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: unless-stopped

  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    command: redis-server --appendonly yes
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: unless-stopped

  # ==========================================================================
  # MCP Servers - Model Context Protocol servers for AI agent capabilities
  # ==========================================================================
  mcp-llm-gateway:
    # REPLACE THIS with your actual image from your container registry
    image: YOUR_REGISTRY/YOUR_PROJECT_MCP_LLM_GATEWAY:latest
    env_file:
      - .env
    environment:
      - LLM_GATEWAY_HOST=0.0.0.0
      - LLM_GATEWAY_PORT=8001
      - REDIS_URL=redis://redis:6379/1
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ENVIRONMENT=production
    depends_on:
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:8001/health').raise_for_status()"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    networks:
      - app-network
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 2G
        reservations:
          cpus: '0.5'
          memory: 512M

  mcp-knowledge-base:
    # REPLACE THIS with your actual image from your container registry
    image: YOUR_REGISTRY/YOUR_PROJECT_MCP_KNOWLEDGE_BASE:latest
    env_file:
      - .env
    environment:
      # KB_ prefix required by pydantic-settings config
      - KB_HOST=0.0.0.0
      - KB_PORT=8002
      - KB_DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}
      - KB_REDIS_URL=redis://redis:6379/2
      - KB_LLM_GATEWAY_URL=http://mcp-llm-gateway:8001
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ENVIRONMENT=production
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "python", "-c", "import httpx; httpx.get('http://localhost:8002/health').raise_for_status()"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    networks:
      - app-network
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M

  backend:
    # REPLACE THIS with your actual image from your container registry
    # Examples:
    #   image: ghcr.io/your-username/your-project-backend:latest
    #   image: your-registry.com/your-project/backend:v1.0.0
    #   image: username/your-project-backend:latest
    image: YOUR_REGISTRY/YOUR_PROJECT_BACKEND:latest
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - SECRET_KEY=${SECRET_KEY}
      - ENVIRONMENT=production
      - DEBUG=false
      - BACKEND_CORS_ORIGINS=${BACKEND_CORS_ORIGINS}
      - REDIS_URL=redis://redis:6379/0
      # MCP Server URLs
      - LLM_GATEWAY_URL=http://mcp-llm-gateway:8001
      - KNOWLEDGE_BASE_URL=http://mcp-knowledge-base:8002
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
      mcp-llm-gateway:
        condition: service_healthy
      mcp-knowledge-base:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    # Uncomment if you need persistent data storage for uploads, etc.
    # volumes:
    #   - ${HOST_DATA_FILES_DIR:-./data}:${DATA_FILES_DIR:-/app/data}

  # Celery workers for background task processing (per ADR-003)
  celery-agent:
    # REPLACE THIS with your backend image
    image: YOUR_REGISTRY/YOUR_PROJECT_BACKEND:latest
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
      - CELERY_QUEUE=agent
      # MCP Server URLs (agents need access to MCP)
      - LLM_GATEWAY_URL=http://mcp-llm-gateway:8001
      - KNOWLEDGE_BASE_URL=http://mcp-knowledge-base:8002
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
      mcp-llm-gateway:
        condition: service_healthy
      mcp-knowledge-base:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "worker", "-Q", "agent", "-l", "info", "-c", "4"]
    deploy:
      resources:
        limits:
          cpus: '2.0'
          memory: 4G
        reservations:
          cpus: '0.5'
          memory: 512M

  celery-git:
    # REPLACE THIS with your backend image
    image: YOUR_REGISTRY/YOUR_PROJECT_BACKEND:latest
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
      - CELERY_QUEUE=git
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "worker", "-Q", "git", "-l", "info", "-c", "2"]
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 2G
        reservations:
          cpus: '0.25'
          memory: 256M

  celery-sync:
    # REPLACE THIS with your backend image
    image: YOUR_REGISTRY/YOUR_PROJECT_BACKEND:latest
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
      - CELERY_QUEUE=sync
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "worker", "-Q", "sync", "-l", "info", "-c", "2"]
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 2G
        reservations:
          cpus: '0.25'
          memory: 256M

  celery-beat:
    # REPLACE THIS with your backend image
    image: YOUR_REGISTRY/YOUR_PROJECT_BACKEND:latest
    env_file:
      - .env
    environment:
      - DATABASE_URL=${DATABASE_URL}
      - REDIS_URL=redis://redis:6379/0
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped
    command: ["celery", "-A", "app.celery_app", "beat", "-l", "info"]
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
        reservations:
          cpus: '0.1'
          memory: 128M

  frontend:
    # REPLACE THIS with your actual image from your container registry
    # Examples:
    #   image: ghcr.io/your-username/your-project-frontend:latest
    #   image: your-registry.com/your-project/frontend:v1.0.0
    #   image: username/your-project-frontend:latest
    image: YOUR_REGISTRY/YOUR_PROJECT_FRONTEND:latest
    environment:
      - NODE_ENV=production
      - NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
      - NEXT_PUBLIC_API_BASE_URL=http://backend:8000
    depends_on:
      backend:
        condition: service_healthy
    networks:
      - app-network
    restart: unless-stopped

  # Optional: Add a reverse proxy like nginx or traefik here
  # nginx:
  #   image: nginx:alpine
  #   ports:
  #     - "80:80"
  #     - "443:443"
  #   volumes:
  #     - ./nginx.conf:/etc/nginx/nginx.conf:ro
  #     - ./ssl:/etc/nginx/ssl:ro
  #   depends_on:
  #     - frontend
  #     - backend
  #   networks:
  #     - app-network
  #   restart: unless-stopped

volumes:
  postgres_data:
  redis_data:

networks:
  app-network:
    driver: bridge