version: "3.9" # COMPOSE_PROFILES controls which services start: # - dev : laptop-friendly; mounts source, exposes localhost ports, enables reloaders # - prod : VPS-friendly; no host ports for app containers, joins proxy network for Caddy x-service-defaults: &service-defaults env_file: .env restart: ${DOCKER_RESTART_POLICY:-unless-stopped} networks: - app logging: driver: "json-file" options: max-size: "10m" max-file: "3" x-backend-common: &backend-common <<: *service-defaults depends_on: postgres: condition: service_healthy services: # -------------------------------------------------------------------------- # Next.js Frontend # -------------------------------------------------------------------------- frontend: <<: *service-defaults profiles: [prod] container_name: frontend build: context: ./frontend dockerfile: Dockerfile target: runner environment: NODE_ENV: production NEXT_TELEMETRY_DISABLED: 1 PORT: 3000 expose: - "3000" depends_on: backend: condition: service_healthy healthcheck: test: [ "CMD-SHELL", "node -e \"require('http').get('http://127.0.0.1:3000', (res) => { process.exit(res.statusCode < 500 ? 0 : 1); }).on('error', () => process.exit(1));\"", ] interval: 30s timeout: 5s retries: 5 start_period: 10s networks: - app - proxy # Caddy from the infra stack reverse-proxies to this container on the proxy network. frontend-dev: <<: *service-defaults profiles: [dev] container_name: frontend-dev build: context: ./frontend dockerfile: Dockerfile target: dev command: ["npm", "run", "dev", "--", "--hostname", "0.0.0.0", "--port", "3000"] environment: NODE_ENV: development NEXT_TELEMETRY_DISABLED: 1 ports: - "3000:3000" # Localhost access during development volumes: - ./frontend:/app - frontend-node_modules:/app/node_modules depends_on: backend-dev: condition: service_healthy networks: app: aliases: - frontend # -------------------------------------------------------------------------- # FastAPI Backend # -------------------------------------------------------------------------- backend: <<: *backend-common profiles: [prod] container_name: backend build: context: ./backend dockerfile: Dockerfile target: production environment: ENVIRONMENT: production command: [ "gunicorn", "app.main:app", "-k", "uvicorn.workers.UvicornWorker", "-w", "${GUNICORN_WORKERS:-4}", "--bind", "0.0.0.0:8000", "--access-logfile", "-", "--error-logfile", "-", "--timeout", "${GUNICORN_TIMEOUT:-120}", ] expose: - "8000" healthcheck: test: [ "CMD-SHELL", "python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health').close()\"", ] interval: 30s timeout: 5s retries: 5 start_period: 10s networks: - app - proxy backend-dev: <<: *backend-common profiles: [dev] container_name: backend-dev build: context: ./backend dockerfile: Dockerfile target: development environment: ENVIRONMENT: development PYTHONPATH: /app command: ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"] ports: - "8000:8000" # Localhost access during development volumes: - ./backend:/app - backend-venv:/app/.venv healthcheck: test: [ "CMD-SHELL", "python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health').close()\"", ] interval: 15s timeout: 5s retries: 5 start_period: 10s networks: app: aliases: - backend # -------------------------------------------------------------------------- # PostgreSQL + pgvector # -------------------------------------------------------------------------- postgres: <<: *service-defaults container_name: postgres image: pgvector/pgvector:pg16 
profiles: - dev - prod volumes: - postgres-data:/var/lib/postgresql/data ports: - "127.0.0.1:${POSTGRES_PORT:-5432}:5432" # Local-only binding keeps DB off the public interface healthcheck: test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-postgres}"] interval: 10s timeout: 5s retries: 5 # -------------------------------------------------------------------------- # LiveKit Real-Time Server # -------------------------------------------------------------------------- livekit: <<: *service-defaults container_name: livekit image: livekit/livekit-server:latest profiles: - dev - prod # UDP/TCP ports remain published in prod so external clients can complete WebRTC/TURN; # Caddy still proxies signaling over the shared proxy network. environment: LIVEKIT_KEYS: "${LIVEKIT_API_KEY}:${LIVEKIT_API_SECRET}" LIVEKIT_PORT: 7880 LIVEKIT_LOG_LEVEL: ${LIVEKIT_LOG_LEVEL:-info} ports: - "7880:7880" # HTTP/WS signaling (Caddy terminates TLS) - "7881:7881" # TCP fallback for WebRTC - "3478:3478/udp" # TURN - "5349:5349/tcp" # TURN over TLS - "50000-60000:50000-60000/udp" # WebRTC media plane networks: - app - proxy volumes: postgres-data: backend-venv: frontend-node_modules: networks: app: name: app_network proxy: name: proxy external: true
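
# ------------------------------------------------------------------------------
# Usage notes (a sketch, not part of the Compose file's behavior; assumes
# Docker Compose v2, which supports `profiles` and the COMPOSE_PROFILES variable):
#
#   COMPOSE_PROFILES=dev  docker compose up --build       # laptop: reloaders, localhost ports
#   COMPOSE_PROFILES=prod docker compose up -d --build    # VPS: app traffic stays behind Caddy
#
# The flag form is equivalent: `docker compose --profile dev up --build`.
#
# The external `proxy` network must already exist (typically created by the
# infra/Caddy stack, or manually via `docker network create proxy`); otherwise
# services that join it will fail to start.
#
# Variables consumed from .env (values below are illustrative placeholders;
# only the `:-` fallbacks above are real defaults):
#   DOCKER_RESTART_POLICY=unless-stopped
#   GUNICORN_WORKERS=4
#   GUNICORN_TIMEOUT=120
#   POSTGRES_PORT=5432
#   POSTGRES_USER=postgres
#   POSTGRES_DB=postgres
#   POSTGRES_PASSWORD=change-me        # required by the postgres image; placeholder value
#   LIVEKIT_API_KEY=devkey             # placeholder value
#   LIVEKIT_API_SECRET=devsecret       # placeholder value
#   LIVEKIT_LOG_LEVEL=info
# ------------------------------------------------------------------------------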