Refine Docker config and env sample

commit 1dc225dd77 (parent f8ab8f761f)
2025-11-26 09:39:28 +01:00
4 changed files with 316 additions and 280 deletions


@@ -1,180 +1,219 @@
#
# APP DOCKER COMPOSE
#
# This file defines the application services for avaaz.ai. It works in both
# development and production, controlled by the COMPOSE_PROFILES environment
# variable:
#
#   - dev  : laptop-friendly; mounts local source for hot-reloading, exposes
#            ports on localhost, and runs development commands.
#   - prod : VPS-friendly; publishes no host ports for app containers, joins
#            the shared `proxy` network so Caddy can reverse-proxy, and
#            enables restarts.
#
# To run in development:
#   > COMPOSE_PROFILES=dev docker compose up --build
#
# To run in production:
#   > COMPOSE_PROFILES=prod docker compose up --build -d
#
# For more details, see: ./docs/architecture.md
#
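# The shared .env file (copy the repo's env sample) supplies the variables
# referenced below. A minimal sketch with illustrative placeholder values
# (POSTGRES_PASSWORD is not referenced here but the postgres image needs it):
#
#   COMPOSE_PROFILES=dev
#   DOCKER_RESTART_POLICY=unless-stopped
#   POSTGRES_USER=postgres
#   POSTGRES_PASSWORD=change-me
#   POSTGRES_DB=postgres
#   POSTGRES_PORT=5432
#   GUNICORN_WORKERS=4
#   GUNICORN_TIMEOUT=120
#   LIVEKIT_API_KEY=devkey
#   LIVEKIT_API_SECRET=devsecret
#   LIVEKIT_LOG_LEVEL=info
#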
x-service-defaults: &service-defaults
env_file: .env
restart: ${DOCKER_RESTART_POLICY:-unless-stopped}
networks:
- app
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
x-backend-common: &backend-common
<<: *service-defaults
depends_on:
postgres:
condition: service_healthy
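# Services pull these defaults in with YAML merge keys: `<<: *backend-common`
# expands to everything in *service-defaults plus the postgres depends_on,
# and any key a service sets locally overrides the merged value.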
services:
# --------------------------------------------------------------------------
# Next.js Frontend
# --------------------------------------------------------------------------
frontend:
# Service name matches the Caddyfile reverse_proxy directive.
<<: *service-defaults
profiles: [prod]
container_name: frontend
    build:
      context: ./frontend
      # The Dockerfile is multi-stage, covering development and production;
      # `runner` is the lean production stage.
      dockerfile: Dockerfile
      target: runner
    # The service is stateless, so no volumes here; frontend-dev bind-mounts
    # the source for hot-reloading instead.
environment:
NODE_ENV: production
NEXT_TELEMETRY_DISABLED: 1
PORT: 3000
expose:
- "3000"
depends_on:
backend:
condition: service_healthy
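    # Node one-liner probe: any HTTP status below 500 counts as healthy, and
    # it avoids needing curl in the runtime image.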
healthcheck:
test:
[
"CMD-SHELL",
"node -e \"require('http').get('http://127.0.0.1:3000', (res) => { process.exit(res.statusCode < 500 ? 0 : 1); }).on('error', () => process.exit(1));\"",
]
interval: 30s
timeout: 5s
retries: 5
start_period: 10s
networks:
- app
- proxy
# Caddy from the infra stack reverse-proxies to this container on the proxy network.
frontend-dev:
<<: *service-defaults
profiles: [dev]
container_name: frontend-dev
build:
context: ./frontend
dockerfile: Dockerfile
target: dev
command: ["npm", "run", "dev", "--", "--hostname", "0.0.0.0", "--port", "3000"]
environment:
NODE_ENV: development
NEXT_TELEMETRY_DISABLED: 1
ports:
- "3000:3000" # Localhost access during development
volumes:
# Mounts the local frontend source code into the container for
# hot-reloading during development.
- ./frontend:/app
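      # Named volume prevents the host bind mount from shadowing
      # container-installed node_modules.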
- frontend-node_modules:/app/node_modules
depends_on:
backend-dev:
condition: service_healthy
networks:
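      # Alias lets other dev containers resolve this service by the same
      # `frontend` hostname used in prod.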
app:
aliases:
- frontend
# --------------------------------------------------------------------------
# FastAPI Backend
# --------------------------------------------------------------------------
backend:
# Service name matches the Caddyfile reverse_proxy directive.
<<: *backend-common
profiles: [prod]
container_name: backend
build:
context: ./backend
# The Dockerfile should contain stages for both development (with
# debugging tools) and production (a lean, optimized image).
dockerfile: Dockerfile
target: production
environment:
ENVIRONMENT: production
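    # Gunicorn supervises Uvicorn ASGI workers. Worker count is tunable via
    # .env; a common sizing heuristic (an assumption, tune per host) is
    # (2 * CPU cores) + 1.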
command:
[
"gunicorn",
"app.main:app",
"-k",
"uvicorn.workers.UvicornWorker",
"-w",
"${GUNICORN_WORKERS:-4}",
"--bind",
"0.0.0.0:8000",
"--access-logfile",
"-",
"--error-logfile",
"-",
"--timeout",
"${GUNICORN_TIMEOUT:-120}",
]
expose:
- "8000"
healthcheck:
test:
[
"CMD-SHELL",
"python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health').close()\"",
]
interval: 30s
timeout: 5s
retries: 5
start_period: 10s
networks:
- app
- proxy
backend-dev:
<<: *backend-common
profiles: [dev]
container_name: backend-dev
build:
context: ./backend
dockerfile: Dockerfile
target: development
environment:
ENVIRONMENT: development
PYTHONPATH: /app
command: ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
ports:
- "8000:8000" # Localhost access during development
volumes:
- ./backend:/app
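      # Named volume keeps the in-container virtualenv out of the host bind
      # mount.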
- backend-venv:/app/.venv
healthcheck:
test:
[
"CMD-SHELL",
"python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health').close()\"",
]
interval: 15s
timeout: 5s
retries: 5
start_period: 10s
networks:
app:
aliases:
- backend
# --------------------------------------------------------------------------
# PostgreSQL + pgvector
# --------------------------------------------------------------------------
postgres:
<<: *service-defaults
container_name: postgres
image: pgvector/pgvector:pg16
profiles:
- dev
- prod
# A volume is essential to persist database data across container
# restarts and deployments.
volumes:
- postgres-data:/var/lib/postgresql/data
    ports:
      - "127.0.0.1:${POSTGRES_PORT:-5432}:5432" # Local-only binding keeps the DB off the public interface
    # The healthcheck ensures that other services don't start until the
    # database is ready to accept connections.
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-postgres}"]
      interval: 10s
      timeout: 5s
      retries: 5
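    # With the local-only binding above, the DB is reachable from the host
    # (assuming psql is installed there), e.g.:
    #   > psql -h 127.0.0.1 -p 5432 -U postgres postgres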
# --------------------------------------------------------------------------
# LiveKit Real-Time Server
# --------------------------------------------------------------------------
livekit:
# Service name matches the Caddyfile reverse_proxy directive.
<<: *service-defaults
container_name: livekit
# Use the latest official LiveKit server image.
image: livekit/livekit-server:latest
profiles:
- dev
- prod
# UDP/TCP ports remain published in prod so external clients can complete WebRTC/TURN;
# Caddy still proxies signaling over the shared proxy network.
environment:
LIVEKIT_KEYS: "${LIVEKIT_API_KEY}:${LIVEKIT_API_SECRET}"
LIVEKIT_PORT: 7880
LIVEKIT_LOG_LEVEL: ${LIVEKIT_LOG_LEVEL:-info}
ports:
- "7880:7880" # HTTP/WS signaling (Caddy terminates TLS)
- "7881:7881" # TCP fallback for WebRTC
- "3478:3478/udp" # TURN
- "5349:5349/tcp" # TURN over TLS
- "50000-60000:50000-60000/udp" # WebRTC media plane
networks:
- app
- proxy
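  # In prod these ports must also be open on the host firewall; an
  # illustrative ufw setup (adjust to your firewall of choice):
  #   > ufw allow 7880/tcp && ufw allow 7881/tcp
  #   > ufw allow 3478/udp && ufw allow 5349/tcp
  #   > ufw allow 50000:60000/udp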
# ----------------------------------------------------------------------------
# Volumes
# ----------------------------------------------------------------------------
# Defines the named volume for persisting PostgreSQL data.
volumes:
postgres-data:
driver: local
backend-venv:
frontend-node_modules:
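# To snapshot the database volume, for example (the runtime volume name may
# carry a compose project prefix, check `docker volume ls`):
#   > docker run --rm -v postgres-data:/data -v "$PWD":/backup alpine \
#       tar czf /backup/postgres-data.tgz -C /data .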
# ----------------------------------------------------------------------------
# Networks
# ----------------------------------------------------------------------------
# Defines the networks used by the services.
networks:
  # Internal network for communication between app services.
  app:
    name: app_network
proxy:
# This external network connects services to the Caddy reverse proxy
# defined in `infra/docker-compose.yml`.
name: proxy
external: true
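# The external proxy network is created by the infra stack. On a fresh host
# where it doesn't exist yet, create it once with:
#   > docker network create proxy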