# playground/app/docker-compose.yml
#
# APP DOCKER COMPOSE
#
# This file defines the application services for avaaz.ai. It is designed
# to work in both development and production environments, controlled by the
# `COMPOSE_PROFILES` environment variable.
#
# Profiles:
# - `dev`: For local development. Exposes ports to localhost, mounts local
# code for hot-reloading, and uses development-specific commands.
# - `prod`: For production. Does not expose ports directly (relies on the
# `proxy` network), uses production-ready commands, and enables
# restarts.
#
# To run in development:
# > COMPOSE_PROFILES=dev docker compose up --build
#
# To run in production:
# > COMPOSE_PROFILES=prod docker compose up --build -d
#
# For more details, see: ./docs/architecture.md
#
services:
  # --------------------------------------------------------------------------
  # Next.js Frontend
  # --------------------------------------------------------------------------
  frontend:
    # Service name matches the Caddyfile reverse_proxy directive.
    container_name: frontend
    build:
      context: ./frontend
      # The Dockerfile is expected to handle multi-stage builds for both
      # development and production.
      dockerfile: Dockerfile
    # NOTE(review): the original declared a `- if: COMPOSE_PROFILES=dev`
    # volume entry. Compose has no conditional syntax — that item is an
    # invalid volume spec and the file fails to load. The supported pattern
    # is to put dev-only hot-reload bind mounts in
    # `docker-compose.override.yml`, which `docker compose up` merges
    # automatically in development and which production (invoked with
    # explicit `-f` files) never sees, e.g.:
    #
    #   services:
    #     frontend:
    #       volumes:
    #         - ./frontend:/app
    #
    # Environment variables are loaded from the shared .env file.
    env_file: .env
    # Restart policy is env-controlled: set DOCKER_RESTART_POLICY=no in the
    # dev .env so crashed containers stay stopped for debugging; the default
    # suits production.
    restart: ${DOCKER_RESTART_POLICY:-unless-stopped}
    # Join the external `proxy` network (declared at the bottom of this
    # file) so the Caddy reverse proxy can reach this service; `default`
    # keeps internal app-to-app connectivity.
    networks:
      - default
      - proxy
    profiles:
      - dev
      - prod
# --------------------------------------------------------------------------
# FastAPI Backend
# --------------------------------------------------------------------------
backend:
# Service name matches the Caddyfile reverse_proxy directive.
container_name: backend
build:
context: ./backend
# The Dockerfile should contain stages for both development (with
# debugging tools) and production (a lean, optimized image).
dockerfile: Dockerfile
# The application is stateless. A bind mount is used in development.
volumes:
# Mounts the local backend source code for hot-reloading with uvicorn.
- if:
- COMPOSE_PROFILES=dev
type: bind
source: ./backend
target: /app
# Environment variables provide configuration for database connections,
# API keys, and other secrets.
env_file: .env
# Explicitly depend on postgres to ensure it starts first.
depends_on:
postgres:
condition: service_healthy
# Use development-specific command for auto-reloading.
command:
- if:
- COMPOSE_PROFILES=dev
# Uvicorn with --reload watches for file changes.
content: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
- else:
# Gunicorn is a battle-tested WSGI server for production.
content: gunicorn -w 4 -k uvicorn.workers.UvicornWorker app.main:app --bind 0.0.0.0:8000
restart: ${DOCKER_RESTART_POLICY:-unless-stopped}
profiles:
- dev
- prod
# --------------------------------------------------------------------------
# PostgreSQL Database
# --------------------------------------------------------------------------
postgres:
# Standard service name for a PostgreSQL database.
container_name: postgres
# Use the latest official Postgres image with pgvector support.
image: pgvector/pgvector:pg16
# A volume is essential to persist database data across container
# restarts and deployments.
volumes:
- postgres-data:/var/lib/postgresql/data
env_file: .env
# The healthcheck ensures that other services don't start until the
# database is ready to accept connections.
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-postgres}"]
interval: 10s
timeout: 5s
retries: 5
restart: ${DOCKER_RESTART_POLICY:-unless-stopped}
profiles:
- dev
- prod
# --------------------------------------------------------------------------
# LiveKit Real-Time Server
# --------------------------------------------------------------------------
livekit:
# Service name matches the Caddyfile reverse_proxy directive.
container_name: livekit
# Use the latest official LiveKit server image.
image: livekit/livekit-server:latest
# The command starts the server with a configuration file. The file is
# generated on startup based on environment variables.
command: --config /etc/livekit.yaml
# In development, ports are exposed for direct connection. In production,
# Caddy handles this.
ports:
# WebRTC signaling (TCP/WS)
- target: 7880
published: 7880
protocol: tcp
mode: host
# WebRTC media (UDP)
- target: 50000-60000
published: 50000-60000
protocol: udp
mode: host
environment:
# The livekit.yaml is generated from environment variables.
# This allows easy configuration without managing a separate file.
LIVEKIT_KEYS: "${LIVEKIT_API_KEY}:${LIVEKIT_API_SECRET}"
LIVEKIT_PORT: 7880
LIVEKIT_LOG_LEVEL: info
LIVEKIT_RTC_UDP_PORT: 7881
LIVEKIT_RTC_TCP_PORT: 7881
LIVEKIT_TURN_ENABLED: "true"
LIVEKIT_TURN_PORT: 3478
env_file: .env
restart: ${DOCKER_RESTART_POLICY:-unless-stopped}
profiles:
- dev
- prod
# ----------------------------------------------------------------------------
# Volumes
# ----------------------------------------------------------------------------
volumes:
  # Named volume backing the postgres service's data directory, so the
  # database survives container restarts and redeployments.
  postgres-data:
    driver: local
# ----------------------------------------------------------------------------
# Networks
# ----------------------------------------------------------------------------
networks:
  # Internal network for app-service-to-app-service traffic; every service
  # in this file joins it implicitly.
  default:
    name: app_network
  # Pre-existing network owned by `infra/docker-compose.yml`, where the
  # Caddy reverse proxy lives. Marked external so Compose attaches to it
  # rather than trying to create it.
  proxy:
    name: proxy
    external: true