Refine Docker config and env sample

2025-11-26 09:39:28 +01:00
parent f8ab8f761f
commit 1dc225dd77
4 changed files with 316 additions and 280 deletions

View File: docker compose .env sample (new file)

@@ -0,0 +1,49 @@
#
# Sample environment for docker compose. Copy to .env and adjust.
#
# Profiles:
#   dev  - laptop development (hot reload + localhost ports)
#   prod - VPS behind Caddy (no public container ports; secrets provided by CI/CD)
#

COMPOSE_PROFILES=dev
DOCKER_RESTART_POLICY=unless-stopped

# PostgreSQL
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
POSTGRES_DB=avaaz
POSTGRES_HOST=postgres
POSTGRES_PORT=5432
DATABASE_URL=postgresql+psycopg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}

# Backend
ENVIRONMENT=development
SECRET_KEY=dev-secret-change-me
GUNICORN_WORKERS=4
GUNICORN_TIMEOUT=120
OPENAI_API_KEY=
GOOGLE_API_KEY=

# LiveKit
LIVEKIT_API_KEY=devkey
LIVEKIT_API_SECRET=devsecret
LIVEKIT_LOG_LEVEL=info
LIVEKIT_WS_URL=ws://livekit:7880
LIVEKIT_URL=http://livekit:7880

# Frontend
NEXT_PUBLIC_API_URL=http://localhost:8000
NEXT_PUBLIC_LIVEKIT_WS_URL=ws://localhost:7880

# Production overrides (supply via secrets/CI, not committed):
# COMPOSE_PROFILES=prod
# ENVIRONMENT=production
# SECRET_KEY=<strong-random-secret>
# NEXT_PUBLIC_API_URL=https://api.avaaz.ai
# NEXT_PUBLIC_LIVEKIT_WS_URL=wss://rtc.avaaz.ai
# LIVEKIT_WS_URL=ws://livekit:7880
# LIVEKIT_API_KEY=<lk-key>
# LIVEKIT_API_SECRET=<lk-secret>
# OPENAI_API_KEY=<openai-key>
# GOOGLE_API_KEY=<gemini-key>
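
A minimal bootstrap sketch for this sample (the sample's exact filename is not shown on this page, so .env.example below is a stand-in; the dev command is the one quoted in the compose file's previous header):

# Copy the sample and adjust values (.env.example is a stand-in name; use the repo's actual sample file).
cp .env.example .env
# Development profile: hot reload and localhost ports.
COMPOSE_PROFILES=dev docker compose up --build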

View File: backend/Dockerfile

@@ -1,99 +1,84 @@
#
# BACKEND DOCKERFILE
#
# Multi-stage image for the FastAPI + LiveKit Agent backend using uv.
# - production:  smallest runtime image with gunicorn/uvicorn worker
# - development: hot-reload friendly image with full toolchain
# - builder:     installs dependencies once for reuse across stages
#
# Keep dependency definitions aligned with docs/architecture.md.

FROM python:3.12-slim AS base

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    UV_PROJECT_ENVIRONMENT=/app/.venv \
    UV_LINK_MODE=copy

RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        libpq-dev \
    && rm -rf /var/lib/apt/lists/*

RUN groupadd --system app && useradd --system --home /app --gid app app

WORKDIR /app

# Install uv globally so subsequent stages share the toolchain.
RUN pip install --upgrade pip uv

# ------------------------------------------------------------------------------
# Builder: install prod dependencies into an in-project virtualenv
# ------------------------------------------------------------------------------
FROM base AS builder

COPY . .
RUN test -f pyproject.toml || (echo "pyproject.toml is required for uv sync"; exit 1)
RUN if [ -f uv.lock ]; then \
        uv sync --frozen --no-dev --compile-bytecode; \
    else \
        uv sync --no-dev --compile-bytecode; \
    fi

# ------------------------------------------------------------------------------
# Production: minimal runtime image with gunicorn as the entrypoint
# ------------------------------------------------------------------------------
FROM python:3.12-slim AS production

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1

RUN apt-get update \
    && apt-get install -y --no-install-recommends libpq5 \
    && rm -rf /var/lib/apt/lists/*

RUN groupadd --system app && useradd --system --home /app --gid app app

WORKDIR /app

COPY --from=builder --chown=app:app /app /app

ENV PATH="/app/.venv/bin:$PATH"

USER app
EXPOSE 8000
CMD ["gunicorn", "-w", "4", "-k", "uvicorn.workers.UvicornWorker", "app.main:app", "--bind", "0.0.0.0:8000"]

# ------------------------------------------------------------------------------
# Development: includes dev dependencies and keeps uvicorn reload-friendly
# ------------------------------------------------------------------------------
FROM base AS development

COPY . .
RUN test -f pyproject.toml || (echo "pyproject.toml is required for uv sync"; exit 1)
RUN if [ -f uv.lock ]; then \
        uv sync --frozen --dev --compile-bytecode; \
    else \
        uv sync --dev --compile-bytecode; \
    fi

ENV PATH="/app/.venv/bin:$PATH"

USER app
EXPOSE 8000
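
The stages can also be exercised directly, outside compose; a sketch with illustrative image tags (avaaz-backend:* is not a name from the repo):

# Lean production image (gunicorn/uvicorn worker entrypoint):
docker build --target production -t avaaz-backend:prod ./backend
# Hot-reload development image (dev dependencies installed via uv sync):
docker build --target development -t avaaz-backend:dev ./backend
# Smoke test; the compose healthcheck assumes a /health endpoint on port 8000.
docker run --rm -p 8000:8000 avaaz-backend:prod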

View File: docker-compose.yml

@@ -1,180 +1,219 @@
version: "3.9"

# COMPOSE_PROFILES controls which services start:
# - dev  : laptop-friendly; mounts source, exposes localhost ports, enables reloaders
# - prod : VPS-friendly; no host ports for app containers, joins proxy network for Caddy

x-service-defaults: &service-defaults
  env_file: .env
  restart: ${DOCKER_RESTART_POLICY:-unless-stopped}
  networks:
    - app
  logging:
    driver: "json-file"
    options:
      max-size: "10m"
      max-file: "3"

x-backend-common: &backend-common
  <<: *service-defaults
  depends_on:
    postgres:
      condition: service_healthy

services:
  # --------------------------------------------------------------------------
  # Next.js Frontend
  # --------------------------------------------------------------------------
  frontend:
    <<: *service-defaults
    profiles: [prod]
    container_name: frontend
    build:
      context: ./frontend
      dockerfile: Dockerfile
      target: runner
    environment:
      NODE_ENV: production
      NEXT_TELEMETRY_DISABLED: 1
      PORT: 3000
    expose:
      - "3000"
    depends_on:
      backend:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "node -e \"require('http').get('http://127.0.0.1:3000', (res) => { process.exit(res.statusCode < 500 ? 0 : 1); }).on('error', () => process.exit(1));\"",
        ]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      - app
      - proxy

  # Caddy from the infra stack reverse-proxies to this container on the proxy network.
  frontend-dev:
    <<: *service-defaults
    profiles: [dev]
    container_name: frontend-dev
    build:
      context: ./frontend
      dockerfile: Dockerfile
      target: dev
    command: ["npm", "run", "dev", "--", "--hostname", "0.0.0.0", "--port", "3000"]
    environment:
      NODE_ENV: development
      NEXT_TELEMETRY_DISABLED: 1
    ports:
      - "3000:3000" # Localhost access during development
    volumes:
      - ./frontend:/app
      - frontend-node_modules:/app/node_modules
    depends_on:
      backend-dev:
        condition: service_healthy
    networks:
      app:
        aliases:
          - frontend

  # --------------------------------------------------------------------------
  # FastAPI Backend
  # --------------------------------------------------------------------------
  backend:
    <<: *backend-common
    profiles: [prod]
    container_name: backend
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: production
    environment:
      ENVIRONMENT: production
    command:
      [
        "gunicorn",
        "app.main:app",
        "-k",
        "uvicorn.workers.UvicornWorker",
        "-w",
        "${GUNICORN_WORKERS:-4}",
        "--bind",
        "0.0.0.0:8000",
        "--access-logfile",
        "-",
        "--error-logfile",
        "-",
        "--timeout",
        "${GUNICORN_TIMEOUT:-120}",
      ]
    expose:
      - "8000"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health').close()\"",
        ]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      - app
      - proxy

  backend-dev:
    <<: *backend-common
    profiles: [dev]
    container_name: backend-dev
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: development
    environment:
      ENVIRONMENT: development
      PYTHONPATH: /app
    command: ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
    ports:
      - "8000:8000" # Localhost access during development
    volumes:
      - ./backend:/app
      - backend-venv:/app/.venv
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health').close()\"",
        ]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      app:
        aliases:
          - backend

  # --------------------------------------------------------------------------
  # PostgreSQL + pgvector
  # --------------------------------------------------------------------------
  postgres:
    <<: *service-defaults
    container_name: postgres
    image: pgvector/pgvector:pg16
    profiles:
      - dev
      - prod
    volumes:
      - postgres-data:/var/lib/postgresql/data
    ports:
      - "127.0.0.1:${POSTGRES_PORT:-5432}:5432" # Local-only binding keeps DB off the public interface
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-postgres}"]
      interval: 10s
      timeout: 5s
      retries: 5

  # --------------------------------------------------------------------------
  # LiveKit Real-Time Server
  # --------------------------------------------------------------------------
  livekit:
    <<: *service-defaults
    container_name: livekit
    image: livekit/livekit-server:latest
    profiles:
      - dev
      - prod
    # UDP/TCP ports remain published in prod so external clients can complete WebRTC/TURN;
    # Caddy still proxies signaling over the shared proxy network.
    environment:
      LIVEKIT_KEYS: "${LIVEKIT_API_KEY}:${LIVEKIT_API_SECRET}"
      LIVEKIT_PORT: 7880
      LIVEKIT_LOG_LEVEL: ${LIVEKIT_LOG_LEVEL:-info}
    ports:
      - "7880:7880" # HTTP/WS signaling (Caddy terminates TLS)
      - "7881:7881" # TCP fallback for WebRTC
      - "3478:3478/udp" # TURN
      - "5349:5349/tcp" # TURN over TLS
      - "50000-60000:50000-60000/udp" # WebRTC media plane
    networks:
      - app
      - proxy

volumes:
  postgres-data:
  backend-venv:
  frontend-node_modules:

networks:
  app:
    name: app_network
  proxy:
    name: proxy
    external: true
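
Because the proxy network is declared external, it must exist before the prod profile can start. A run-through of both entry points (the up commands are the ones from this file's previous header; the network is normally created by the infra stack, so the first line is a guard, not a requirement):

# Ensure the shared proxy network exists (usually created by infra/docker-compose.yml).
docker network inspect proxy >/dev/null 2>&1 || docker network create proxy
# Production: detached; app containers publish no host ports, Caddy proxies over the proxy network.
COMPOSE_PROFILES=prod docker compose up --build -d
# Development: foreground with reloaders and localhost ports.
COMPOSE_PROFILES=dev docker compose up --build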

View File: frontend/Dockerfile

@@ -1,108 +1,71 @@
#
# FRONTEND DOCKERFILE
#
# Multi-stage image for the Next.js SPA/SSR frontend.
# - runner:  production server with minimal footprint
# - builder: compiles the Next.js app
# - dev:     hot-reload friendly image
#
# COMPOSE_PROFILES decides which stage is used by docker-compose.yml.
#

FROM node:22-slim AS base

WORKDIR /app

ENV NPM_CONFIG_LOGLEVEL=warn \
    NODE_OPTIONS="--enable-source-maps"

# ------------------------------------------------------------------------------
# Dependencies cache
# ------------------------------------------------------------------------------
FROM base AS deps

COPY package.json package-lock.json* ./
RUN npm ci

# ------------------------------------------------------------------------------
# Production dependencies only (pruned to omit dev tooling)
# ------------------------------------------------------------------------------
FROM base AS prod-deps

COPY package.json package-lock.json* ./
RUN npm ci --omit=dev

# ------------------------------------------------------------------------------
# Builder: compile the application for production
# ------------------------------------------------------------------------------
FROM base AS builder

COPY --from=deps /app/node_modules ./node_modules
COPY . .
ENV NODE_ENV=production
RUN npm run build

# ------------------------------------------------------------------------------
# Production runner: serve the built Next.js app
# ------------------------------------------------------------------------------
FROM node:22-slim AS runner

WORKDIR /app

ENV NODE_ENV=production \
    NEXT_TELEMETRY_DISABLED=1

USER node
COPY --from=prod-deps --chown=node:node /app/node_modules ./node_modules
COPY --from=builder --chown=node:node /app/.next ./.next
COPY --from=builder --chown=node:node /app/public ./public
COPY --from=builder --chown=node:node /app/package.json ./package.json

EXPOSE 3000
CMD ["npm", "run", "start"]

# ------------------------------------------------------------------------------
# Development: keeps node_modules and sources mounted for hot reload
# ------------------------------------------------------------------------------
FROM deps AS dev

WORKDIR /app

ENV NODE_ENV=development \
    NEXT_TELEMETRY_DISABLED=1

USER node
EXPOSE 3000
CMD ["npm", "run", "dev", "--", "--hostname", "0.0.0.0", "--port", "3000"]