Add app scaffold and workflows
2025-12-03 08:58:34 +01:00
parent 5a8b773e40
commit d6b61ae8fb
51 changed files with 10252 additions and 3 deletions

app/docker-compose.yml Normal file

@@ -0,0 +1,231 @@
# COMPOSE_PROFILES controls which services start:
# - dev : laptop-friendly; mounts source, exposes localhost ports, enables reloaders
# - prod : VPS-friendly; no host ports for app containers, joins proxy network for Caddy
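#
# Example invocations (a sketch; values shown here are illustrative and not
# part of this commit):
#   COMPOSE_PROFILES=dev docker compose up --build         # local stack with reloaders
#   COMPOSE_PROFILES=prod docker compose up -d --build     # detached stack behind Caddy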

x-service-defaults: &service-defaults
  env_file: .env
  restart: ${DOCKER_RESTART_POLICY:-unless-stopped}
  networks:
    - app
  logging:
    driver: "json-file"
    options:
      max-size: "10m"
      max-file: "3"

x-backend-common: &backend-common
  <<: *service-defaults
  depends_on:
    postgres:
      condition: service_healthy
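
# The x-* blocks above are pulled into services via YAML merge keys (<<);
# keys declared directly on a service (e.g. networks, depends_on) replace the
# merged defaults, since YAML merge is shallow.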

services:
  # --------------------------------------------------------------------------
  # Next.js Frontend
  # --------------------------------------------------------------------------
  frontend:
    <<: *service-defaults
    profiles: [prod]
    container_name: frontend
    build:
      context: ./frontend
      dockerfile: Dockerfile
      target: runner
    environment:
      NODE_ENV: production
      NEXT_TELEMETRY_DISABLED: 1
      PORT: 3000
    expose:
      - "3000"
    depends_on:
      backend:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "node -e \"require('http').get('http://127.0.0.1:3000', (res) => { process.exit(res.statusCode < 500 ? 0 : 1); }).on('error', () => process.exit(1));\"",
        ]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      - app
      - proxy
  # Caddy from the infra stack reverse-proxies to this container on the proxy network.
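  # Illustrative Caddyfile entry in that stack (the hostname is an assumption,
  # not part of this commit):
  #   app.example.com {
  #     reverse_proxy frontend:3000
  #   }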

  frontend-dev:
    <<: *service-defaults
    profiles: [dev]
    container_name: frontend-dev
    build:
      context: ./frontend
      dockerfile: Dockerfile
      target: dev
    command: ["npm", "run", "dev", "--", "--hostname", "0.0.0.0", "--port", "3000"]
    environment:
      NODE_ENV: development
      NEXT_TELEMETRY_DISABLED: 1
    ports:
      - "3000:3000" # Localhost access during development
    volumes:
      - ./frontend:/app
      - frontend-node_modules:/app/node_modules
    depends_on:
      backend-dev:
        condition: service_healthy
    networks:
      app:
        aliases:
          - frontend

  # --------------------------------------------------------------------------
  # FastAPI Backend
  # --------------------------------------------------------------------------
  backend:
    <<: *backend-common
    profiles: [prod]
    container_name: backend
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: production
    environment:
      ENVIRONMENT: production
    command:
      [
        "gunicorn",
        "main:app",
        "-k",
        "uvicorn.workers.UvicornWorker",
        "-w",
        "${GUNICORN_WORKERS:-4}",
        "--bind",
        "0.0.0.0:8000",
        "--access-logfile",
        "-",
        "--error-logfile",
        "-",
        "--timeout",
        "${GUNICORN_TIMEOUT:-120}",
      ]
    expose:
      - "8000"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health/live').close()\"",
        ]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      - app
      - proxy

  backend-dev:
    <<: *backend-common
    profiles: [dev]
    container_name: backend-dev
    build:
      context: ./backend
      dockerfile: Dockerfile
      target: development
    environment:
      ENVIRONMENT: development
      PYTHONPATH: /app
    command: ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
    ports:
      - "8000:8000" # Localhost access during development
    volumes:
      - ./backend:/app
      - backend-venv:/app/.venv
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "python -c \"import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health/live').close()\"",
        ]
      interval: 15s
      timeout: 5s
      retries: 5
      start_period: 10s
    networks:
      app:
        aliases:
          - backend

  # --------------------------------------------------------------------------
  # PostgreSQL + pgvector
  # --------------------------------------------------------------------------
  postgres:
    <<: *service-defaults
    container_name: postgres
    image: pgvector/pgvector:pg16
    profiles:
      - dev
      - prod
    volumes:
      - postgres-data:/var/lib/postgresql/data
    ports:
      - "127.0.0.1:${POSTGRES_PORT:-5432}:5432" # Local-only binding keeps DB off the public interface
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-postgres}"]
      interval: 10s
      timeout: 5s
      retries: 5
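  # With the local-only binding above, the database is reachable from the host,
  # e.g. (credentials illustrative; real values come from .env):
  #   psql "postgresql://postgres:<POSTGRES_PASSWORD>@127.0.0.1:5432/postgres"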

  # --------------------------------------------------------------------------
  # LiveKit Real-Time Server
  # --------------------------------------------------------------------------
  livekit:
    <<: *service-defaults
    container_name: livekit
    image: livekit/livekit-server:latest
    profiles:
      - dev
      - prod
    # UDP/TCP ports remain published in prod so external clients can complete WebRTC/TURN;
    # Caddy still proxies signaling over the shared proxy network.
    environment:
      LIVEKIT_KEYS: "${LIVEKIT_API_KEY}:${LIVEKIT_API_SECRET}"
      LIVEKIT_PORT: 7880
      LIVEKIT_LOG_LEVEL: ${LIVEKIT_LOG_LEVEL:-info}
    command:
      [
        "livekit-server",
        "--dev",
        "--port",
        "7880",
        "--rtc.port-range-start",
        "${LIVEKIT_RTC_PORT_RANGE_START:-60000}",
        "--rtc.port-range-end",
        "${LIVEKIT_RTC_PORT_RANGE_END:-60100}",
      ]
    ports:
      - "7880:7880" # HTTP/WS signaling (Caddy terminates TLS)
      - "7881:7881" # TCP fallback for WebRTC
      - "3478:3478/udp" # TURN
      - "5349:5349/tcp" # TURN over TLS
      - "${LIVEKIT_RTC_PORT_RANGE_START:-60000}-${LIVEKIT_RTC_PORT_RANGE_END:-60100}:${LIVEKIT_RTC_PORT_RANGE_START:-60000}-${LIVEKIT_RTC_PORT_RANGE_END:-60100}/udp" # WebRTC media plane
    networks:
      - app
      - proxy

volumes:
  postgres-data:
  backend-venv:
  frontend-node_modules:

networks:
  app:
    name: app_network
  proxy:
    name: proxy
    # In prod, set PROXY_NETWORK_EXTERNAL=true so this attaches to the shared
    # Caddy network created by infra. In dev, leave false to let Compose create
    # a local network automatically.
    external: ${PROXY_NETWORK_EXTERNAL:-false}
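
# Illustrative .env entries for the variables referenced above (example values
# only, not part of this commit):
#   COMPOSE_PROFILES=prod
#   PROXY_NETWORK_EXTERNAL=true
#   GUNICORN_WORKERS=4
#   GUNICORN_TIMEOUT=120
#   POSTGRES_USER=postgres
#   POSTGRES_DB=postgres
#   LIVEKIT_API_KEY=<key>
#   LIVEKIT_API_SECRET=<secret>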