# veza/docker-compose.staging.yml
# Staging stack: postgres, redis, rabbitmq, backend API, stream server,
# frontend, caddy (TLS edge), minio (S3-compatible object store).
---
version: '3.8'
services:
  # --- INFRASTRUCTURE ---
  postgres:
    image: postgres:16-alpine
    container_name: veza_postgres_staging
    restart: unless-stopped
    environment:
      POSTGRES_USER: veza
      # :? makes compose fail fast at deploy time instead of booting with an empty password
      POSTGRES_PASSWORD: ${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}
      POSTGRES_DB: veza_staging
    volumes:
      - postgres_staging_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U veza"]
      interval: 10s
      timeout: 5s
      retries: 5
redis:
image: redis:7-alpine
container_name: veza_redis_staging
restart: unless-stopped
command: redis-server --save 60 1 --loglevel warning
volumes:
- redis_staging_data:/data
healthcheck:
test: [ "CMD", "redis-cli", "ping" ]
interval: 10s
rabbitmq:
image: rabbitmq:3-management-alpine
container_name: veza_rabbitmq_staging
restart: unless-stopped
environment:
RABBITMQ_DEFAULT_USER: veza
RABBITMQ_DEFAULT_PASS: ${STAGING_RABBITMQ_PASSWORD:?STAGING_RABBITMQ_PASSWORD must be set}
2026-01-07 18:39:21 +00:00
volumes:
- rabbitmq_staging_data:/var/lib/rabbitmq
healthcheck:
test: rabbitmq-diagnostics -q ping
interval: 20s
# --- APPLICATION SERVICES ---
backend:
build:
context: ./veza-backend-api
dockerfile: Dockerfile.production
container_name: veza_backend_staging
restart: unless-stopped
environment:
- APP_ENV=staging
- DB_HOST=postgres
- DB_PORT=5432
- DB_USER=veza
- DB_PASSWORD=${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}
2026-01-07 18:39:21 +00:00
- DB_NAME=veza_staging
- DATABASE_URL=postgresql://veza:${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}@postgres:5432/veza_staging?sslmode=${STAGING_DB_SSLMODE:-disable}
2026-01-07 18:39:21 +00:00
- REDIS_URL=redis://redis:6379
- RABBITMQ_URL=amqp://veza:${STAGING_RABBITMQ_PASSWORD:?STAGING_RABBITMQ_PASSWORD must be set}@rabbitmq:5672/%2f
2026-01-07 18:39:21 +00:00
- JWT_SECRET=${STAGING_JWT_SECRET}
- ENABLE_CLAMAV=false
- LOG_DIR=/var/log/veza
- LOG_LEVEL=INFO
# Cookie Security Settings (Staging)
- COOKIE_SECURE=true # true en staging (HTTPS requis)
- COOKIE_SAME_SITE=strict # strict pour sécurité maximale
- COOKIE_DOMAIN=${STAGING_COOKIE_DOMAIN:-.staging.veza.app}
- COOKIE_HTTP_ONLY=true
- COOKIE_PATH=/
- CORS_ALLOWED_ORIGINS=${STAGING_CORS_ORIGINS:-https://staging.veza.app,https://staging-api.veza.app}
- AWS_S3_ENDPOINT=http://minio:9000
- AWS_S3_BUCKET=veza-files
- AWS_ACCESS_KEY_ID=${STAGING_S3_ACCESS_KEY:?STAGING_S3_ACCESS_KEY must be set}
- AWS_SECRET_ACCESS_KEY=${STAGING_S3_SECRET_KEY:?STAGING_S3_SECRET_KEY must be set}
- AWS_REGION=us-east-1
# v1.0.10 polish: enable the S3 stack and route track uploads through
# MinIO end-to-end. Without these two flags, defaults (local +
# disabled) win and the AWS_S3_* credentials above are inert.
- AWS_S3_ENABLED=true
- TRACK_STORAGE_BACKEND=s3
feat(infra): add coturn service + wire WEBRTC_TURN_* envs in compose WebRTC 1:1 calls were silently broken behind symmetric NAT (corporate firewalls, mobile CGNAT, Incus default networking) because no TURN relay was deployed. The /api/v1/config/webrtc endpoint and the useWebRTC frontend hook were both wired correctly from v1.0.9 Day 1, but with no TURN box on the network the handler returned STUN-only and the SPA's `nat.hasTurn` flag stayed false. Added : * docker-compose.prod.yml: new `coturn` service using the official coturn/coturn:4.6.2 image, network_mode: host (UDP relay range 49152-65535 doesn't survive Docker NAT), config passed entirely via CLI args so no template render is needed. TLS cert volume points at /etc/letsencrypt/live/turn.veza.fr by default; override with TURN_CERT_DIR for non-LE setups. Healthcheck uses nc -uz to catch crashed/unbound listeners. * Both backend services (blue + green): WEBRTC_STUN_URLS, WEBRTC_TURN_URLS, WEBRTC_TURN_USERNAME, WEBRTC_TURN_CREDENTIAL pulled from env with `:?` strict-fail markers so a misconfigured deploy crashes loudly instead of degrading silently to STUN-only. * docker-compose.staging.yml: same 4 env vars but with safe fallback defaults (Google STUN, no TURN) so staging boots without a coturn box. Operators can flip to relay by setting the envs externally. Operator must set the following secrets at deploy time : WEBRTC_TURN_PUBLIC_IP the host's public IP (used both by coturn --external-ip and by the backend STUN/TURN URLs the SPA receives) WEBRTC_TURN_USERNAME static long-term credential username WEBRTC_TURN_CREDENTIAL static long-term credential password WEBRTC_TURN_REALM optional, defaults to turn.veza.fr Smoke test : turnutils_uclient -u $USER -w $CRED -p 3478 $PUBLIC_IP should return a relay allocation within ~1s. From the SPA, watch chrome://webrtc-internals during a call and confirm the selected candidate pair is `relay` when both peers are on symmetric NAT. 
The Ansible role under infra/coturn/ is the canonical Incus-native deploy path documented in infra/coturn/README.md; this compose service is the simpler single-host option that unblocks calls today. v1.1 will switch from static to ephemeral REST-shared-secret credentials per ORIGIN_SECURITY_FRAMEWORK.md. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-30 13:44:12 +00:00
# WebRTC ICE — STUN-only by default in staging (no public TURN
# box). Set the WEBRTC_TURN_* envs externally to flip to relay.
- WEBRTC_STUN_URLS=${WEBRTC_STUN_URLS:-stun:stun.l.google.com:19302}
- WEBRTC_TURN_URLS=${WEBRTC_TURN_URLS:-}
- WEBRTC_TURN_USERNAME=${WEBRTC_TURN_USERNAME:-}
- WEBRTC_TURN_CREDENTIAL=${WEBRTC_TURN_CREDENTIAL:-}
- HLS_STREAMING=true
- HLS_STORAGE_DIR=/data/hls
2026-01-07 18:39:21 +00:00
volumes:
- veza_logs_staging:/var/log/veza
- hls_staging_data:/data/hls
2026-01-07 18:39:21 +00:00
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
rabbitmq:
condition: service_healthy
ports:
- "8080:8080"
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/api/v1/health"]
interval: 10s
timeout: 5s
retries: 3
# Chat Server removed in v0.502 -- chat is now handled by backend WebSocket at /api/v1/ws
stream-server:
build:
context: ./veza-stream-server
dockerfile: Dockerfile.production
container_name: veza_stream_staging
restart: unless-stopped
environment:
- DATABASE_URL=postgresql://veza:${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}@postgres:5432/veza_staging?sslmode=${STAGING_DB_SSLMODE:-disable}
- REDIS_URL=redis://redis:6379
- JWT_SECRET=${STAGING_JWT_SECRET:?STAGING_JWT_SECRET must be set}
- PORT=3001
- HLS_OUTPUT_DIR=/data/hls
volumes:
- hls_staging_data:/data/hls
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3001/health"]
interval: 10s
timeout: 5s
retries: 3
2026-01-07 18:39:21 +00:00
frontend:
build:
context: ./apps/web
dockerfile: Dockerfile.production
container_name: veza_frontend_staging
restart: unless-stopped
environment:
- VITE_API_URL=/api/v1
- VITE_STREAM_URL=ws://caddy/stream
2026-01-07 18:39:21 +00:00
- VITE_APP_ENV=staging
depends_on:
- backend
- stream-server
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:5173"]
interval: 10s
timeout: 5s
retries: 3
caddy:
image: caddy:2-alpine
container_name: veza_caddy_staging
restart: unless-stopped
2026-01-07 18:39:21 +00:00
ports:
- "80:80"
- "443:443"
volumes:
- ./config/caddy/Caddyfile.staging:/etc/caddy/Caddyfile:ro
- caddy_data:/data
- caddy_config:/config
2026-01-07 18:39:21 +00:00
depends_on:
- backend
- stream-server
- frontend
2026-01-07 18:39:21 +00:00
minio:
image: minio/minio:RELEASE.2025-09-07T16-13-09Z
container_name: veza_minio_staging
restart: unless-stopped
command: server /data --console-address ":9001"
environment:
MINIO_ROOT_USER: ${STAGING_S3_ACCESS_KEY:?STAGING_S3_ACCESS_KEY must be set}
MINIO_ROOT_PASSWORD: ${STAGING_S3_SECRET_KEY:?STAGING_S3_SECRET_KEY must be set}
volumes:
- minio_staging_data:/data
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 10s
timeout: 5s
retries: 5
minio-init:
image: minio/mc:RELEASE.2025-09-07T05-25-40Z
depends_on:
minio:
condition: service_healthy
entrypoint: >
/bin/sh -c "
mc alias set veza http://minio:9000 $${MINIO_ROOT_USER} $${MINIO_ROOT_PASSWORD};
mc mb --ignore-existing veza/veza-files;
exit 0;
"
environment:
MINIO_ROOT_USER: ${STAGING_S3_ACCESS_KEY:?STAGING_S3_ACCESS_KEY must be set}
MINIO_ROOT_PASSWORD: ${STAGING_S3_SECRET_KEY:?STAGING_S3_SECRET_KEY must be set}
2026-01-07 18:39:21 +00:00
volumes:
  postgres_staging_data:
  redis_staging_data:
  rabbitmq_staging_data:
  veza_logs_staging:
  caddy_data:
  caddy_config:
  minio_staging_data:
  # shared between backend (writer config) and stream-server (HLS output)
  hls_staging_data: