# Production Docker Compose stack for the Veza platform.
# NOTE(review): the top-level "version" key is obsolete and ignored by
# Compose v2+; kept here for backward compatibility with older tooling.
version: '3.8'

services:
  # ============================================================================
  # INFRASTRUCTURE SERVICES
  # ============================================================================
2026-01-03 21:59:48 +00:00
postgres :
image : postgres:16-alpine
2026-01-11 15:30:43 +00:00
container_name : veza_postgres
2026-01-03 21:59:48 +00:00
restart : unless-stopped
environment :
2026-01-11 15:30:43 +00:00
POSTGRES_USER : ${DB_USER:-veza}
2026-02-11 21:43:09 +00:00
POSTGRES_PASSWORD : ${DB_PASS:?DB_PASS must be set for production}
2026-01-11 15:30:43 +00:00
POSTGRES_DB : ${DB_NAME:-veza}
2026-01-03 21:59:48 +00:00
volumes :
2026-01-11 15:30:43 +00:00
- postgres_data:/var/lib/postgresql/data
2026-01-03 21:59:48 +00:00
healthcheck :
2026-01-11 15:30:43 +00:00
test : [ "CMD-SHELL" , "pg_isready -U ${DB_USER:-veza}" ]
interval : 5s
2026-01-03 21:59:48 +00:00
timeout : 5s
retries : 5
2026-01-11 15:30:43 +00:00
networks :
- veza-network
deploy :
resources :
limits :
cpus : '0.50'
memory : 256M
2026-01-03 21:59:48 +00:00
redis :
image : redis:7-alpine
2026-01-11 15:30:43 +00:00
container_name : veza_redis
2026-01-03 21:59:48 +00:00
restart : unless-stopped
2026-02-22 16:24:12 +00:00
command : [ "redis-server" , "--requirepass" , "${REDIS_PASSWORD:?REDIS_PASSWORD must be set for production}" , "--appendonly" , "yes" ]
2026-01-03 21:59:48 +00:00
volumes :
2026-01-11 15:30:43 +00:00
- redis_data:/data
2026-01-03 21:59:48 +00:00
healthcheck :
2026-02-22 16:24:12 +00:00
test : [ "CMD" , "redis-cli" , "-a" , "${REDIS_PASSWORD}" , "ping" ]
2026-01-11 15:30:43 +00:00
interval : 5s
timeout : 3s
retries : 5
networks :
- veza-network
deploy :
resources :
limits :
cpus : '0.25'
memory : 64M
2026-01-03 21:59:48 +00:00
2026-03-12 05:13:38 +00:00
# SECURITY(MEDIUM-008): Use rabbitmq:3-alpine (no management UI) in production.
# Management UI exposes internal metrics/config and is unnecessary in prod.
2026-01-03 21:59:48 +00:00
rabbitmq :
2026-03-12 05:13:38 +00:00
image : rabbitmq:3-alpine
2026-01-11 15:30:43 +00:00
container_name : veza_rabbitmq
2026-01-03 21:59:48 +00:00
restart : unless-stopped
environment :
2026-01-11 15:30:43 +00:00
RABBITMQ_DEFAULT_USER : ${DB_USER:-veza}
2026-02-11 21:43:09 +00:00
RABBITMQ_DEFAULT_PASS : ${RABBITMQ_PASS:?RABBITMQ_PASS must be set for production}
2026-01-03 21:59:48 +00:00
volumes :
2026-01-11 15:30:43 +00:00
- rabbitmq_data:/var/lib/rabbitmq
2026-01-03 21:59:48 +00:00
healthcheck :
test : rabbitmq-diagnostics -q ping
2026-01-11 15:30:43 +00:00
interval : 10s
timeout : 10s
retries : 5
networks :
- veza-network
2026-01-03 21:59:48 +00:00
deploy :
resources :
limits :
2026-01-11 15:30:43 +00:00
cpus : '0.50'
memory : 256M
2026-01-03 21:59:48 +00:00
2026-03-12 05:13:38 +00:00
# SECURITY(MEDIUM-003): Pin ClamAV image to specific version instead of :latest
2026-02-18 11:03:14 +00:00
clamav :
2026-03-12 05:13:38 +00:00
image : clamav/clamav:1.4
2026-02-18 11:03:14 +00:00
container_name : veza_clamav
restart : unless-stopped
networks :
- veza-network
healthcheck :
test : [ "CMD" , "clamdscan" , "--ping" , "1" ]
interval : 30s
timeout : 10s
retries : 5
start_period : 180s
deploy :
resources :
limits :
cpus : '0.5'
memory : 1G
2026-02-14 20:45:15 +00:00
# ============================================================================
# PAYMENT ROUTER (Hyperswitch)
# ============================================================================
hyperswitch_postgres :
image : postgres:16-alpine
container_name : veza_hyperswitch_postgres
restart : unless-stopped
environment :
POSTGRES_USER : ${HYPERSWITCH_DB_USER:-hyperswitch}
POSTGRES_PASSWORD : ${HYPERSWITCH_DB_PASS:?HYPERSWITCH_DB_PASS must be set for production}
POSTGRES_DB : ${HYPERSWITCH_DB_NAME:-hyperswitch}
volumes :
- hyperswitch_postgres_data:/var/lib/postgresql/data
healthcheck :
test : [ "CMD-SHELL" , "pg_isready -U ${HYPERSWITCH_DB_USER:-hyperswitch}" ]
interval : 5s
timeout : 5s
retries : 5
networks :
- veza-network
deploy :
resources :
limits :
cpus : "0.25"
memory : 128M
2026-03-12 05:23:56 +00:00
# SECURITY(LOW-002): Pin to specific Hyperswitch version. Check https://github.com/juspay/hyperswitch/releases for updates.
2026-02-14 20:45:15 +00:00
hyperswitch :
2026-03-12 05:23:56 +00:00
image : juspaydotin/hyperswitch-router:2026.03.11.0-standalone
2026-02-14 20:45:15 +00:00
container_name : veza_hyperswitch
restart : unless-stopped
environment :
DATABASE_URL : postgresql://${HYPERSWITCH_DB_USER:-hyperswitch}:${HYPERSWITCH_DB_PASS:?HYPERSWITCH_DB_PASS must be set}@hyperswitch_postgres:5432/${HYPERSWITCH_DB_NAME:-hyperswitch}?sslmode=require
2026-02-22 16:24:12 +00:00
REDIS_URL : redis://:${REDIS_PASSWORD}@redis:6379
2026-02-14 20:45:15 +00:00
depends_on :
hyperswitch_postgres :
condition : service_healthy
redis :
condition : service_healthy
networks :
- veza-network
healthcheck :
test : [ "CMD" , "wget" , "--quiet" , "--tries=1" , "--spider" , "http://localhost:8080/health" ]
interval : 10s
timeout : 5s
retries : 3
deploy :
resources :
limits :
cpus : "0.5"
memory : 256M
2026-01-11 15:30:43 +00:00
# ============================================================================
2026-02-23 18:52:19 +00:00
# APPLICATION SERVICES - Blue-Green Deployment
# STACK_COLOR=blue|green. Use scripts/deploy-blue-green.sh to switch.
2026-01-11 15:30:43 +00:00
# ============================================================================
2026-02-23 18:52:19 +00:00
backend-api-blue :
2026-01-03 21:59:48 +00:00
build :
context : ./veza-backend-api
dockerfile : Dockerfile.production
2026-01-11 15:30:43 +00:00
image : veza-backend-api:latest
2026-02-23 18:52:19 +00:00
container_name : veza_backend_api_blue
2026-01-03 21:59:48 +00:00
restart : unless-stopped
environment :
- APP_ENV=production
2026-02-23 18:52:19 +00:00
- STACK_COLOR=blue
2026-02-11 21:43:09 +00:00
- DATABASE_URL=postgres://${DB_USER:-veza}:${DB_PASS:?DB_PASS must be set}@postgres:5432/${DB_NAME:-veza}?sslmode=require
2026-02-22 16:24:12 +00:00
- REDIS_URL=redis://:${REDIS_PASSWORD:?REDIS_PASSWORD must be set}@redis:6379
2026-02-11 21:43:09 +00:00
- AMQP_URL=amqp://${DB_USER:-veza}:${RABBITMQ_PASS:?RABBITMQ_PASS must be set}@rabbitmq:5672
2026-03-12 04:40:53 +00:00
# SECURITY(HIGH-002): Use RS256 asymmetric keys in production instead of HS256 shared secret.
# Generate: openssl genrsa -out jwt_private.pem 2048 && openssl rsa -in jwt_private.pem -pubout -out jwt_public.pem
- JWT_PRIVATE_KEY_PATH=${JWT_PRIVATE_KEY_PATH:-/secrets/jwt_private.pem}
- JWT_PUBLIC_KEY_PATH=${JWT_PUBLIC_KEY_PATH:-/secrets/jwt_public.pem}
2026-01-11 15:30:43 +00:00
- COOKIE_SECURE=true
- COOKIE_SAME_SITE=strict
- COOKIE_HTTP_ONLY=true
2026-02-11 21:18:57 +00:00
- CORS_ALLOWED_ORIGINS=${CORS_ORIGINS:-http://veza.fr}
2026-02-14 20:45:15 +00:00
- HYPERSWITCH_URL=http://hyperswitch:8080
- HYPERSWITCH_API_KEY=${HYPERSWITCH_API_KEY:-}
- HYPERSWITCH_WEBHOOK_SECRET=${HYPERSWITCH_WEBHOOK_SECRET:-}
- HYPERSWITCH_ENABLED=${HYPERSWITCH_ENABLED:-false}
2026-02-23 18:56:52 +00:00
- HYPERSWITCH_LIVE_MODE=${HYPERSWITCH_LIVE_MODE:-false}
2026-02-14 20:45:15 +00:00
- CHECKOUT_SUCCESS_URL=${CHECKOUT_SUCCESS_URL:-https://veza.fr/purchases}
2026-02-18 11:03:14 +00:00
- ENABLE_CLAMAV=true
- CLAMAV_REQUIRED=true
- CLAMAV_ADDRESS=clamav:3310
feat(v0.501): Sprint 1 -- infrastructure foundations
- Add MinIO S3-compatible storage to docker-compose (dev, staging, prod)
- Create migrations 103-108 (waveform_url, user_folders, user_files,
user_storage_quotas, gear_items.is_public, gear_images)
- Add Go models: UserFile, UserFolder, StorageQuota, GearImage
- Add WaveformURL to Track model, IsPublic + GearImages to GearItem model
2026-02-22 17:10:25 +00:00
- AWS_S3_ENDPOINT=http://minio:9000
- AWS_S3_BUCKET=veza-files
- AWS_ACCESS_KEY_ID=${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
- AWS_SECRET_ACCESS_KEY=${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
- AWS_REGION=${AWS_REGION:-us-east-1}
2026-04-30 13:39:30 +00:00
# v1.0.10 polish: enable the S3 stack and route track uploads through
# MinIO end-to-end. Without these two flags, defaults (local +
# disabled) win and the AWS_S3_* credentials above are inert. With
# blue/green active/active behind HAProxy, local-disk uploads on
# one pod are invisible to the other — S3 is required for HA.
- AWS_S3_ENABLED=true
- TRACK_STORAGE_BACKEND=s3
feat(infra): add coturn service + wire WEBRTC_TURN_* envs in compose
WebRTC 1:1 calls were silently broken behind symmetric NAT (corporate
firewalls, mobile CGNAT, Incus default networking) because no TURN
relay was deployed. The /api/v1/config/webrtc endpoint and the
useWebRTC frontend hook were both wired correctly from v1.0.9 Day 1,
but with no TURN box on the network the handler returned STUN-only
and the SPA's `nat.hasTurn` flag stayed false.
Added :
* docker-compose.prod.yml: new `coturn` service using the official
coturn/coturn:4.6.2 image, network_mode: host (UDP relay range
49152-65535 doesn't survive Docker NAT), config passed entirely
via CLI args so no template render is needed. TLS cert volume
points at /etc/letsencrypt/live/turn.veza.fr by default; override
with TURN_CERT_DIR for non-LE setups. Healthcheck uses nc -uz to
catch crashed/unbound listeners.
* Both backend services (blue + green): WEBRTC_STUN_URLS,
WEBRTC_TURN_URLS, WEBRTC_TURN_USERNAME, WEBRTC_TURN_CREDENTIAL
pulled from env with `:?` strict-fail markers so a misconfigured
deploy crashes loudly instead of degrading silently to STUN-only.
* docker-compose.staging.yml: same 4 env vars but with safe fallback
defaults (Google STUN, no TURN) so staging boots without a coturn
box. Operators can flip to relay by setting the envs externally.
Operator must set the following secrets at deploy time :
WEBRTC_TURN_PUBLIC_IP the host's public IP (used both by coturn
--external-ip and by the backend STUN/TURN
URLs the SPA receives)
WEBRTC_TURN_USERNAME static long-term credential username
WEBRTC_TURN_CREDENTIAL static long-term credential password
WEBRTC_TURN_REALM optional, defaults to turn.veza.fr
Smoke test : turnutils_uclient -u $USER -w $CRED -p 3478 $PUBLIC_IP
should return a relay allocation within ~1s. From the SPA, watch
chrome://webrtc-internals during a call and confirm the selected
candidate pair is `relay` when both peers are on symmetric NAT.
The Ansible role under infra/coturn/ is the canonical Incus-native
deploy path documented in infra/coturn/README.md; this compose
service is the simpler single-host option that unblocks calls today.
v1.1 will switch from static to ephemeral REST-shared-secret
credentials per ORIGIN_SECURITY_FRAMEWORK.md.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-30 13:44:12 +00:00
# WebRTC ICE servers — populated from the coturn service above.
# Empty TURN vars degrade to STUN-only (calls work peer-to-peer
# but fail behind symmetric NAT); the all-or-nothing rule in
# webrtc_config_handler.go means partial config is rejected.
- WEBRTC_STUN_URLS=stun:${WEBRTC_TURN_PUBLIC_IP:?WEBRTC_TURN_PUBLIC_IP must be set}:3478
- WEBRTC_TURN_URLS=turn:${WEBRTC_TURN_PUBLIC_IP}:3478,turns:${WEBRTC_TURN_PUBLIC_IP}:5349
- WEBRTC_TURN_USERNAME=${WEBRTC_TURN_USERNAME:?WEBRTC_TURN_USERNAME must be set}
- WEBRTC_TURN_CREDENTIAL=${WEBRTC_TURN_CREDENTIAL:?WEBRTC_TURN_CREDENTIAL must be set}
2026-02-22 20:20:35 +00:00
- HLS_STREAMING=true
- HLS_STORAGE_DIR=/data/hls
volumes :
- hls_prod_data:/data/hls
2026-01-03 21:59:48 +00:00
depends_on :
postgres :
condition : service_healthy
redis :
condition : service_healthy
rabbitmq :
condition : service_healthy
2026-02-18 11:03:14 +00:00
clamav :
condition : service_started
2026-01-11 15:30:43 +00:00
networks :
- veza-network
healthcheck :
test : [ "CMD" , "wget" , "--quiet" , "--tries=1" , "--spider" , "http://localhost:8080/api/v1/health" ]
interval : 10s
timeout : 5s
retries : 3
2026-01-03 21:59:48 +00:00
2026-02-23 18:52:19 +00:00
backend-api-green :
build :
context : ./veza-backend-api
dockerfile : Dockerfile.production
image : veza-backend-api:latest
container_name : veza_backend_api_green
restart : unless-stopped
environment :
- APP_ENV=production
- STACK_COLOR=green
- DATABASE_URL=postgres://${DB_USER:-veza}:${DB_PASS:?DB_PASS must be set}@postgres:5432/${DB_NAME:-veza}?sslmode=require
- REDIS_URL=redis://:${REDIS_PASSWORD:?REDIS_PASSWORD must be set}@redis:6379
- AMQP_URL=amqp://${DB_USER:-veza}:${RABBITMQ_PASS:?RABBITMQ_PASS must be set}@rabbitmq:5672
2026-03-12 04:40:53 +00:00
# SECURITY(HIGH-002): RS256 asymmetric keys for production
- JWT_PRIVATE_KEY_PATH=${JWT_PRIVATE_KEY_PATH:-/secrets/jwt_private.pem}
- JWT_PUBLIC_KEY_PATH=${JWT_PUBLIC_KEY_PATH:-/secrets/jwt_public.pem}
2026-02-23 18:52:19 +00:00
- COOKIE_SECURE=true
- COOKIE_SAME_SITE=strict
- COOKIE_HTTP_ONLY=true
- CORS_ALLOWED_ORIGINS=${CORS_ORIGINS:-http://veza.fr}
- HYPERSWITCH_URL=http://hyperswitch:8080
- HYPERSWITCH_API_KEY=${HYPERSWITCH_API_KEY:-}
- HYPERSWITCH_WEBHOOK_SECRET=${HYPERSWITCH_WEBHOOK_SECRET:-}
- HYPERSWITCH_ENABLED=${HYPERSWITCH_ENABLED:-false}
2026-02-23 18:56:52 +00:00
- HYPERSWITCH_LIVE_MODE=${HYPERSWITCH_LIVE_MODE:-false}
2026-02-23 18:52:19 +00:00
- CHECKOUT_SUCCESS_URL=${CHECKOUT_SUCCESS_URL:-https://veza.fr/purchases}
- ENABLE_CLAMAV=true
- CLAMAV_REQUIRED=true
- CLAMAV_ADDRESS=clamav:3310
- AWS_S3_ENDPOINT=http://minio:9000
- AWS_S3_BUCKET=veza-files
- AWS_ACCESS_KEY_ID=${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
- AWS_SECRET_ACCESS_KEY=${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
- AWS_REGION=${AWS_REGION:-us-east-1}
2026-04-30 13:39:30 +00:00
# v1.0.10 polish: enable the S3 stack and route track uploads through
# MinIO end-to-end. Without these two flags, defaults (local +
# disabled) win and the AWS_S3_* credentials above are inert. With
# blue/green active/active behind HAProxy, local-disk uploads on
# one pod are invisible to the other — S3 is required for HA.
- AWS_S3_ENABLED=true
- TRACK_STORAGE_BACKEND=s3
feat(infra): add coturn service + wire WEBRTC_TURN_* envs in compose
WebRTC 1:1 calls were silently broken behind symmetric NAT (corporate
firewalls, mobile CGNAT, Incus default networking) because no TURN
relay was deployed. The /api/v1/config/webrtc endpoint and the
useWebRTC frontend hook were both wired correctly from v1.0.9 Day 1,
but with no TURN box on the network the handler returned STUN-only
and the SPA's `nat.hasTurn` flag stayed false.
Added :
* docker-compose.prod.yml: new `coturn` service using the official
coturn/coturn:4.6.2 image, network_mode: host (UDP relay range
49152-65535 doesn't survive Docker NAT), config passed entirely
via CLI args so no template render is needed. TLS cert volume
points at /etc/letsencrypt/live/turn.veza.fr by default; override
with TURN_CERT_DIR for non-LE setups. Healthcheck uses nc -uz to
catch crashed/unbound listeners.
* Both backend services (blue + green): WEBRTC_STUN_URLS,
WEBRTC_TURN_URLS, WEBRTC_TURN_USERNAME, WEBRTC_TURN_CREDENTIAL
pulled from env with `:?` strict-fail markers so a misconfigured
deploy crashes loudly instead of degrading silently to STUN-only.
* docker-compose.staging.yml: same 4 env vars but with safe fallback
defaults (Google STUN, no TURN) so staging boots without a coturn
box. Operators can flip to relay by setting the envs externally.
Operator must set the following secrets at deploy time :
WEBRTC_TURN_PUBLIC_IP the host's public IP (used both by coturn
--external-ip and by the backend STUN/TURN
URLs the SPA receives)
WEBRTC_TURN_USERNAME static long-term credential username
WEBRTC_TURN_CREDENTIAL static long-term credential password
WEBRTC_TURN_REALM optional, defaults to turn.veza.fr
Smoke test : turnutils_uclient -u $USER -w $CRED -p 3478 $PUBLIC_IP
should return a relay allocation within ~1s. From the SPA, watch
chrome://webrtc-internals during a call and confirm the selected
candidate pair is `relay` when both peers are on symmetric NAT.
The Ansible role under infra/coturn/ is the canonical Incus-native
deploy path documented in infra/coturn/README.md; this compose
service is the simpler single-host option that unblocks calls today.
v1.1 will switch from static to ephemeral REST-shared-secret
credentials per ORIGIN_SECURITY_FRAMEWORK.md.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-30 13:44:12 +00:00
# WebRTC ICE servers — populated from the coturn service above.
# Empty TURN vars degrade to STUN-only (calls work peer-to-peer
# but fail behind symmetric NAT); the all-or-nothing rule in
# webrtc_config_handler.go means partial config is rejected.
- WEBRTC_STUN_URLS=stun:${WEBRTC_TURN_PUBLIC_IP:?WEBRTC_TURN_PUBLIC_IP must be set}:3478
- WEBRTC_TURN_URLS=turn:${WEBRTC_TURN_PUBLIC_IP}:3478,turns:${WEBRTC_TURN_PUBLIC_IP}:5349
- WEBRTC_TURN_USERNAME=${WEBRTC_TURN_USERNAME:?WEBRTC_TURN_USERNAME must be set}
- WEBRTC_TURN_CREDENTIAL=${WEBRTC_TURN_CREDENTIAL:?WEBRTC_TURN_CREDENTIAL must be set}
2026-02-23 18:52:19 +00:00
- HLS_STREAMING=true
- HLS_STORAGE_DIR=/data/hls
volumes :
- hls_prod_data:/data/hls
depends_on :
postgres :
condition : service_healthy
redis :
condition : service_healthy
rabbitmq :
condition : service_healthy
clamav :
condition : service_started
networks :
- veza-network
healthcheck :
test : [ "CMD" , "wget" , "--quiet" , "--tries=1" , "--spider" , "http://localhost:8080/api/v1/health" ]
interval : 10s
timeout : 5s
retries : 3
stream-server-blue :
build :
context : ./veza-stream-server
dockerfile : Dockerfile.production
image : veza-stream-server:latest
container_name : veza_stream_server_blue
restart : unless-stopped
environment :
- DATABASE_URL=postgres://${DB_USER:-veza}:${DB_PASS:?DB_PASS must be set}@postgres:5432/${DB_NAME:-veza}?sslmode=require
- REDIS_URL=redis://:${REDIS_PASSWORD:?REDIS_PASSWORD must be set}@redis:6379
2026-03-12 04:40:53 +00:00
# SECURITY(HIGH-002): Stream server uses public key only (verification)
- JWT_PUBLIC_KEY_PATH=${JWT_PUBLIC_KEY_PATH:-/secrets/jwt_public.pem}
2026-02-23 18:52:19 +00:00
- PORT=3001
- HLS_OUTPUT_DIR=/data/hls
volumes :
- hls_prod_data:/data/hls
depends_on :
postgres :
condition : service_healthy
redis :
condition : service_healthy
networks :
- veza-network
healthcheck :
test : [ "CMD" , "wget" , "--quiet" , "--tries=1" , "--spider" , "http://localhost:3001/health" ]
interval : 10s
timeout : 5s
retries : 3
2026-01-03 21:59:48 +00:00
2026-02-23 18:52:19 +00:00
stream-server-green :
2026-01-03 21:59:48 +00:00
build :
context : ./veza-stream-server
dockerfile : Dockerfile.production
2026-01-11 15:30:43 +00:00
image : veza-stream-server:latest
2026-02-23 18:52:19 +00:00
container_name : veza_stream_server_green
2026-01-03 21:59:48 +00:00
restart : unless-stopped
environment :
2026-02-11 21:43:09 +00:00
- DATABASE_URL=postgres://${DB_USER:-veza}:${DB_PASS:?DB_PASS must be set}@postgres:5432/${DB_NAME:-veza}?sslmode=require
2026-02-22 16:24:12 +00:00
- REDIS_URL=redis://:${REDIS_PASSWORD:?REDIS_PASSWORD must be set}@redis:6379
2026-03-12 04:40:53 +00:00
# SECURITY(HIGH-002): Stream server uses public key only (verification)
- JWT_PUBLIC_KEY_PATH=${JWT_PUBLIC_KEY_PATH:-/secrets/jwt_public.pem}
2026-01-11 15:30:43 +00:00
- PORT=3001
2026-02-22 20:20:35 +00:00
- HLS_OUTPUT_DIR=/data/hls
volumes :
- hls_prod_data:/data/hls
2026-01-03 21:59:48 +00:00
depends_on :
postgres :
condition : service_healthy
2026-01-11 15:30:43 +00:00
redis :
2026-01-03 21:59:48 +00:00
condition : service_healthy
2026-01-11 15:30:43 +00:00
networks :
- veza-network
healthcheck :
test : [ "CMD" , "wget" , "--quiet" , "--tries=1" , "--spider" , "http://localhost:3001/health" ]
interval : 10s
timeout : 5s
retries : 3
2026-01-03 21:59:48 +00:00
feat(v0.501): Sprint 1 -- infrastructure foundations
- Add MinIO S3-compatible storage to docker-compose (dev, staging, prod)
- Create migrations 103-108 (waveform_url, user_folders, user_files,
user_storage_quotas, gear_items.is_public, gear_images)
- Add Go models: UserFile, UserFolder, StorageQuota, GearImage
- Add WaveformURL to Track model, IsPublic + GearImages to GearItem model
2026-02-22 17:10:25 +00:00
minio :
2026-04-20 18:32:01 +00:00
image : minio/minio:RELEASE.2025-09-07T16-13-09Z
feat(v0.501): Sprint 1 -- infrastructure foundations
- Add MinIO S3-compatible storage to docker-compose (dev, staging, prod)
- Create migrations 103-108 (waveform_url, user_folders, user_files,
user_storage_quotas, gear_items.is_public, gear_images)
- Add Go models: UserFile, UserFolder, StorageQuota, GearImage
- Add WaveformURL to Track model, IsPublic + GearImages to GearItem model
2026-02-22 17:10:25 +00:00
container_name : veza_minio
restart : unless-stopped
command : server /data --console-address ":9001"
environment :
MINIO_ROOT_USER : ${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
MINIO_ROOT_PASSWORD : ${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
volumes :
- minio_data:/data
networks :
- veza-network
healthcheck :
test : [ "CMD" , "mc" , "ready" , "local" ]
interval : 10s
timeout : 5s
retries : 3
minio-init :
2026-04-20 18:32:01 +00:00
image : minio/mc:RELEASE.2025-09-07T05-25-40Z
feat(v0.501): Sprint 1 -- infrastructure foundations
- Add MinIO S3-compatible storage to docker-compose (dev, staging, prod)
- Create migrations 103-108 (waveform_url, user_folders, user_files,
user_storage_quotas, gear_items.is_public, gear_images)
- Add Go models: UserFile, UserFolder, StorageQuota, GearImage
- Add WaveformURL to Track model, IsPublic + GearImages to GearItem model
2026-02-22 17:10:25 +00:00
depends_on :
minio :
condition : service_healthy
entrypoint : >
/bin/sh -c "
mc alias set veza http://minio:9000 $${MINIO_ROOT_USER} $${MINIO_ROOT_PASSWORD};
mc mb --ignore-existing veza/veza-files;
exit 0;
"
environment :
MINIO_ROOT_USER : ${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
MINIO_ROOT_PASSWORD : ${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
networks :
- veza-network
2026-02-23 18:54:55 +00:00
# ============================================================================
feat(infra): add coturn service + wire WEBRTC_TURN_* envs in compose
WebRTC 1:1 calls were silently broken behind symmetric NAT (corporate
firewalls, mobile CGNAT, Incus default networking) because no TURN
relay was deployed. The /api/v1/config/webrtc endpoint and the
useWebRTC frontend hook were both wired correctly from v1.0.9 Day 1,
but with no TURN box on the network the handler returned STUN-only
and the SPA's `nat.hasTurn` flag stayed false.
Added :
* docker-compose.prod.yml: new `coturn` service using the official
coturn/coturn:4.6.2 image, network_mode: host (UDP relay range
49152-65535 doesn't survive Docker NAT), config passed entirely
via CLI args so no template render is needed. TLS cert volume
points at /etc/letsencrypt/live/turn.veza.fr by default; override
with TURN_CERT_DIR for non-LE setups. Healthcheck uses nc -uz to
catch crashed/unbound listeners.
* Both backend services (blue + green): WEBRTC_STUN_URLS,
WEBRTC_TURN_URLS, WEBRTC_TURN_USERNAME, WEBRTC_TURN_CREDENTIAL
pulled from env with `:?` strict-fail markers so a misconfigured
deploy crashes loudly instead of degrading silently to STUN-only.
* docker-compose.staging.yml: same 4 env vars but with safe fallback
defaults (Google STUN, no TURN) so staging boots without a coturn
box. Operators can flip to relay by setting the envs externally.
Operator must set the following secrets at deploy time :
WEBRTC_TURN_PUBLIC_IP the host's public IP (used both by coturn
--external-ip and by the backend STUN/TURN
URLs the SPA receives)
WEBRTC_TURN_USERNAME static long-term credential username
WEBRTC_TURN_CREDENTIAL static long-term credential password
WEBRTC_TURN_REALM optional, defaults to turn.veza.fr
Smoke test : turnutils_uclient -u $USER -w $CRED -p 3478 $PUBLIC_IP
should return a relay allocation within ~1s. From the SPA, watch
chrome://webrtc-internals during a call and confirm the selected
candidate pair is `relay` when both peers are on symmetric NAT.
The Ansible role under infra/coturn/ is the canonical Incus-native
deploy path documented in infra/coturn/README.md; this compose
service is the simpler single-host option that unblocks calls today.
v1.1 will switch from static to ephemeral REST-shared-secret
credentials per ORIGIN_SECURITY_FRAMEWORK.md.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-30 13:44:12 +00:00
# COTURN — TURN/STUN relay for WebRTC NAT traversal (v1.0.10 polish)
# ----------------------------------------------------------------------------
# Calls (1:1 audio/video) signal through chat WebSocket but the actual
# media stream needs a relay when both peers are behind symmetric NAT.
# Without this service, every call between users on corporate firewalls,
# mobile CGNAT or Incus default networking will silently fail with
# iceConnectionState=failed after ~30s.
#
# network_mode: host is REQUIRED — coturn allocates UDP ports in the
# 49152-65535 range for media relay, and Docker's NAT layer drops them.
# Host networking exposes the host's public IP directly, which is what
# WEBRTC_TURN_PUBLIC_IP must point at (so coturn advertises the right
# candidate to remote peers).
#
# The infra/coturn/README.md describes a parallel Incus-native deploy
# path; this compose service is the simpler dev/single-host option.
# If you run prod on multiple hosts behind a load balancer, prefer the
# Ansible/Incus path so coturn lives on a host with a stable public IP.
# ============================================================================
coturn :
image : coturn/coturn:4.6.2
container_name : veza_coturn
restart : unless-stopped
network_mode : host
command :
- "-n"
- "--listening-port=3478"
- "--tls-listening-port=5349"
- "--external-ip=${WEBRTC_TURN_PUBLIC_IP:?WEBRTC_TURN_PUBLIC_IP must be set (the public IP coturn advertises to peers)}"
- "--realm=${WEBRTC_TURN_REALM:-turn.veza.fr}"
- "--lt-cred-mech"
- "--user=${WEBRTC_TURN_USERNAME:?WEBRTC_TURN_USERNAME must be set}:${WEBRTC_TURN_CREDENTIAL:?WEBRTC_TURN_CREDENTIAL must be set}"
- "--min-port=49152"
- "--max-port=65535"
- "--no-cli"
- "--no-tlsv1"
- "--no-tlsv1_1"
- "--cert=/etc/coturn/cert.pem"
- "--pkey=/etc/coturn/key.pem"
volumes :
# Map the TLS cert dir read-only. Default points at a Let's Encrypt
# rotation managed outside this compose (certbot on the host or
# similar). Override TURN_CERT_DIR for self-signed dev certs.
- ${TURN_CERT_DIR:-/etc/letsencrypt/live/turn.veza.fr}:/etc/coturn:ro
healthcheck :
# nc -uz checks UDP/3478 is bound; doesn't validate auth but catches
# crashes / cert-load failures cleanly.
test : [ "CMD-SHELL" , "nc -zu localhost 3478 || exit 1" ]
interval : 30s
timeout : 5s
retries : 3
# ============================================================================
2026-02-23 18:54:55 +00:00
# MONITORING - Alertmanager
# Set SLACK_WEBHOOK_URL for Slack notifications. Works with Prometheus.
# ============================================================================
alertmanager :
image : prom/alertmanager:v0.26.0
container_name : veza_alertmanager
restart : unless-stopped
ports :
- "9093:9093"
volumes :
- ./config/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
command :
- '--config.file=/etc/alertmanager/alertmanager.yml'
- '--storage.path=/alertmanager'
environment :
- SLACK_WEBHOOK_URL=${SLACK_WEBHOOK_URL:-}
networks :
- veza-network
2026-02-23 18:52:19 +00:00
web-blue :
build :
context : ./apps/web
dockerfile : Dockerfile.production
image : veza-web:latest
container_name : veza_web_blue
restart : unless-stopped
environment :
- VITE_API_URL=http://haproxy/api/v1
- VITE_STREAM_URL=ws://haproxy/stream
- VITE_UPLOAD_URL=http://haproxy/api/v1/uploads
depends_on :
- backend-api-blue
- stream-server-blue
networks :
- veza-network
healthcheck :
test : [ "CMD" , "wget" , "--quiet" , "--tries=1" , "--spider" , "http://localhost:5173" ]
interval : 10s
timeout : 5s
retries : 3
web-green :
2026-01-03 21:59:48 +00:00
build :
context : ./apps/web
dockerfile : Dockerfile.production
2026-01-11 15:30:43 +00:00
image : veza-web:latest
2026-02-23 18:52:19 +00:00
container_name : veza_web_green
2026-01-03 21:59:48 +00:00
restart : unless-stopped
environment :
2026-01-11 15:30:43 +00:00
- VITE_API_URL=http://haproxy/api/v1
- VITE_STREAM_URL=ws://haproxy/stream
- VITE_UPLOAD_URL=http://haproxy/api/v1/uploads
2026-01-03 21:59:48 +00:00
depends_on :
2026-02-23 18:52:19 +00:00
- backend-api-green
- stream-server-green
2026-01-11 15:30:43 +00:00
networks :
- veza-network
healthcheck :
test : [ "CMD" , "wget" , "--quiet" , "--tries=1" , "--spider" , "http://localhost:5173" ]
interval : 10s
timeout : 5s
retries : 3
2026-01-03 21:59:48 +00:00
2026-01-11 15:30:43 +00:00
# ============================================================================
2026-02-23 18:52:19 +00:00
# REVERSE PROXY - HAProxy (Blue-Green)
2026-01-11 15:30:43 +00:00
# ============================================================================
haproxy :
image : haproxy:2.8-alpine
container_name : veza_haproxy
restart : unless-stopped
2026-02-15 14:58:51 +00:00
deploy :
resources :
limits :
cpus : '0.5'
memory : 128M
2026-01-11 15:30:43 +00:00
ports :
- "${PORT_HAPROXY:-80}:80"
- "443:443"
volumes :
- ./config/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
2026-02-15 14:58:51 +00:00
- ./config/ssl:/etc/ssl/veza:ro
2026-01-11 15:30:43 +00:00
depends_on :
2026-02-23 18:52:19 +00:00
- backend-api-blue
- backend-api-green
- stream-server-blue
- stream-server-green
- web-blue
- web-green
2026-01-11 15:30:43 +00:00
networks :
- veza-network
healthcheck :
test : [ "CMD" , "haproxy" , "-c" , "-f" , "/usr/local/etc/haproxy/haproxy.cfg" ]
interval : 10s
timeout : 5s
retries : 3
networks:
  veza-network:
    driver: bridge
    ipam:
      config:
        # Fixed: subnet value was garbled ("172.20 .0 .0 /16" — embedded
        # spaces make it an invalid CIDR).
        - subnet: 172.20.0.0/16
# Named volumes (default local driver). hls_prod_data is shared between the
# backend and stream servers.
volumes:
  postgres_data:
  redis_data:
  rabbitmq_data:
  hyperswitch_postgres_data:
  minio_data:
  hls_prod_data: