The prod and staging compose files were passing AWS_S3_ENDPOINT,
AWS_S3_BUCKET, AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY but NOT
the two flags that actually activate the routing:
- AWS_S3_ENABLED (default false in code → S3 stack skipped)
- TRACK_STORAGE_BACKEND (default "local" in code → uploads to disk)
So both prod and staging deploys were silently writing track uploads
to local disk despite the apparent S3 wiring. With blue/green running
active/active behind HAProxy, that's an HA bug: uploads that land on the
blue instance aren't visible to green, and vice versa.
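For illustration, a minimal sketch of how env-driven defaults of this shape
behave; the helper and function names below are hypothetical, not the actual
config.go code:

package config

import "os"

// envOr is a stand-in for whatever lookup helper the real loader uses;
// it returns the environment value if set, otherwise the fallback.
func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

// loadStorageConfig shows why the missing flags left the AWS_S3_* credentials
// inert: with AWS_S3_ENABLED and TRACK_STORAGE_BACKEND absent from the
// environment, both settings fall back to their defaults (disabled + local).
func loadStorageConfig() (s3Enabled bool, backend string) {
	s3Enabled = envOr("AWS_S3_ENABLED", "false") == "true"
	backend = envOr("TRACK_STORAGE_BACKEND", "local")
	return
}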
Set both flags in:
- docker-compose.staging.yml backend service (1 instance)
- docker-compose.prod.yml backend_blue + backend_green (2 instances,
same env block via replace_all)
The code already validates on startup that TRACK_STORAGE_BACKEND=s3
requires AWS_S3_ENABLED=true (config.go:1040-1042), so a partial
config now fails loudly instead of silently falling back to local.
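A minimal sketch of that guard, assuming illustrative struct and field names
(the real check lives around config.go:1040-1042):

package config

import "fmt"

// Config holds only the two fields relevant here; the real struct has many more.
type Config struct {
	AWSS3Enabled        bool   // AWS_S3_ENABLED
	TrackStorageBackend string // TRACK_STORAGE_BACKEND ("local" or "s3")
}

// validateStorage mirrors the fail-loud startup check described above;
// names are illustrative, not the actual code.
func (c *Config) validateStorage() error {
	if c.TrackStorageBackend == "s3" && !c.AWSS3Enabled {
		return fmt.Errorf("TRACK_STORAGE_BACKEND=s3 requires AWS_S3_ENABLED=true")
	}
	return nil
}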
The S3StorageService is already implemented (services/s3_storage_service.go)
and wired into TrackService.UploadTrack via the storageBackend dispatcher
(core/track/service.go:432); a rough sketch of that dispatch follows below.
HLS segment output remains on the hls_*_data volume; that's a separate
concern (the stream server writes segments locally) and is out of scope
for this compose-only fix.
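The sketch, with hypothetical interface and method names (the real code at
core/track/service.go:432 will differ):

package track

import (
	"context"
	"io"
)

// blobStore is a stand-in for the real storage services' interface.
type blobStore interface {
	Upload(ctx context.Context, key string, r io.Reader) (string, error)
}

// TrackService is trimmed to the fields this sketch needs.
type TrackService struct {
	storageBackend string    // from TRACK_STORAGE_BACKEND: "local" or "s3"
	s3Storage      blobStore // S3StorageService (MinIO in staging)
	localStorage   blobStore // local-disk writer
}

// storeUpload sketches the backend dispatch UploadTrack runs through:
// "s3" routes to the S3StorageService, anything else writes locally,
// which is what both environments were silently doing before this fix.
func (s *TrackService) storeUpload(ctx context.Context, key string, r io.Reader) (string, error) {
	if s.storageBackend == "s3" {
		return s.s3Storage.Upload(ctx, key, r)
	}
	return s.localStorage.Upload(ctx, key, r)
}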
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
docker-compose.staging.yml (207 lines, 6.4 KiB, YAML):
version: '3.8'

services:
  # --- INFRASTRUCTURE ---

  postgres:
    image: postgres:16-alpine
    container_name: veza_postgres_staging
    restart: unless-stopped
    environment:
      POSTGRES_USER: veza
      POSTGRES_PASSWORD: ${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}
      POSTGRES_DB: veza_staging
    volumes:
      - postgres_staging_data:/var/lib/postgresql/data
    healthcheck:
      test: [ "CMD-SHELL", "pg_isready -U veza" ]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    container_name: veza_redis_staging
    restart: unless-stopped
    command: redis-server --save 60 1 --loglevel warning
    volumes:
      - redis_staging_data:/data
    healthcheck:
      test: [ "CMD", "redis-cli", "ping" ]
      interval: 10s

  rabbitmq:
    image: rabbitmq:3-management-alpine
    container_name: veza_rabbitmq_staging
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: veza
      RABBITMQ_DEFAULT_PASS: ${STAGING_RABBITMQ_PASSWORD:?STAGING_RABBITMQ_PASSWORD must be set}
    volumes:
      - rabbitmq_staging_data:/var/lib/rabbitmq
    healthcheck:
      test: rabbitmq-diagnostics -q ping
      interval: 20s

  # --- APPLICATION SERVICES ---

  backend:
    build:
      context: ./veza-backend-api
      dockerfile: Dockerfile.production
    container_name: veza_backend_staging
    restart: unless-stopped
    environment:
      - APP_ENV=staging
      - DB_HOST=postgres
      - DB_PORT=5432
      - DB_USER=veza
      - DB_PASSWORD=${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}
      - DB_NAME=veza_staging
      - DATABASE_URL=postgresql://veza:${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}@postgres:5432/veza_staging?sslmode=${STAGING_DB_SSLMODE:-disable}
      - REDIS_URL=redis://redis:6379
      - RABBITMQ_URL=amqp://veza:${STAGING_RABBITMQ_PASSWORD:?STAGING_RABBITMQ_PASSWORD must be set}@rabbitmq:5672/%2f
      - JWT_SECRET=${STAGING_JWT_SECRET}
      - ENABLE_CLAMAV=false
      - LOG_DIR=/var/log/veza
      - LOG_LEVEL=INFO
      # Cookie Security Settings (Staging)
      - COOKIE_SECURE=true # true in staging (HTTPS required)
      - COOKIE_SAME_SITE=strict # strict for maximum security
      - COOKIE_DOMAIN=${STAGING_COOKIE_DOMAIN:-.staging.veza.app}
      - COOKIE_HTTP_ONLY=true
      - COOKIE_PATH=/
      - CORS_ALLOWED_ORIGINS=${STAGING_CORS_ORIGINS:-https://staging.veza.app,https://staging-api.veza.app}
      - AWS_S3_ENDPOINT=http://minio:9000
      - AWS_S3_BUCKET=veza-files
      - AWS_ACCESS_KEY_ID=${STAGING_S3_ACCESS_KEY:?STAGING_S3_ACCESS_KEY must be set}
      - AWS_SECRET_ACCESS_KEY=${STAGING_S3_SECRET_KEY:?STAGING_S3_SECRET_KEY must be set}
      - AWS_REGION=us-east-1
      # v1.0.10 polish: enable the S3 stack and route track uploads through
      # MinIO end-to-end. Without these two flags, defaults (local +
      # disabled) win and the AWS_S3_* credentials above are inert.
      - AWS_S3_ENABLED=true
      - TRACK_STORAGE_BACKEND=s3
      - HLS_STREAMING=true
      - HLS_STORAGE_DIR=/data/hls
    volumes:
      - veza_logs_staging:/var/log/veza
      - hls_staging_data:/data/hls
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
      rabbitmq:
        condition: service_healthy
    ports:
      - "8080:8080"
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/api/v1/health"]
      interval: 10s
      timeout: 5s
      retries: 3

  # Chat Server removed in v0.502 -- chat is now handled by backend WebSocket at /api/v1/ws

  stream-server:
    build:
      context: ./veza-stream-server
      dockerfile: Dockerfile.production
    container_name: veza_stream_staging
    restart: unless-stopped
    environment:
      - DATABASE_URL=postgresql://veza:${STAGING_DB_PASSWORD:?STAGING_DB_PASSWORD must be set}@postgres:5432/veza_staging?sslmode=${STAGING_DB_SSLMODE:-disable}
      - REDIS_URL=redis://redis:6379
      - JWT_SECRET=${STAGING_JWT_SECRET:?STAGING_JWT_SECRET must be set}
      - PORT=3001
      - HLS_OUTPUT_DIR=/data/hls
    volumes:
      - hls_staging_data:/data/hls
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3001/health"]
      interval: 10s
      timeout: 5s
      retries: 3

  frontend:
    build:
      context: ./apps/web
      dockerfile: Dockerfile.production
    container_name: veza_frontend_staging
    restart: unless-stopped
    environment:
      - VITE_API_URL=/api/v1
      - VITE_STREAM_URL=ws://caddy/stream
      - VITE_APP_ENV=staging
    depends_on:
      - backend
      - stream-server
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:5173"]
      interval: 10s
      timeout: 5s
      retries: 3

  caddy:
    image: caddy:2-alpine
    container_name: veza_caddy_staging
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./config/caddy/Caddyfile.staging:/etc/caddy/Caddyfile:ro
      - caddy_data:/data
      - caddy_config:/config
    depends_on:
      - backend
      - stream-server
      - frontend

  minio:
    image: minio/minio:RELEASE.2025-09-07T16-13-09Z
    container_name: veza_minio_staging
    restart: unless-stopped
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${STAGING_S3_ACCESS_KEY:?STAGING_S3_ACCESS_KEY must be set}
      MINIO_ROOT_PASSWORD: ${STAGING_S3_SECRET_KEY:?STAGING_S3_SECRET_KEY must be set}
    volumes:
      - minio_staging_data:/data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 10s
      timeout: 5s
      retries: 5

  minio-init:
    image: minio/mc:RELEASE.2025-09-07T05-25-40Z
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: >
      /bin/sh -c "
      mc alias set veza http://minio:9000 $${MINIO_ROOT_USER} $${MINIO_ROOT_PASSWORD};
      mc mb --ignore-existing veza/veza-files;
      exit 0;
      "
    environment:
      MINIO_ROOT_USER: ${STAGING_S3_ACCESS_KEY:?STAGING_S3_ACCESS_KEY must be set}
      MINIO_ROOT_PASSWORD: ${STAGING_S3_SECRET_KEY:?STAGING_S3_SECRET_KEY must be set}

volumes:
  postgres_staging_data:
  redis_staging_data:
  rabbitmq_staging_data:
  veza_logs_staging:
  caddy_data:
  caddy_config:
  minio_staging_data:
  hls_staging_data: