veza/docker-compose.yml
senke 9cd0da0046 fix(v0.12.6): apply all pentest remediations — 36 findings across 36 files
CRITICAL fixes:
- Race condition (TOCTOU) in payout/refund with SELECT FOR UPDATE (CRITICAL-001/002)
- IDOR on analytics endpoint — ownership check enforced (CRITICAL-003)
- CSWSH on all WebSocket endpoints — origin whitelist (CRITICAL-004)
- Mass assignment on user self-update — strip privileged fields (CRITICAL-005)

HIGH fixes:
- Path traversal in marketplace upload — UUID filenames (HIGH-001)
- IP spoofing — use Gin trusted proxy c.ClientIP() (HIGH-002)
- Popularity metrics (followers, likes) set to json:"-" (HIGH-003)
- bcrypt cost hardened to 12 everywhere (HIGH-004)
- Refresh token lock made mandatory (HIGH-005)
- Stream token replay prevention with access_count (HIGH-006)
- Subscription trial race condition fixed (HIGH-007)
- License download expiration check (HIGH-008)
- Webhook amount validation (HIGH-009)
- pprof endpoint removed from production (HIGH-010)

MEDIUM fixes:
- WebSocket message size limit 64KB (MEDIUM-010)
- HSTS header in nginx production (MEDIUM-001)
- CORS origin restricted in nginx-rtmp (MEDIUM-002)
- Docker alpine pinned to 3.21 (MEDIUM-003/004)
- Redis authentication enforced (MEDIUM-005)
- GDPR account deletion expanded (MEDIUM-006)
- .gitignore hardened (MEDIUM-007)

LOW/INFO fixes:
- GitHub Actions SHA pinning on all workflows (LOW-001)
- .env.example security documentation (INFO-001)
- Production CORS set to HTTPS (LOW-002)

All tests pass. Go and Rust compile clean.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-14 00:44:46 +01:00

317 lines
9.3 KiB
YAML

services:
  # PostgreSQL - Primary Database
  # Limit: 256MB RAM. Sufficient for local dev schemas.
  # Port 15432 avoids conflict with other projects using default 5432
  postgres:
    image: postgres:16-alpine
    container_name: veza_postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-veza}
      # Use .env for real values; default is for local dev only
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
      POSTGRES_DB: ${POSTGRES_DB:-veza}
    ports:
      - "${PORT_POSTGRES:-15432}:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      # FIX: probe with the configurable user instead of the hard-coded "veza" —
      # the old check failed whenever POSTGRES_USER was overridden in .env
      # (same pattern as the hyperswitch_postgres healthcheck).
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-veza}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 256M
        reservations:
          memory: 128M
# Redis - Cache & PubSub
# Limit: 64MB RAM. Port 16379 avoids conflict with other projects (default 6379)
redis:
image: redis:7-alpine
container_name: veza_redis
restart: unless-stopped
# SECURITY(REM-023): Require password even in development
command: redis-server --requirepass ${REDIS_PASSWORD:-devpassword}
ports:
- "${PORT_REDIS:-16379}:6379"
volumes:
- redis_data:/data
healthcheck:
test: [ "CMD", "redis-cli", "-a", "${REDIS_PASSWORD:-devpassword}", "ping" ]
interval: 5s
timeout: 3s
retries: 5
networks:
- veza-net
deploy:
resources:
limits:
cpus: '0.25'
memory: 64M
reservations:
memory: 32M
# ClamAV - Virus scanning for uploads
# SECURITY(MEDIUM-003): Pin ClamAV image to specific version instead of :latest
clamav:
image: clamav/clamav:1.4
container_name: veza_clamav
restart: unless-stopped
ports:
- "${PORT_CLAMAV:-13310}:3310"
networks:
- veza-net
healthcheck:
test: ["CMD", "clamdscan", "--ping", "1"]
interval: 30s
timeout: 10s
retries: 5
start_period: 180s
deploy:
resources:
limits:
cpus: '0.5'
memory: 1G
# RabbitMQ - Message Broker
# Limit: 256MB RAM. Host 15672->AMQP(5672), 25672->Management(15672)
rabbitmq:
image: rabbitmq:3-management-alpine
container_name: veza_rabbitmq
restart: unless-stopped
environment:
RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER:-veza}
# Use .env for real values; default is for local dev only
RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS:-devpassword}
ports:
- "${PORT_RABBITMQ_AMQP:-15672}:5672" # AMQP (messaging)
- "${PORT_RABBITMQ_MGMT:-25672}:15672" # Management UI
volumes:
- rabbitmq_data:/var/lib/rabbitmq
healthcheck:
test: rabbitmq-diagnostics -q ping
interval: 5s
timeout: 5s
retries: 10
start_period: 40s
networks:
- veza-net
deploy:
resources:
limits:
cpus: '0.50'
memory: 512M
reservations:
memory: 256M
# Hyperswitch - Payment router (optional, for payment integration)
hyperswitch_postgres:
image: postgres:16-alpine
container_name: veza_hyperswitch_postgres
restart: unless-stopped
environment:
POSTGRES_USER: ${HYPERSWITCH_DB_USER:-hyperswitch}
POSTGRES_PASSWORD: ${HYPERSWITCH_DB_PASSWORD:-hyperswitch_dev}
POSTGRES_DB: ${HYPERSWITCH_DB_NAME:-hyperswitch}
volumes:
- hyperswitch_postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${HYPERSWITCH_DB_USER:-hyperswitch}"]
interval: 5s
timeout: 5s
retries: 5
networks:
- veza-net
deploy:
resources:
limits:
cpus: "0.25"
memory: 128M
profiles:
- payments
# SECURITY(LOW-002): Pin to specific version. See https://github.com/juspay/hyperswitch/releases
hyperswitch:
image: juspaydotin/hyperswitch-router:2026.03.11.0-standalone
container_name: veza_hyperswitch
restart: unless-stopped
environment:
DATABASE_URL: postgresql://${HYPERSWITCH_DB_USER:-hyperswitch}:${HYPERSWITCH_DB_PASSWORD:-hyperswitch_dev}@hyperswitch_postgres:5432/${HYPERSWITCH_DB_NAME:-hyperswitch}?sslmode=disable
REDIS_URL: redis://redis:6379
ports:
- "${PORT_HYPERSWITCH:-18081}:8080"
depends_on:
hyperswitch_postgres:
condition: service_healthy
redis:
condition: service_healthy
networks:
- veza-net
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"]
interval: 10s
timeout: 5s
retries: 3
profiles:
- payments
# Backend API (Docker dev)
backend-api:
build:
context: ./veza-backend-api
container_name: veza_backend_dev
environment:
- APP_ENV=development
- DATABASE_URL=postgresql://${POSTGRES_USER:-veza}:${POSTGRES_PASSWORD:-devpassword}@postgres:5432/${POSTGRES_DB:-veza}?sslmode=disable
- REDIS_URL=redis://:${REDIS_PASSWORD:-devpassword}@redis:6379
- JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set in .env}
- COOKIE_SECURE=false # false en dev local
- COOKIE_SAME_SITE=strict
- COOKIE_DOMAIN=
- COOKIE_HTTP_ONLY=true
- COOKIE_PATH=/
- CORS_ALLOWED_ORIGINS=http://veza.fr:3000,http://veza.fr:5173
- RABBITMQ_URL=amqp://${RABBITMQ_DEFAULT_USER:-veza}:${RABBITMQ_DEFAULT_PASS:-devpassword}@rabbitmq:5672/
- ENABLE_CLAMAV=true
- CLAMAV_REQUIRED=false
- CLAMAV_ADDRESS=clamav:3310
- AWS_S3_ENDPOINT=http://minio:9000
- AWS_S3_BUCKET=veza-files
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-minioadmin}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-minioadmin}
- AWS_REGION=us-east-1
- HLS_STREAMING=true
- HLS_STORAGE_DIR=/data/hls
volumes:
- hls-data:/data/hls
ports:
- "${PORT_BACKEND:-18080}:8080"
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
rabbitmq:
condition: service_healthy
clamav:
condition: service_started
networks:
- veza-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/api/v1/health"]
interval: 10s
timeout: 5s
retries: 5
# Chat Server removed in v0.502 -- chat is now handled by backend-api WebSocket at /api/v1/ws
# Nginx-RTMP - v0.10.6 F471: Live stream ingest (OBS -> RTMP, HLS out)
nginx-rtmp:
image: alfg/nginx-rtmp
container_name: veza_nginx_rtmp
restart: unless-stopped
ports:
- "${PORT_RTMP:-1935}:1935"
- "${PORT_RTMP_HTTP:-18083}:8080"
volumes:
- ./infra/nginx-rtmp/nginx.conf:/etc/nginx/nginx.conf:ro
depends_on:
- backend-api
networks:
- veza-net
profiles:
- live
# Stream Server (Rust) - v0.101
stream-server:
build:
context: .
dockerfile: veza-stream-server/Dockerfile
container_name: veza_stream_dev
environment:
- DATABASE_URL=postgresql://${POSTGRES_USER:-veza}:${POSTGRES_PASSWORD:-devpassword}@postgres:5432/${POSTGRES_DB:-veza}?sslmode=disable
- REDIS_URL=redis://:${REDIS_PASSWORD:-devpassword}@redis:6379
- JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set in .env}
- SECRET_KEY=${JWT_SECRET:?JWT_SECRET must be set in .env}
- PORT=3001
- AWS_S3_ENDPOINT=http://minio:9000
- AWS_S3_BUCKET=veza-files
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-minioadmin}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-minioadmin}
- AWS_REGION=us-east-1
- HLS_OUTPUT_DIR=/data/hls
volumes:
- hls-data:/data/hls
ports:
- "${PORT_STREAM:-18082}:3001"
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
networks:
- veza-net
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3001/health"]
interval: 10s
timeout: 5s
retries: 5
# MinIO - S3-compatible object storage (v0.501 Cloud Storage)
minio:
image: minio/minio:latest
container_name: veza_minio
restart: unless-stopped
command: server /data --console-address ":9001"
environment:
MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-minioadmin}
MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
ports:
- "${PORT_MINIO:-19000}:9000"
- "${PORT_MINIO_CONSOLE:-19001}:9001"
volumes:
- minio_data:/data
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 10s
timeout: 5s
retries: 5
networks:
- veza-net
# MinIO bucket initialization
minio-init:
image: minio/mc:latest
depends_on:
minio:
condition: service_healthy
entrypoint: >
/bin/sh -c "
mc alias set veza http://minio:9000 $${MINIO_ROOT_USER:-minioadmin} $${MINIO_ROOT_PASSWORD:-minioadmin};
mc mb --ignore-existing veza/veza-files;
mc anonymous set none veza/veza-files/public;
exit 0;
"
environment:
MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-minioadmin}
MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
networks:
- veza-net
# Named volumes: data survives `docker compose down` (removed only with -v)
volumes:
  postgres_data:
  redis_data:
  rabbitmq_data:
  hyperswitch_postgres_data:
  minio_data:
  hls-data:

# Single bridge network shared by all services
networks:
  veza-net:
    driver: bridge