veza/docker-compose.yml
senke 9002e91d91 refactor(backend,infra): unify SMTP env schema on canonical SMTP_* names
Third item of the v1.0.6 backlog. The v1.0.5.1 hotfix surfaced that two
email paths in-tree read *different* env vars for the same configuration:

    internal/email/sender.go         internal/services/email_service.go
    SMTP_USERNAME                    SMTP_USER
    SMTP_FROM                        FROM_EMAIL
    SMTP_FROM_NAME                   FROM_NAME

The hotfix worked around it by exporting both sets in `.env.template`.
This commit reconciles them onto a single schema so the workaround can
go away.

Changes
  * `internal/email/sender.go` is now the single loader. The canonical
    names (`SMTP_USERNAME`, `SMTP_FROM`, `SMTP_FROM_NAME`) are read
    first; the legacy names (`SMTP_USER`, `FROM_EMAIL`, `FROM_NAME`)
    stay supported as a migration fallback that logs a structured
    deprecation warning ("remove_in: v1.1.0"). Canonical always wins
    over deprecated — no silent precedence flip (fallback sketched
    after this list).
  * `NewSMTPEmailSender` callers keep working unchanged; a new
    `LoadSMTPConfigFromEnvWithLogger(*zap.Logger)` variant lets callers
    opt into the warning stream.
  * `internal/services/email_service.go` drops its six inline
    `os.Getenv` reads and delegates to the shared loader, so
    `AuthService.Register` and `RequestPasswordReset` now see exactly
    the same config as the async job worker.
  * `.env.template`: the duplicate (SMTP_USER + FROM_EMAIL + FROM_NAME)
    block added in v1.0.5.1 is removed — only the canonical SMTP_*
    names ship for new contributors.
  * `docker-compose.yml` (backend-api service): FROM_EMAIL / FROM_NAME
    renamed to SMTP_FROM / SMTP_FROM_NAME to match the canonical schema.
  * No Host/Port default is injected in the loader. If SMTP_HOST is
    empty, callers see Host == "" and fall back to log-only delivery
    (historic dev behavior).
    Dev defaults (MailHog localhost:1025) live in `.env.template`, so
    a fresh clone still works; a misconfigured prod pod fails loud
    instead of silently dialing localhost.
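
For reviewers, the fallback described above has roughly this shape
(`envFirst` is an illustrative name, not necessarily the real helper
in `sender.go`):

    package email

    import (
        "os"

        "go.uber.org/zap"
    )

    // envFirst prefers the canonical variable. When only the deprecated
    // name is set it returns that value and emits one structured
    // deprecation warning; a set canonical name wins silently.
    func envFirst(logger *zap.Logger, canonical, deprecated string) string {
        if v := os.Getenv(canonical); v != "" {
            return v
        }
        v := os.Getenv(deprecated)
        if v != "" && logger != nil {
            logger.Warn("deprecated SMTP env var in use",
                zap.String("deprecated", deprecated),
                zap.String("canonical", canonical),
                zap.String("remove_in", "v1.1.0"))
        }
        return v
    }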

Tests
  * 5 new Go tests in `internal/email/smtp_env_test.go`: empty-env
    returns empty config; canonical names read directly; deprecated
    names fall back (one warning per var); canonical wins over
    deprecated silently; nil logger is allowed (canonical-wins case
    sketched after this list).
  * Existing `TestLoadSMTPConfigFromEnv`, `TestSMTPEmailSender_Send`,
    and every auth/services package remained green (40+ packages).
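
The canonical-wins case, roughly (the `cfg.Username` field name is an
assumption; adjust to the real config struct):

    package email

    import (
        "testing"

        "go.uber.org/zap"
        "go.uber.org/zap/zaptest/observer"
    )

    func TestCanonicalWinsOverDeprecated(t *testing.T) {
        t.Setenv("SMTP_USERNAME", "canonical@veza.local")
        t.Setenv("SMTP_USER", "legacy@veza.local")
        core, logs := observer.New(zap.WarnLevel)
        cfg := LoadSMTPConfigFromEnvWithLogger(zap.New(core))
        if cfg.Username != "canonical@veza.local" {
            t.Fatalf("want canonical value, got %q", cfg.Username)
        }
        if logs.Len() != 0 {
            t.Fatalf("canonical win must be silent, got %d warnings", logs.Len())
        }
    }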

Import-cycle note: the loader deliberately lives in `internal/email`,
not `internal/config`, because `internal/config` already depends on
`internal/email` (wiring `EmailSender` at boot). Putting the loader in
`email` keeps the dependency flow one-way.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-16 20:44:09 +02:00

services:
  # PostgreSQL - Primary Database
  # Limit: 256MB RAM. Sufficient for local dev schemas.
  # Port 15432 avoids conflict with other projects using default 5432
  postgres:
    image: postgres:16-alpine
    container_name: veza_postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-veza}
      # Use .env for real values; default is for local dev only
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
      POSTGRES_DB: ${POSTGRES_DB:-veza}
    ports:
      - "${PORT_POSTGRES:-15432}:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: [ "CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-veza}" ]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 256M
        reservations:
          memory: 128M
  # Redis - Cache & PubSub
  # Limit: 64MB RAM. Port 16379 avoids conflict with other projects (default 6379)
  redis:
    image: redis:7-alpine
    container_name: veza_redis
    restart: unless-stopped
    # SECURITY(REM-023): Require password even in development
    command: redis-server --requirepass ${REDIS_PASSWORD:-devpassword}
    ports:
      - "${PORT_REDIS:-16379}:6379"
    volumes:
      - redis_data:/data
    healthcheck:
      test: [ "CMD", "redis-cli", "-a", "${REDIS_PASSWORD:-devpassword}", "ping" ]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.25'
          memory: 64M
        reservations:
          memory: 32M
  # MailHog - Local SMTP capture for development
  # Receives every outbound mail from backend-api (SMTP 1025) and exposes a
  # web UI (8025) where devs can inspect verification and password-reset
  # emails without wiring a real SMTP provider.
  mailhog:
    image: mailhog/mailhog:v1.0.1
    container_name: veza_mailhog
    restart: unless-stopped
    ports:
      - "${PORT_MAILHOG_SMTP:-1025}:1025"
      - "${PORT_MAILHOG_UI:-8025}:8025"
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.10'
          memory: 64M
  # ClamAV - Virus scanning for uploads
  # SECURITY(MEDIUM-003): Pin ClamAV image to specific version instead of :latest
  clamav:
    image: clamav/clamav:1.4
    container_name: veza_clamav
    restart: unless-stopped
    ports:
      - "${PORT_CLAMAV:-13310}:3310"
    networks:
      - veza-net
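    # Long start_period covers the initial signature-database load,
    # which can take minutes on first boot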
    healthcheck:
      test: ["CMD", "clamdscan", "--ping", "1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 180s
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 1G
  # RabbitMQ - Message Broker
  # Limit: 512MB RAM (256MB reserved). Host 15672->AMQP(5672), 25672->Management(15672)
  rabbitmq:
    image: rabbitmq:3-management-alpine
    container_name: veza_rabbitmq
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER:-veza}
      # Use .env for real values; default is for local dev only
      RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS:-devpassword}
    ports:
      - "${PORT_RABBITMQ_AMQP:-15672}:5672" # AMQP (messaging)
      - "${PORT_RABBITMQ_MGMT:-25672}:15672" # Management UI
    volumes:
      - rabbitmq_data:/var/lib/rabbitmq
    healthcheck:
      test: rabbitmq-diagnostics -q check_port_connectivity
      interval: 10s
      timeout: 10s
      retries: 12
      start_period: 90s
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 512M
        reservations:
          memory: 256M
  # Hyperswitch - Payment router (optional, for payment integration)
  hyperswitch_postgres:
    image: postgres:16-alpine
    container_name: veza_hyperswitch_postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${HYPERSWITCH_DB_USER:-hyperswitch}
      POSTGRES_PASSWORD: ${HYPERSWITCH_DB_PASSWORD:-hyperswitch_dev}
      POSTGRES_DB: ${HYPERSWITCH_DB_NAME:-hyperswitch}
    volumes:
      - hyperswitch_postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${HYPERSWITCH_DB_USER:-hyperswitch}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: "0.25"
          memory: 128M
    profiles:
      - payments
  # SECURITY(LOW-002): Pin to specific version. See https://github.com/juspay/hyperswitch/releases
  hyperswitch:
    image: juspaydotin/hyperswitch-router:2026.03.11.0-standalone
    container_name: veza_hyperswitch
    restart: unless-stopped
    environment:
      DATABASE_URL: postgresql://${HYPERSWITCH_DB_USER:-hyperswitch}:${HYPERSWITCH_DB_PASSWORD:-hyperswitch_dev}@hyperswitch_postgres:5432/${HYPERSWITCH_DB_NAME:-hyperswitch}?sslmode=disable
      # redis requires auth in this stack (see --requirepass above), so pass the credential
      REDIS_URL: redis://:${REDIS_PASSWORD:-devpassword}@redis:6379
    ports:
      - "${PORT_HYPERSWITCH:-18081}:8080"
    depends_on:
      hyperswitch_postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - veza-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"]
      interval: 10s
      timeout: 5s
      retries: 3
    profiles:
      - payments
  # Backend API (Docker dev) — only started with: docker compose --profile docker-dev up
  backend-api:
    build:
      context: ./veza-backend-api
    container_name: veza_backend_dev
    profiles:
      - docker-dev
    environment:
      - APP_ENV=development
      - DATABASE_URL=postgresql://${POSTGRES_USER:-veza}:${POSTGRES_PASSWORD:-devpassword}@postgres:5432/${POSTGRES_DB:-veza}?sslmode=disable
      - REDIS_URL=redis://:${REDIS_PASSWORD:-devpassword}@redis:6379
      - JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set in .env}
      - COOKIE_SECURE=false # false in local dev
      - COOKIE_SAME_SITE=strict
      - COOKIE_DOMAIN=
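      # empty: presumably the backend omits the Domain attribute, keeping cookies host-only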
      - COOKIE_HTTP_ONLY=true
      - COOKIE_PATH=/
      - CORS_ALLOWED_ORIGINS=http://veza.fr:3000,http://veza.fr:5173
      - RABBITMQ_URL=amqp://${RABBITMQ_DEFAULT_USER:-veza}:${RABBITMQ_DEFAULT_PASS:-devpassword}@rabbitmq:5672/
      - ENABLE_CLAMAV=true
      - CLAMAV_REQUIRED=false
      - CLAMAV_ADDRESS=clamav:3310
      - AWS_S3_ENDPOINT=http://minio:9000
      - AWS_S3_BUCKET=veza-files
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-minioadmin}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-minioadmin}
      - AWS_REGION=us-east-1
      - HLS_STREAMING=true
      - HLS_STORAGE_DIR=/data/hls
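      # Canonical SMTP_* names only (see commit above); MailHog captures all dev mail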
      - SMTP_HOST=mailhog
      - SMTP_PORT=1025
      - SMTP_FROM=${SMTP_FROM:-no-reply@veza.local}
      - SMTP_FROM_NAME=${SMTP_FROM_NAME:-Veza (dev)}
    volumes:
      - hls-data:/data/hls
    ports:
      - "${PORT_BACKEND:-18080}:18080"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
      rabbitmq:
        condition: service_healthy
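      # started, not healthy: ClamAV warms up slowly; CLAMAV_REQUIRED=false tolerates a late scanner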
      clamav:
        condition: service_started
    networks:
      - veza-net
    healthcheck:
      test: ["CMD-SHELL", "wget -q -O /dev/null http://localhost:18080/api/v1/health || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
  # Chat Server removed in v0.502 -- chat is now handled by backend-api WebSocket at /api/v1/ws
  # Nginx-RTMP - v0.10.6 F471: Live stream ingest (OBS -> RTMP, HLS out)
  nginx-rtmp:
    image: alfg/nginx-rtmp
    container_name: veza_nginx_rtmp
    restart: unless-stopped
    ports:
      - "${PORT_RTMP:-1935}:1935"
      - "${PORT_RTMP_HTTP:-18083}:8080"
    volumes:
      - ./infra/nginx-rtmp/nginx.conf:/etc/nginx/nginx.conf:ro
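    # backend-api sits behind the docker-dev profile; run --profile docker-dev --profile live together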
    depends_on:
      - backend-api
    networks:
      - veza-net
    profiles:
      - live
  # Stream Server (Rust) - v0.101 — only started with: docker compose --profile docker-dev up
  stream-server:
    build:
      context: .
      dockerfile: veza-stream-server/Dockerfile
    container_name: veza_stream_dev
    profiles:
      - docker-dev
    environment:
      - DATABASE_URL=postgresql://${POSTGRES_USER:-veza}:${POSTGRES_PASSWORD:-devpassword}@postgres:5432/${POSTGRES_DB:-veza}?sslmode=disable
      - REDIS_URL=redis://:${REDIS_PASSWORD:-devpassword}@redis:6379
      - JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set in .env}
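      # SECRET_KEY mirrors JWT_SECRET, presumably so tokens minted by backend-api verify here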
      - SECRET_KEY=${JWT_SECRET:?JWT_SECRET must be set in .env}
      - PORT=18082
      - AWS_S3_ENDPOINT=http://minio:9000
      - AWS_S3_BUCKET=veza-files
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-minioadmin}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-minioadmin}
      - AWS_REGION=us-east-1
      - HLS_OUTPUT_DIR=/data/hls
      - RABBITMQ_URL=amqp://${RABBITMQ_DEFAULT_USER:-veza}:${RABBITMQ_DEFAULT_PASS:-devpassword}@rabbitmq:5672/
    volumes:
      - hls-data:/data/hls
    ports:
      - "${PORT_STREAM:-18082}:18082"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
      rabbitmq:
        condition: service_healthy
    networks:
      - veza-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:18082/health"]
      interval: 10s
      timeout: 5s
      retries: 5
  # MinIO - S3-compatible object storage (v0.501 Cloud Storage)
  minio:
    image: minio/minio:latest
    container_name: veza_minio
    restart: unless-stopped
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-minioadmin}
      MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
    ports:
      - "${PORT_MINIO:-19000}:9000"
      - "${PORT_MINIO_CONSOLE:-19001}:9001"
    volumes:
      - minio_data:/data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - veza-net
  # MinIO bucket initialization
  minio-init:
    image: minio/mc:latest
    depends_on:
      minio:
        condition: service_healthy
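    # $$ escapes $ for compose, deferring expansion to the container shell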
    entrypoint: >
      /bin/sh -c "
      mc alias set veza http://minio:9000 $${MINIO_ROOT_USER:-minioadmin} $${MINIO_ROOT_PASSWORD:-minioadmin};
      mc mb --ignore-existing veza/veza-files;
      mc anonymous set none veza/veza-files/public;
      exit 0;
      "
    environment:
      MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-minioadmin}
      MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
    networks:
      - veza-net
volumes:
  postgres_data:
  redis_data:
  rabbitmq_data:
  hyperswitch_postgres_data:
  minio_data:
  hls-data:

networks:
  veza-net:
    driver: bridge