veza/docker-compose.dev.yml
senke 9ed60e5719 fix(backend,infra): send real verification emails + fail-loud in prod
Registration was setting `IsVerified: true` at user-create time, and the
"send email" block was just a `logger.Info("Sending verification email")`
with no SMTP call behind it. In production this meant any mistyped or
typosquatted address got a fully-verified account, because the user never
had to prove ownership. In development the shortcut let people "log in"
without ever checking MailHog, masking SMTP misconfiguration.

Changes:

  * `core/auth/service.go`: new users start with `IsVerified: false`. The
    existing `POST /auth/verify-email` flow (unchanged) flips the bit
    when the user clicks the link.
  * Registration now calls `emailService.SendVerificationEmail(...)` for
    real. On SMTP failure the handler returns `500` in production (rather
    than leaving behind an account with no recovery path) and logs a
    warning in development (local sign-ups keep flowing); see the sketch
    after this list.
  * Same treatment for `password_reset_handler.RequestPasswordReset` —
    production fails loud instead of returning the generic success
    message after a silent SMTP drop.
  * New helper `isProductionEnv()` centralises the
    `APP_ENV=="production"` check in both `core/auth` and `handlers`.
  * `docker-compose.yml` + `docker-compose.dev.yml` now ship MailHog
    (`mailhog/mailhog:v1.0.1`, SMTP 1025, UI 8025). Backend dev env
    vars `SMTP_HOST=mailhog SMTP_PORT=1025` pre-wired so dev sign-ups
    actually deliver.
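
For illustration, the new registration path has roughly this shape. Only
`IsVerified`, `SendVerificationEmail`, `isProductionEnv` and the
`APP_ENV == "production"` check come from this change; the surrounding
types, field names and signatures are assumptions, not the repo's actual
layout:

```go
package auth

import (
	"context"
	"fmt"
	"log/slog"
	"os"
)

type User struct {
	Email      string
	IsVerified bool
}

type UserStore interface {
	Create(ctx context.Context, u *User) error
}

type EmailService interface {
	SendVerificationEmail(ctx context.Context, to, token string) error
}

type AuthService struct {
	users  UserStore
	email  EmailService
	logger *slog.Logger
}

// isProductionEnv centralises the APP_ENV == "production" check shared by
// core/auth and handlers.
func isProductionEnv() bool {
	return os.Getenv("APP_ENV") == "production"
}

// Register creates the user unverified and sends the verification email for
// real (token generation is elided here and passed in for brevity).
func (s *AuthService) Register(ctx context.Context, email, token string) (*User, error) {
	u := &User{Email: email, IsVerified: false} // flipped only by POST /auth/verify-email
	if err := s.users.Create(ctx, u); err != nil {
		return nil, err
	}
	if err := s.email.SendVerificationEmail(ctx, u.Email, token); err != nil {
		if isProductionEnv() {
			// Fail loud: a 500 beats an account that can never be verified.
			return nil, fmt.Errorf("send verification email: %w", err)
		}
		// Development: keep local sign-ups flowing, but surface SMTP problems.
		s.logger.Warn("verification email not sent", "error", err)
	}
	return u, nil
}
```

`RequestPasswordReset` goes through the same prod/dev split.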

Tests: auth test mocks updated (`expectRegister` adds a
`SendVerificationEmail` mock). `TestAuthService_Login_Success` +
`TestAuthHandler_Login_Success` flip `is_verified` directly after
`Register` to simulate the verification click.
`TestLogin_EmailNotVerified` now asserts `403` (previously asserted
`200` — the test was codifying the bug this commit fixes).
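
The real suite flips `is_verified` in the DB and asserts handler status
codes; as a self-contained sketch of the same mock-the-email-service idea
(written against the illustrative service above, not the repo's actual
`expectRegister` helpers):

```go
package auth

import (
	"context"
	"errors"
	"log/slog"
	"testing"
)

// memStore is an in-memory stand-in for the user repository.
type memStore struct{ users []*User }

func (m *memStore) Create(_ context.Context, u *User) error {
	m.users = append(m.users, u)
	return nil
}

// failingEmail plays the role of the SendVerificationEmail mock, except it
// always fails so the prod-vs-dev branch gets exercised.
type failingEmail struct{}

func (failingEmail) SendVerificationEmail(context.Context, string, string) error {
	return errors.New("smtp unreachable")
}

func TestRegister_SMTPFailure(t *testing.T) {
	svc := &AuthService{users: &memStore{}, email: failingEmail{}, logger: slog.Default()}

	t.Setenv("APP_ENV", "production")
	if _, err := svc.Register(context.Background(), "a@example.com", "tok"); err == nil {
		t.Fatal("production should fail loud on SMTP errors")
	}

	t.Setenv("APP_ENV", "development")
	u, err := svc.Register(context.Background(), "b@example.com", "tok")
	if err != nil {
		t.Fatalf("development should keep sign-ups flowing, got %v", err)
	}
	if u.IsVerified {
		t.Fatal("new users must start unverified")
	}
}
```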

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-16 14:52:46 +02:00

# =============================================================================
# VEZA - Development Infrastructure (TASK-QA-010)
# =============================================================================
# Infra-only stack for local development. Applications (backend, stream, web)
# run locally with hot reload via make dev, make dev-full, etc.
#
# Usage:
# docker compose -f docker-compose.dev.yml up -d
# make dev # uses infra-up which can target this file via COMPOSE_FILE
#
# Override: COMPOSE_FILE=docker-compose.dev.yml make infra-up
# =============================================================================
services:
  postgres:
    image: postgres:16-alpine
    container_name: veza_postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-veza}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
      POSTGRES_DB: ${POSTGRES_DB:-veza}
    ports:
      - "${PORT_POSTGRES:-15432}:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U veza"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 256M
        reservations:
          memory: 128M
  redis:
    image: redis:7-alpine
    container_name: veza_redis
    restart: unless-stopped
    ports:
      - "${PORT_REDIS:-16379}:6379"
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.25'
          memory: 64M
        reservations:
          memory: 32M
  rabbitmq:
    image: rabbitmq:3-management-alpine
    container_name: veza_rabbitmq
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER:-veza}
      RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS:-devpassword}
    ports:
      - "${PORT_RABBITMQ_AMQP:-15672}:5672"
      - "${PORT_RABBITMQ_MGMT:-25672}:15672"
    volumes:
      - rabbitmq_data:/var/lib/rabbitmq
    healthcheck:
      test: rabbitmq-diagnostics -q check_port_connectivity
      interval: 5s
      timeout: 5s
      retries: 12
      start_period: 40s
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 512M
        reservations:
          memory: 256M
  # SECURITY(MEDIUM-003): Pin ClamAV image to specific version instead of :latest
  clamav:
    image: clamav/clamav:1.4
    container_name: veza_clamav
    restart: unless-stopped
    ports:
      - "${PORT_CLAMAV:-13310}:3310"
    networks:
      - veza-net
    healthcheck:
      test: ["CMD", "clamdscan", "--ping", "1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 180s
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 1G
  # MailHog - Local SMTP capture (dev only)
  # Backend points SMTP at localhost:1025 (when running on the host) or
  # mailhog:1025 (when containerised); outbound mail shows up in the web UI on port 8025.
  mailhog:
    image: mailhog/mailhog:v1.0.1
    container_name: veza_mailhog
    restart: unless-stopped
    ports:
      - "${PORT_MAILHOG_SMTP:-1025}:1025"
      - "${PORT_MAILHOG_UI:-8025}:8025"
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.10'
          memory: 64M
  minio:
    image: minio/minio:latest
    container_name: veza_minio
    restart: unless-stopped
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-minioadmin}
      MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
    ports:
      - "${PORT_MINIO:-19000}:9000"
      - "${PORT_MINIO_CONSOLE:-19001}:9001"
    volumes:
      - minio_data:/data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - veza-net
  minio-init:
    image: minio/mc:latest
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: >
      /bin/sh -c "
      mc alias set veza http://minio:9000 $${MINIO_ROOT_USER:-minioadmin} $${MINIO_ROOT_PASSWORD:-minioadmin};
      mc mb --ignore-existing veza/veza-files;
      mc anonymous set download veza/veza-files/public;
      exit 0;
      "
    environment:
      MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-minioadmin}
      MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
    networks:
      - veza-net
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
    container_name: veza_elasticsearch
    restart: unless-stopped
    environment:
      - discovery.type=single-node
      # SECURITY(LOW-004): Enable xpack security. Set ELASTIC_PASSWORD in .env for dev.
      - xpack.security.enabled=true
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD:-devpassword}
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
      - "${PORT_ELASTICSEARCH:-19200}:9200"
    volumes:
      - elasticsearch_data:/usr/share/elasticsearch/data
    healthcheck:
      test: ["CMD-SHELL", "curl -s http://localhost:9200/_cluster/health || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 60s
    networks:
      - veza-net
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 1G
volumes:
  postgres_data:
  redis_data:
  rabbitmq_data:
  minio_data:
  elasticsearch_data:

networks:
  veza-net:
    driver: bridge