# veza/docker-compose.prod.yml — production Compose stack for the Veza platform
# NOTE: the top-level `version` attribute is obsolete in Compose v2 (it is
# ignored with a warning); kept only for legacy docker-compose v1 installs.
version: '3.8'

services:
  # ==========================================================================
  # INFRASTRUCTURE SERVICES
  # ==========================================================================

  # Primary application database.
  postgres:
    image: postgres:16-alpine
    container_name: veza_postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${DB_USER:-veza}
      # Fail fast at `docker compose up` if the secret is missing.
      POSTGRES_PASSWORD: ${DB_PASS:?DB_PASS must be set for production}
      POSTGRES_DB: ${DB_NAME:-veza}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-veza}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - veza-network
    deploy:
      resources:
        limits:
          cpus: '0.50'
          memory: 256M
redis:
image: redis:7-alpine
container_name: veza_redis
restart: unless-stopped
command: ["redis-server", "--requirepass", "${REDIS_PASSWORD:?REDIS_PASSWORD must be set for production}", "--appendonly", "yes"]
volumes:
- redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"]
interval: 5s
timeout: 3s
retries: 5
networks:
- veza-network
deploy:
resources:
limits:
cpus: '0.25'
memory: 64M
rabbitmq:
image: rabbitmq:3-management-alpine
container_name: veza_rabbitmq
restart: unless-stopped
environment:
RABBITMQ_DEFAULT_USER: ${DB_USER:-veza}
RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASS:?RABBITMQ_PASS must be set for production}
volumes:
- rabbitmq_data:/var/lib/rabbitmq
healthcheck:
test: rabbitmq-diagnostics -q ping
interval: 10s
timeout: 10s
retries: 5
networks:
- veza-network
deploy:
resources:
limits:
cpus: '0.50'
memory: 256M
clamav:
image: clamav/clamav:latest
container_name: veza_clamav
restart: unless-stopped
networks:
- veza-network
healthcheck:
test: ["CMD", "clamdscan", "--ping", "1"]
interval: 30s
timeout: 10s
retries: 5
start_period: 180s
deploy:
resources:
limits:
cpus: '0.5'
memory: 1G
# ============================================================================
# PAYMENT ROUTER (Hyperswitch)
# ============================================================================
hyperswitch_postgres:
image: postgres:16-alpine
container_name: veza_hyperswitch_postgres
restart: unless-stopped
environment:
POSTGRES_USER: ${HYPERSWITCH_DB_USER:-hyperswitch}
POSTGRES_PASSWORD: ${HYPERSWITCH_DB_PASS:?HYPERSWITCH_DB_PASS must be set for production}
POSTGRES_DB: ${HYPERSWITCH_DB_NAME:-hyperswitch}
volumes:
- hyperswitch_postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${HYPERSWITCH_DB_USER:-hyperswitch}"]
interval: 5s
timeout: 5s
retries: 5
networks:
- veza-network
deploy:
resources:
limits:
cpus: "0.25"
memory: 128M
hyperswitch:
image: juspaydotin/hyperswitch-router:2025.01.21.0-standalone
container_name: veza_hyperswitch
restart: unless-stopped
environment:
DATABASE_URL: postgresql://${HYPERSWITCH_DB_USER:-hyperswitch}:${HYPERSWITCH_DB_PASS:?HYPERSWITCH_DB_PASS must be set}@hyperswitch_postgres:5432/${HYPERSWITCH_DB_NAME:-hyperswitch}?sslmode=require
REDIS_URL: redis://:${REDIS_PASSWORD}@redis:6379
depends_on:
hyperswitch_postgres:
condition: service_healthy
redis:
condition: service_healthy
networks:
- veza-network
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"]
interval: 10s
timeout: 5s
retries: 3
deploy:
resources:
limits:
cpus: "0.5"
memory: 256M
# ============================================================================
# APPLICATION SERVICES
# ============================================================================
backend-api:
build:
context: ./veza-backend-api
dockerfile: Dockerfile.production
image: veza-backend-api:latest
container_name: veza_backend_api
restart: unless-stopped
environment:
- APP_ENV=production
- DATABASE_URL=postgres://${DB_USER:-veza}:${DB_PASS:?DB_PASS must be set}@postgres:5432/${DB_NAME:-veza}?sslmode=require
- REDIS_URL=redis://:${REDIS_PASSWORD:?REDIS_PASSWORD must be set}@redis:6379
- AMQP_URL=amqp://${DB_USER:-veza}:${RABBITMQ_PASS:?RABBITMQ_PASS must be set}@rabbitmq:5672
- JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set for production}
- COOKIE_SECURE=true
- COOKIE_SAME_SITE=strict
- COOKIE_HTTP_ONLY=true
- CORS_ALLOWED_ORIGINS=${CORS_ORIGINS:-http://veza.fr}
- HYPERSWITCH_URL=http://hyperswitch:8080
- HYPERSWITCH_API_KEY=${HYPERSWITCH_API_KEY:-}
- HYPERSWITCH_WEBHOOK_SECRET=${HYPERSWITCH_WEBHOOK_SECRET:-}
- HYPERSWITCH_ENABLED=${HYPERSWITCH_ENABLED:-false}
- CHECKOUT_SUCCESS_URL=${CHECKOUT_SUCCESS_URL:-https://veza.fr/purchases}
- ENABLE_CLAMAV=true
- CLAMAV_REQUIRED=true
- CLAMAV_ADDRESS=clamav:3310
- AWS_S3_ENDPOINT=http://minio:9000
- AWS_S3_BUCKET=veza-files
- AWS_ACCESS_KEY_ID=${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
- AWS_SECRET_ACCESS_KEY=${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
- AWS_REGION=${AWS_REGION:-us-east-1}
- HLS_STREAMING=true
- HLS_STORAGE_DIR=/data/hls
volumes:
- hls_prod_data:/data/hls
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
rabbitmq:
condition: service_healthy
clamav:
condition: service_started
networks:
- veza-network
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/api/v1/health"]
interval: 10s
timeout: 5s
retries: 3
# Chat Server removed in v0.502 -- chat is now handled by backend-api WebSocket at /api/v1/ws
stream-server:
build:
context: ./veza-stream-server
dockerfile: Dockerfile.production
image: veza-stream-server:latest
container_name: veza_stream_server
restart: unless-stopped
environment:
- DATABASE_URL=postgres://${DB_USER:-veza}:${DB_PASS:?DB_PASS must be set}@postgres:5432/${DB_NAME:-veza}?sslmode=require
- REDIS_URL=redis://:${REDIS_PASSWORD:?REDIS_PASSWORD must be set}@redis:6379
- JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set}
- PORT=3001
- HLS_OUTPUT_DIR=/data/hls
volumes:
- hls_prod_data:/data/hls
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
networks:
- veza-network
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3001/health"]
interval: 10s
timeout: 5s
retries: 3
minio:
image: minio/minio:latest
container_name: veza_minio
restart: unless-stopped
command: server /data --console-address ":9001"
environment:
MINIO_ROOT_USER: ${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
MINIO_ROOT_PASSWORD: ${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
volumes:
- minio_data:/data
networks:
- veza-network
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 10s
timeout: 5s
retries: 3
minio-init:
image: minio/mc:latest
depends_on:
minio:
condition: service_healthy
entrypoint: >
/bin/sh -c "
mc alias set veza http://minio:9000 $${MINIO_ROOT_USER} $${MINIO_ROOT_PASSWORD};
mc mb --ignore-existing veza/veza-files;
exit 0;
"
environment:
MINIO_ROOT_USER: ${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
MINIO_ROOT_PASSWORD: ${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
networks:
- veza-network
web:
build:
context: ./apps/web
dockerfile: Dockerfile.production
image: veza-web:latest
container_name: veza_web
restart: unless-stopped
environment:
- VITE_API_URL=http://haproxy/api/v1
- VITE_STREAM_URL=ws://haproxy/stream
- VITE_UPLOAD_URL=http://haproxy/api/v1/uploads
depends_on:
- backend-api
- stream-server
networks:
- veza-network
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:5173"]
interval: 10s
timeout: 5s
retries: 3
# ============================================================================
# REVERSE PROXY - HAProxy
# ============================================================================
haproxy:
image: haproxy:2.8-alpine
container_name: veza_haproxy
restart: unless-stopped
deploy:
resources:
limits:
cpus: '0.5'
memory: 128M
ports:
- "${PORT_HAPROXY:-80}:80"
- "443:443"
volumes:
- ./config/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
- ./config/ssl:/etc/ssl/veza:ro
depends_on:
- backend-api
- stream-server
- web
networks:
- veza-network
healthcheck:
test: ["CMD", "haproxy", "-c", "-f", "/usr/local/etc/haproxy/haproxy.cfg"]
interval: 10s
timeout: 5s
retries: 3
# Single bridge network shared by every service; fixed subnet so firewall
# rules can reference it.
networks:
  veza-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16

# Named volumes — data survives container recreation.
volumes:
  postgres_data:
  redis_data:
  rabbitmq_data:
  hyperswitch_postgres_data:
  minio_data:
  hls_prod_data: