chore(infra): J6 — mark 3 dormant docker-compose files as deprecated

Audit cross-checked against active composes shows three dormant compose
files that duplicate functionality already covered by the canonical
docker-compose.{,dev,prod,staging,test}.yml at the repo root. None are
referenced from Make targets, scripts, or CI workflows. They have
diverged from the active set (different ports, older Postgres version,
no shared volume names, etc.) and are a footgun for new contributors.

Files marked DEPRECATED with a header pointing at the canonical compose
to use instead:

  veza-stream-server/docker-compose.yml
    Standalone stream-server compose. Same service is provided by the
    root docker-compose.yml under the `docker-dev` profile.

  infra/docker-compose.lab.yml
    Lab Postgres on default port 5432. Conflicts with a host Postgres on
    most setups; root docker-compose.dev.yml uses non-default ports for
    a reason.

  config/docker/docker-compose.local.yml
    Local Postgres 15 variant on port 5433. Redundant with root
    docker-compose.dev.yml (Postgres 16, project-wide port mapping).

Not in this commit (intentionally limited J6 scope, per audit plan
"verify, don't refactor"):

  - No `extends:` consolidation across the active composes — that is a
    1-2 day refactor on its own and not a v1.0.4 concern.
  - The five active composes were syntactically validated locally
    (docker compose config); production and staging both require
    operator-injected env vars (DB_PASS, S3_*, RABBITMQ_PASS, etc.)
    which is the intended behavior, not a bug.
  - Cross-compose audit confirms zero references to the removed
    chat-server or any other dead service / image. Only one residual
    deprecation warning across all active composes: the obsolete
    `version:` field on docker-compose.{prod,test}.yml — cosmetic,
    not blocking.
  - Test suite verification (Go / Rust / Vitest) deferred to Forgejo CI
    rather than re-running locally. The pre-push hook + remote pipeline
    will gate the next push.

Follow-up candidates (not blocking v1.0.4):
  - Delete the three deprecated files once a 2-month grace period
    confirms no local dev workflow references them.
  - Drop the obsolete `version:` field across the active composes.

Refs: AUDIT_REPORT.md §6.1, §10 P7
This commit is contained in:
senke 2026-04-15 12:58:39 +02:00
parent 7f89bebe1a
commit 113210734c
3 changed files with 332 additions and 311 deletions

View file

@ -1,122 +1,129 @@
# ============================================================================
# DEPRECATED — local dev compose variant (postgres:15 on port 5433).
# Redundant with docker-compose.dev.yml at the repo root (postgres:16 on the
# project-wide port mapping). Use docker-compose.dev.yml instead:
#   docker compose -f docker-compose.dev.yml up -d
# Marked in v1.0.4 cleanup. Candidate for deletion once confirmed unused.
# ============================================================================
version: "3.8"

services:
  # PostgreSQL database
  postgres:
    image: postgres:15-alpine
    container_name: veza-postgres-local
    environment:
      POSTGRES_DB: veza_local
      POSTGRES_USER: veza_user
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
    ports:
      # Non-default host port to avoid clashing with a host Postgres on 5432.
      - "5433:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./scripts/database/init.sql:/docker-entrypoint-initdb.d/init.sql
    networks:
      - veza-network

  # Redis cache
  redis:
    image: redis:7-alpine
    container_name: veza-redis-local
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    networks:
      - veza-network

  # Monitoring - Prometheus
  prometheus:
    image: prom/prometheus:latest
    container_name: veza-prometheus-local
    ports:
      - "9090:9090"
    volumes:
      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--web.console.libraries=/etc/prometheus/console_libraries"
      - "--web.console.templates=/etc/prometheus/consoles"
      - "--storage.tsdb.retention.time=200h"
      - "--web.enable-lifecycle"
    networks:
      - veza-network

  # Monitoring - Grafana
  grafana:
    image: grafana/grafana:latest
    container_name: veza-grafana-local
    ports:
      - "3000:3000"
    environment:
      GF_SECURITY_ADMIN_PASSWORD: admin
      GF_USERS_ALLOW_SIGN_UP: "false"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./config/grafana/dashboards:/etc/grafana/provisioning/dashboards
      - ./config/grafana/datasources:/etc/grafana/provisioning/datasources
    networks:
      - veza-network

  # Logging - Elasticsearch
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.8.0
    container_name: veza-elasticsearch-local
    environment:
      - discovery.type=single-node
      # SECURITY(LOW-004): Enable xpack security. Set ELASTIC_PASSWORD in .env.
      - xpack.security.enabled=true
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD:-devpassword}
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
      - "9200:9200"
    volumes:
      - elasticsearch_data:/usr/share/elasticsearch/data
    networks:
      - veza-network

  # Logging - Kibana
  kibana:
    image: docker.elastic.co/kibana/kibana:8.8.0
    container_name: veza-kibana-local
    ports:
      - "5601:5601"
    environment:
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    volumes:
      - kibana_data:/usr/share/kibana/data
    networks:
      - veza-network
    depends_on:
      - elasticsearch

  # Logging - Filebeat
  filebeat:
    image: docker.elastic.co/beats/filebeat:8.8.0
    container_name: veza-filebeat-local
    # Root needed to read other containers' log files and the Docker socket.
    user: root
    volumes:
      - ./config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - veza-network
    depends_on:
      - elasticsearch

volumes:
  postgres_data:
  redis_data:
  prometheus_data:
  grafana_data:
  elasticsearch_data:
  kibana_data:

networks:
  veza-network:
    driver: bridge

View file

@ -1,72 +1,79 @@
# ============================================================================
# DEPRECATED — lab environment compose, dormant since February 2026.
# Overlaps with docker-compose.dev.yml at the repo root (which uses non-default
# ports to avoid conflicts with a host Postgres). Use docker-compose.dev.yml
# for day-to-day dev:
#   docker compose -f docker-compose.dev.yml up -d
# Marked in v1.0.4 cleanup. Candidate for deletion once confirmed unused.
# ============================================================================
version: "3.9"

services:
  postgres:
    image: postgres:16
    container_name: veza-lab-postgres
    environment:
      POSTGRES_USER: veza
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-devpassword}
      POSTGRES_DB: veza_lab
    ports:
      # NOTE(review): default port 5432 conflicts with a host Postgres on
      # most setups — one of the reasons this file is deprecated.
      - "5432:5432"
    volumes:
      - postgres_lab_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U veza -d veza_lab"]
      interval: 5s
      timeout: 3s
      retries: 10
    networks:
      - veza-lab-net

  redis:
    image: redis:7
    container_name: veza-lab-redis
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    networks:
      - veza-lab-net

  rabbitmq:
    image: rabbitmq:3-management
    container_name: veza-lab-rabbitmq
    ports:
      - "5672:5672"
      # Management UI
      - "15672:15672"
    environment:
      RABBITMQ_DEFAULT_USER: veza
      RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS:-devpassword}
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - veza-lab-net

  haproxy:
    image: haproxy:lts-alpine
    container_name: veza-lab-haproxy
    ports:
      - "80:80"
      # - "443:443" # SSL disabled for now
    volumes:
      - ../docker/haproxy/haproxy.lab.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
    extra_hosts:
      # Lets HAProxy reach services running on the Docker host.
      - "host.docker.internal:host-gateway"
    networks:
      - veza-lab-net

volumes:
  postgres_lab_data:

networks:
  veza-lab-net:
    driver: bridge

View file

@ -1,148 +1,155 @@
# ============================================================================
# DEPRECATED — standalone stream-server compose.
# Use docker-compose.yml (root) with the `docker-dev` profile instead:
#   docker compose --profile docker-dev up -d stream-server
# Marked in v1.0.4 cleanup. Candidate for deletion once no local dev
# reference is found (2+ month grace period).
# ============================================================================
version: "3.8"

services:
  stream-server:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        # NOTE(review): Compose interpolation does not run command
        # substitution — if BUILD_TIME/RUST_VERSION are unset, the literal
        # "$(...)" text is passed through. Set them in the environment.
        BUILD_TIME: ${BUILD_TIME:-$(date -u +"%Y-%m-%dT%H:%M:%SZ")}
        RUST_VERSION: ${RUST_VERSION:-$(rustc --version)}
    container_name: stream-server
    restart: unless-stopped
    # Network configuration
    ports:
      - "${HOST_PORT:-8082}:8082"
    # Environment variables
    environment:
      - SECRET_KEY=${SECRET_KEY}
      - STREAM_SERVER_PORT=8082
      - AUDIO_DIR=/app/audio
      - ALLOWED_ORIGINS=${ALLOWED_ORIGINS:-http://localhost:5173}
      - MAX_FILE_SIZE=${MAX_FILE_SIZE:-104857600}
      - MAX_RANGE_SIZE=${MAX_RANGE_SIZE:-10485760}
      - SIGNATURE_TOLERANCE=${SIGNATURE_TOLERANCE:-60}
      - RUST_LOG=${RUST_LOG:-stream_server=info}
      - ADMIN_TOKEN=${ADMIN_TOKEN:-}
    # Volume mounts
    volumes:
      - ./audio:/app/audio:ro
      - ./logs:/app/logs:rw
      - stream_server_cache:/tmp
    # Resource limits
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: "1.0"
        reservations:
          memory: 128M
          cpus: "0.25"
    # Security hardening: no privilege escalation, minimal capabilities,
    # read-only root filesystem with a size-capped tmpfs for /tmp.
    security_opt:
      - no-new-privileges:true
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    read_only: true
    tmpfs:
      - /tmp:rw,noexec,nosuid,size=100m
    # Health check
    healthcheck:
      test: ["CMD", "/usr/local/bin/healthcheck.sh"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
    # Network
    networks:
      - stream_network

  # Reverse proxy (optional)
  nginx:
    image: nginx:alpine
    container_name: stream-nginx
    restart: unless-stopped
    depends_on:
      - stream-server
    ports:
      - "${NGINX_PORT:-80}:80"
      - "${NGINX_SSL_PORT:-443}:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./ssl:/etc/nginx/ssl:ro
      - nginx_cache:/var/cache/nginx
    networks:
      - stream_network
    profiles:
      - with-proxy

  # Monitoring with Prometheus (optional)
  prometheus:
    image: prom/prometheus:latest
    container_name: stream-prometheus
    restart: unless-stopped
    ports:
      - "${PROMETHEUS_PORT:-9090}:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--web.console.libraries=/etc/prometheus/console_libraries"
      - "--web.console.templates=/etc/prometheus/consoles"
      - "--storage.tsdb.retention.time=200h"
      - "--web.enable-lifecycle"
    networks:
      - stream_network
    profiles:
      - monitoring

  # Grafana for visualization (optional)
  grafana:
    image: grafana/grafana:latest
    container_name: stream-grafana
    restart: unless-stopped
    depends_on:
      - prometheus
    ports:
      - "${GRAFANA_PORT:-3000}:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources:ro
    networks:
      - stream_network
    profiles:
      - monitoring

networks:
  stream_network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16

volumes:
  stream_server_cache:
    driver: local
  nginx_cache:
    driver: local
  prometheus_data:
    driver: local
  grafana_data:
    driver: local