# haproxy defaults - TLS-terminating frontend + backend pools for the
# stateless backend-api fleet and the stream server. v1.0.9 W4 Day 19.
#
# Topology:
#
#   client → :443 HAProxy (TLS) → backend-api-1.lxd:8080
#                                → backend-api-2.lxd:8080
#                                → stream-server-1.lxd:8082 (track_id hash)
#                                → stream-server-2.lxd:8082
#
# WebSocket affinity: HAProxy sets the `SERVERID` cookie on the first
# response; subsequent requests (HTTP + WS upgrade) carry the cookie
# back to the same backend. The cookie survives across page loads, so
# a chat session reconnecting after a 30s pause typically lands on the
# same instance - but if the original instance is offline, the cookie
# is ignored and the next-best healthy backend takes over.
---
haproxy_version: "2.8"  # Ubuntu 22.04 ships 2.4; we explicitly install 2.8 from the PPA

# Listeners. v1.0 lab: HTTP only (TLS at the edge LB above us, or
# none in the lab). Phase 2 enables TLS termination here once we have
# certs in /etc/haproxy/certs/veza.pem.
haproxy_listen_http: 80
haproxy_listen_https: 443
haproxy_listen_stats: 9100  # admin socket bind; reachable on the Incus bridge only
haproxy_tls_cert_path: ""   # empty = HTTPS frontend disabled

# Backend API pool - port 8080 by default (Gin server in cmd/api).
# The inventory's `backend_api_instances` group drives the upstream
# server list; if absent, the role falls back to the static defaults
# below so the role is testable in isolation.
haproxy_backend_api_port: 8080
haproxy_backend_api_fallback:
  - backend-api-1
  - backend-api-2

# Stream server pool - port 8082 (Rust Axum). Uses URI-hash balancing so
# the same track_id consistently lands on the same node, maximising the
# in-process HLS cache hit rate.
haproxy_stream_server_port: 8082
haproxy_stream_server_fallback:
  - stream-server-1
  - stream-server-2

# Health check cadence + drain - Day 19 acceptance asks for 5s checks
# and a 30s drain before removal.
haproxy_health_check_interval_ms: 5000
haproxy_health_check_fall: 3   # 3 failed checks = down
haproxy_health_check_rise: 2   # 2 passed checks = back up
haproxy_graceful_drain_seconds: 30

# Sticky cookie name. Rotating it invalidates existing client cookies and
# forces a rebalance - useful after a config change that reshapes the pool.
haproxy_sticky_cookie_name: "VEZA_SERVERID"
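
# For reference - a minimal sketch of the haproxy.cfg stanzas these variables
# are meant to drive. This is an illustrative assumption, not the rendered
# template: server names, cookie values, and exact directives may differ.
#
#   backend backend_api
#       balance roundrobin
#       cookie VEZA_SERVERID insert indirect nocache
#       default-server check inter 5000 fall 3 rise 2
#       server backend-api-1 backend-api-1.lxd:8080 cookie backend-api-1
#       server backend-api-2 backend-api-2.lxd:8080 cookie backend-api-2
#
#   backend stream_server
#       balance uri             # hash the request URI so a given track_id pins to one node
#       hash-type consistent    # minimise remapping when a server drops out
#       default-server check inter 5000 fall 3 rise 2
#       server stream-server-1 stream-server-1.lxd:8082
#       server stream-server-2 stream-server-2.lxd:8082
#
# Draining would typically map haproxy_graceful_drain_seconds to putting a
# server into DRAIN via the admin socket ("set server <backend>/<name> state
# drain") and waiting ~30s for in-flight sessions before removing it.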