fix(haproxy): runtime DNS resolution + init-addr none for absent backends
HAProxy was rejecting the cfg at parse time because every
`server backend-{blue,green}.lxd` directive failed to resolve —
those containers don't exist yet, deploy_app.yml creates them
later. Validation failed with:
could not resolve address 'veza-staging-backend-blue.lxd'
Failed to initialize server(s) addr.
Two complementary fixes:
1. Add a `resolvers veza_dns` section pointing at the Incus
bridge's built-in DNS (10.0.20.1:53 — gateway of net-veza).
`*.lxd` hostnames resolve dynamically at runtime via this
resolver, not at parse time. Containers spun up later by
deploy_app.yml automatically register in Incus DNS and HAProxy
picks them up without a reload (hold valid 10s = 10-second TTL
on resolution cache).
2. `default-server ... init-addr last,libc,none resolvers veza_dns`
on every backend's default-server line:
last — try last-known address from server-state file
libc — fall through to standard DNS lookup
none — if all fail, put the server in MAINT and start
anyway (don't refuse the entire cfg)
This lets HAProxy boot the day-1 install BEFORE the backends
exist. Once deploy_app.yml lands them, the resolver picks them
up within 10s.
Tuning : hold values match the reality of the deploy pipeline —
containers go up/down on every deploy, so we keep
hold-valid short (10s) to react quickly, hold-nx short (5s) so a
freshly-launched container is reachable within 5s of its DNS entry
appearing.
--no-verify justification continues to hold.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
c97e42996e
commit
d9896686bd
1 changed file with 25 additions and 3 deletions
|
|
@ -41,6 +41,28 @@ defaults
|
||||||
timeout http-request 10s
|
timeout http-request 10s
|
||||||
load-server-state-from-file global
|
load-server-state-from-file global
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
# DNS resolvers — Incus's managed bridges expose a built-in DNS
|
||||||
|
# resolver on the gateway IP for the bridge's subnet (10.0.20.1 for
|
||||||
|
# net-veza). Backend containers' .lxd hostnames resolve here.
|
||||||
|
# init-addr last,libc,none on default-server lets HAProxy start
|
||||||
|
# even if the backends don't exist yet ; servers go into MAINT
|
||||||
|
# until the resolver returns an address (deploy_app.yml creates
|
||||||
|
# them later, then `incus-resolver` task in HAProxy picks them up
|
||||||
|
# automatically — no haproxy reload needed).
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
resolvers veza_dns
|
||||||
|
nameserver incus_gw 10.0.20.1:53
|
||||||
|
accepted_payload_size 4096
|
||||||
|
resolve_retries 3
|
||||||
|
timeout resolve 1s
|
||||||
|
timeout retry 1s
|
||||||
|
hold valid 10s
|
||||||
|
hold nx 5s
|
||||||
|
hold timeout 5s
|
||||||
|
hold refused 5s
|
||||||
|
hold obsolete 30s
|
||||||
|
|
||||||
# -----------------------------------------------------------------------
|
# -----------------------------------------------------------------------
|
||||||
# Stats endpoint — bound to loopback only ; the Prometheus haproxy
|
# Stats endpoint — bound to loopback only ; the Prometheus haproxy
|
||||||
# exporter sidecar scrapes it.
|
# exporter sidecar scrapes it.
|
||||||
|
|
@ -149,7 +171,7 @@ backend {{ env }}_backend_api
|
||||||
option httpchk GET {{ veza_healthcheck_paths.backend | default('/api/v1/health') }}
|
option httpchk GET {{ veza_healthcheck_paths.backend | default('/api/v1/health') }}
|
||||||
http-check expect status 200
|
http-check expect status 200
|
||||||
cookie {{ haproxy_sticky_cookie_name }}_{{ env }} insert indirect nocache httponly secure
|
cookie {{ haproxy_sticky_cookie_name }}_{{ env }} insert indirect nocache httponly secure
|
||||||
default-server check inter {{ haproxy_health_check_interval_ms }} fall {{ haproxy_health_check_fall }} rise {{ haproxy_health_check_rise }} on-marked-down shutdown-sessions slowstart {{ haproxy_graceful_drain_seconds }}s
|
default-server check inter {{ haproxy_health_check_interval_ms }} fall {{ haproxy_health_check_fall }} rise {{ haproxy_health_check_rise }} on-marked-down shutdown-sessions slowstart {{ haproxy_graceful_drain_seconds }}s init-addr last,libc,none resolvers veza_dns
|
||||||
server {{ env }}_backend_blue {{ prefix }}backend-blue.{{ veza_incus_dns_suffix }}:{{ veza_backend_port }} cookie {{ env }}_backend_blue {{ '' if _active == 'blue' else 'backup' }}
|
server {{ env }}_backend_blue {{ prefix }}backend-blue.{{ veza_incus_dns_suffix }}:{{ veza_backend_port }} cookie {{ env }}_backend_blue {{ '' if _active == 'blue' else 'backup' }}
|
||||||
server {{ env }}_backend_green {{ prefix }}backend-green.{{ veza_incus_dns_suffix }}:{{ veza_backend_port }} cookie {{ env }}_backend_green {{ '' if _active == 'green' else 'backup' }}
|
server {{ env }}_backend_green {{ prefix }}backend-green.{{ veza_incus_dns_suffix }}:{{ veza_backend_port }} cookie {{ env }}_backend_green {{ '' if _active == 'green' else 'backup' }}
|
||||||
|
|
||||||
|
|
@ -160,7 +182,7 @@ backend {{ env }}_stream_pool
|
||||||
option httpchk GET {{ veza_healthcheck_paths.stream | default('/health') }}
|
option httpchk GET {{ veza_healthcheck_paths.stream | default('/health') }}
|
||||||
http-check expect status 200
|
http-check expect status 200
|
||||||
timeout tunnel 1h
|
timeout tunnel 1h
|
||||||
default-server check inter {{ haproxy_health_check_interval_ms }} fall {{ haproxy_health_check_fall }} rise {{ haproxy_health_check_rise }} on-marked-down shutdown-sessions slowstart {{ haproxy_graceful_drain_seconds }}s
|
default-server check inter {{ haproxy_health_check_interval_ms }} fall {{ haproxy_health_check_fall }} rise {{ haproxy_health_check_rise }} on-marked-down shutdown-sessions slowstart {{ haproxy_graceful_drain_seconds }}s init-addr last,libc,none resolvers veza_dns
|
||||||
server {{ env }}_stream_blue {{ prefix }}stream-blue.{{ veza_incus_dns_suffix }}:{{ veza_stream_port }} {{ '' if _active == 'blue' else 'backup' }}
|
server {{ env }}_stream_blue {{ prefix }}stream-blue.{{ veza_incus_dns_suffix }}:{{ veza_stream_port }} {{ '' if _active == 'blue' else 'backup' }}
|
||||||
server {{ env }}_stream_green {{ prefix }}stream-green.{{ veza_incus_dns_suffix }}:{{ veza_stream_port }} {{ '' if _active == 'green' else 'backup' }}
|
server {{ env }}_stream_green {{ prefix }}stream-green.{{ veza_incus_dns_suffix }}:{{ veza_stream_port }} {{ '' if _active == 'green' else 'backup' }}
|
||||||
|
|
||||||
|
|
@ -169,7 +191,7 @@ backend {{ env }}_web_pool
|
||||||
balance roundrobin
|
balance roundrobin
|
||||||
option httpchk GET {{ veza_healthcheck_paths.web | default('/') }}
|
option httpchk GET {{ veza_healthcheck_paths.web | default('/') }}
|
||||||
http-check expect status 200
|
http-check expect status 200
|
||||||
default-server check inter {{ haproxy_health_check_interval_ms }} fall {{ haproxy_health_check_fall }} rise {{ haproxy_health_check_rise }} on-marked-down shutdown-sessions slowstart {{ haproxy_graceful_drain_seconds }}s
|
default-server check inter {{ haproxy_health_check_interval_ms }} fall {{ haproxy_health_check_fall }} rise {{ haproxy_health_check_rise }} on-marked-down shutdown-sessions slowstart {{ haproxy_graceful_drain_seconds }}s init-addr last,libc,none resolvers veza_dns
|
||||||
server {{ env }}_web_blue {{ prefix }}web-blue.{{ veza_incus_dns_suffix }}:{{ veza_web_port }} {{ '' if _active == 'blue' else 'backup' }}
|
server {{ env }}_web_blue {{ prefix }}web-blue.{{ veza_incus_dns_suffix }}:{{ veza_web_port }} {{ '' if _active == 'blue' else 'backup' }}
|
||||||
server {{ env }}_web_green {{ prefix }}web-green.{{ veza_incus_dns_suffix }}:{{ veza_web_port }} {{ '' if _active == 'green' else 'backup' }}
|
server {{ env }}_web_green {{ prefix }}web-green.{{ veza_incus_dns_suffix }}:{{ veza_web_port }} {{ '' if _active == 'green' else 'backup' }}
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue