# Lab inventory — the R720's local lab Incus container used to dry-run
# role changes before they touch staging or prod. Override
# ansible_host / ansible_user / ansible_port in `host_vars/<hostname>.yml`
# (gitignored if it carries credentials, otherwise plain values).
#
# Usage:
#   ansible-playbook -i inventory/lab.yml playbooks/site.yml --check
#   ansible-playbook -i inventory/lab.yml playbooks/site.yml
#
# v1.0.9 Day 6: postgres_ha group added. The 3 containers
# (pgaf-monitor, pgaf-primary, pgaf-replica) live ON the veza-lab
# host and are addressed via the `community.general.incus`
# connection plugin — no SSH setup needed inside the containers.

all:
  hosts:
    veza-lab:
      ansible_host: 10.0.20.150
      ansible_user: senke
      ansible_python_interpreter: /usr/bin/python3
  children:
    incus_hosts:
      hosts:
        veza-lab:
    veza_lab:
      hosts:
        veza-lab:
    postgres_ha:
      hosts:
        pgaf-monitor:
          pg_auto_failover_role: monitor
        pgaf-primary:
          pg_auto_failover_role: node
        pgaf-replica:
          pg_auto_failover_role: node
      vars:
        # Containers reached via Incus exec on the parent host. The
        # plugin lives in the community.general collection — install it
        # with `ansible-galaxy collection install community.general`
        # before running this playbook.
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    postgres_ha_monitor:
      hosts:
        pgaf-monitor:
    postgres_ha_nodes:
      # Order matters — primary first so it registers as primary; replica
      # second so it joins as standby.
      hosts:
        pgaf-primary:
        pgaf-replica:
    # v1.0.9 Day 7: pgbouncer fronts the formation. Same
    # community.general.incus connection plugin as postgres_ha.
    pgbouncer:
      hosts:
        pgaf-pgbouncer:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    # v1.0.9 W3 Day 11: Redis Sentinel HA. 3 Incus containers each
    # running a redis-server + redis-sentinel; redis-1 boots as master,
    # the other two as replicas. Sentinel quorum = 2 across the 3.
    redis_ha:
      hosts:
        redis-1:
        redis-2:
        redis-3:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    redis_ha_master:
      # First in this list is the bootstrap master; sentinel.conf.j2
      # references this group to point each sentinel at it.
      hosts:
        redis-1:
    # v1.0.9 — phase-1 self-hosted edge cache fronting the MinIO cluster.
    # Single container colocated on the lab host. Phase-2 (W3+) adds a
    # second node + GeoDNS; phase-3 only wires Bunny.net via the
    # existing CDN_* env vars.
    nginx_cache:
      hosts:
        nginx-cache:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    # v1.0.9 W4 Day 19 — HAProxy in front of the backend-api +
    # stream-server pools. Single LB node in phase-1; keepalived VIP
    # comes in phase-2.
    haproxy:
      hosts:
        haproxy:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    # 2 backend-api Incus containers (active/active behind haproxy).
    # Sessions are Redis-backed so the API is stateless; an HAProxy
    # sticky cookie keeps a logged-in user pinned to one backend
    # through the session for WS upgrade locality. A config sketch
    # follows this group.
    backend_api_instances:
      hosts:
        backend-api-1:
        backend-api-2:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
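    # Illustrative only: a minimal haproxy.cfg sketch of the balancing
    # described for the backend-api pool above and the stream-server
    # pool below. The real config presumably comes from the haproxy
    # role; the ports and .lxd names here are placeholders, not values
    # from this repo.
    #
    #   backend backend_api
    #     balance roundrobin
    #     cookie SRV insert indirect nocache
    #     server backend-api-1 backend-api-1.lxd:3000 cookie bapi1 check
    #     server backend-api-2 backend-api-2.lxd:3000 cookie bapi2 check
    #
    #   backend stream_server
    #     # balance uri hashes the request path, so all segments of one
    #     # HLS track land on the same stream-server (cache locality)
    #     balance uri
    #     server stream-server-1 stream-server-1.lxd:4000 check
    #     server stream-server-2 stream-server-2.lxd:4000 check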
    # 2 stream-server Incus containers (active/active behind haproxy).
    # Affinity by track_id hash via HAProxy URI-hash balance for HLS
    # cache locality.
    stream_server_instances:
      hosts:
        stream-server-1:
        stream-server-2:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    # v1.0.9 W5 Day 24 — synthetic monitoring runner. Should sit on a
    # host external to the prod cluster; lab phase-1 colocates it.
    blackbox_exporter:
      hosts:
        blackbox-exporter:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    # v1.0.9 W3 Day 12: distributed MinIO with EC:2. 4 Incus containers,
    # each providing one drive; a single erasure set tolerates 2
    # simultaneous node failures.
    minio_nodes:
      hosts:
        minio-1:
        minio-2:
        minio-3:
        minio-4:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    # v1.0.9 Day 9: otel-collector + Tempo for distributed tracing.
    # Each runs in its own Incus container; the API on the host points
    # at otel-collector.lxd:4317 via OTEL_EXPORTER_OTLP_ENDPOINT.
    observability:
      hosts:
        otel-collector:
        tempo:
      vars:
        ansible_connection: community.general.incus
        ansible_python_interpreter: /usr/bin/python3
    otel_collectors:
      hosts:
        otel-collector:
    tempo:
      hosts:
        tempo:
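
# Quick checks, a sketch assuming the community.general collection is
# installed on the control node and the lab containers are running:
#
#   # Does the inventory parse, and do the groups nest as intended?
#   ansible-inventory -i inventory/lab.yml --graph
#
#   # Can the Incus connection plugin reach the containers?
#   ansible -i inventory/lab.yml postgres_ha,redis_ha -m ping
#
#   # Host-side API -> collector wiring per the observability note above
#   # (the http:// scheme is an assumption; the note gives only host:port):
#   export OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector.lxd:4317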