diff --git a/infra/ansible/inventory/prod.yml b/infra/ansible/inventory/prod.yml
index 4d8cab5c9..8dccb808d 100644
--- a/infra/ansible/inventory/prod.yml
+++ b/infra/ansible/inventory/prod.yml
@@ -1,21 +1,60 @@
-# Prod inventory — single R720 (self-hosted Incus) at launch, with
-# Hetzner debordement planned post-launch. ROADMAP_V1.0_LAUNCH.md §2
-# documents the COMPRESSED HA stance: real multi-host HA arrives
-# v1.1+; v1.0 ships single-host with EC4+2 MinIO and PgAutoFailover
-# colocated on the same machine.
+# Prod inventory — single R720 (self-hosted Incus) at v1.0 launch,
+# Hetzner debordement post-launch. ROADMAP_V1.0_LAUNCH.md §2 documents
+# the COMPRESSED HA stance : real multi-host HA arrives v1.1+ ; v1.0
+# ships single-host with EC4+2 MinIO + PgAutoFailover colocated.
 #
-# Real ansible_host left as TODO until DNS (EX-5) is live. Use
-# ssh-config aliases or fill these in once `api.veza.fr` resolves.
+# Topology mirrors staging.yml (same shape, different prefix +
+# different network — see group_vars/prod.yml). Phase-2 (post v1.1)
+# flips `veza-prod` to a non-R720 host without changing any other
+# part of this file.
+#
+# Naming : every container ends up `veza-<service>[-<color>]` because
+# group_vars/prod.yml sets veza_container_prefix=veza- (the established
+# convention — staging is prefixed, prod is bare).
 all:
   hosts:
     veza-prod:
-      ansible_host: TODO_PROD_IP
+      ansible_host: 10.0.20.150
       ansible_user: ansible
       ansible_python_interpreter: /usr/bin/python3
   children:
     incus_hosts:
       hosts:
         veza-prod:
-    veza_prod:
+    haproxy:
       hosts:
-        veza-prod:
+        veza-haproxy:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    veza_app_backend:
+      hosts:
+        veza-backend-blue:
+        veza-backend-green:
+        veza-backend-tools:  # ephemeral, Phase A only
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    veza_app_stream:
+      hosts:
+        veza-stream-blue:
+        veza-stream-green:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    veza_app_web:
+      hosts:
+        veza-web-blue:
+        veza-web-green:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    veza_data:
+      hosts:
+        veza-postgres:
+        veza-redis:
+        veza-rabbitmq:
+        veza-minio:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
diff --git a/infra/ansible/inventory/staging.yml b/infra/ansible/inventory/staging.yml
index dfaf65e4e..6560c3f9c 100644
--- a/infra/ansible/inventory/staging.yml
+++ b/infra/ansible/inventory/staging.yml
@@ -1,20 +1,82 @@
-# Staging inventory — Hetzner Cloud host that mirrors prod topology
-# (Postgres + Redis + RabbitMQ + MinIO + backend/web/stream
-# containers) at a smaller scale, for pre-deploy validation.
+# Staging inventory — local R720 (same Incus daemon as the Forgejo
+# runner ; phase-1 simplification documented in group_vars/staging.yml).
 #
-# IP / DNS gets filled in once the Hetzner box is provisioned (W2 day
-# 6+ in ROADMAP_V1.0_LAUNCH.md). Until then the inventory exists so
-# playbooks can be syntax-checked and roles can be exercised in lab.
+# Connection model :
+#   * `veza-staging` is the Incus host (R720 itself). Ansible
+#     reaches it over SSH ; the runner has the right SSH key in
+#     ~/.ssh/.
+#   * Every other host in this inventory lives INSIDE that Incus
+#     host as an LXC container. Ansible reaches them via the
+#     `community.general.incus` connection plugin (no SSH-into-
+#     containers needed) — see group vars under each child group.
+#
+# Container set :
+#   * App tier — backend/stream/web in blue/green pairs (6
+#     containers) + an ephemeral backend-tools used
+#     by deploy_app.yml Phase A (migrations).
+#   * Edge — haproxy (singleton, persistent across deploys).
+#   * Data tier — postgres, redis, rabbitmq, minio (singletons,
+#     state survives every deploy).
+#
+# Used by :
+#   * .forgejo/workflows/deploy.yml (push:main → -i inventory/staging.yml)
+#   * .forgejo/workflows/rollback.yml + cleanup-failed.yml
+#   * Local debug : `ansible-playbook -i inventory/staging.yml \
+#       playbooks/deploy_data.yml --check --diff \
+#       --vault-password-file ~/.vault-pass`
+#
+# Naming : every container ends up `veza-staging-<service>[-<color>]`
+# because group_vars/staging.yml sets veza_container_prefix=veza-staging-.
 all:
   hosts:
     veza-staging:
-      ansible_host: TODO_HETZNER_IP
+      ansible_host: 10.0.20.150
       ansible_user: ansible
       ansible_python_interpreter: /usr/bin/python3
   children:
     incus_hosts:
       hosts:
         veza-staging:
-    veza_staging:
+    haproxy:
       hosts:
-        veza-staging:
+        veza-staging-haproxy:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    # The 6 app containers + 1 ephemeral tools container. deploy_app.yml
+    # selects the inactive color dynamically from the haproxy
+    # container's /var/lib/veza/active-color file ; both blue and
+    # green sit in inventory so either color is reachable when needed.
+    veza_app_backend:
+      hosts:
+        veza-staging-backend-blue:
+        veza-staging-backend-green:
+        veza-staging-backend-tools:  # ephemeral, Phase A only
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    veza_app_stream:
+      hosts:
+        veza-staging-stream-blue:
+        veza-staging-stream-green:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    veza_app_web:
+      hosts:
+        veza-staging-web-blue:
+        veza-staging-web-green:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3
+    # Data tier — never destroyed, only created if absent. ZFS
+    # snapshots taken on every deploy as the safety net.
+    veza_data:
+      hosts:
+        veza-staging-postgres:
+        veza-staging-redis:
+        veza-staging-rabbitmq:
+        veza-staging-minio:
+      vars:
+        ansible_connection: community.general.incus
+        ansible_python_interpreter: /usr/bin/python3