diff --git a/infra/ansible/playbooks/haproxy.yml b/infra/ansible/playbooks/haproxy.yml
index 7d7c9f648..f9f62c2c9 100644
--- a/infra/ansible/playbooks/haproxy.yml
+++ b/infra/ansible/playbooks/haproxy.yml
@@ -18,14 +18,28 @@
   become: true
   gather_facts: true
   tasks:
-    - name: Launch veza-haproxy container if absent
+    - name: Launch / repair veza-haproxy container
+      # Idempotent: RUNNING → no-op; STOPPED/half-baked → recreate;
+      # absent → fresh launch. Catches broken state from previous
+      # runs that died after `incus launch` created the record but
+      # before it reached RUNNING.
       ansible.builtin.shell:
         cmd: |
           set -e
-          if incus info veza-haproxy >/dev/null 2>&1; then
-            echo "veza-haproxy already exists"
-            exit 0
-          fi
+          STATE=$(incus list "^veza-haproxy$" -f csv -c s 2>/dev/null | head -n 1 || true)
+          case "$STATE" in
+            RUNNING)
+              echo "veza-haproxy RUNNING already"
+              exit 0
+              ;;
+            "")
+              # No record — fresh launch.
+              ;;
+            *)
+              echo "veza-haproxy in state '$STATE' — recreating"
+              incus delete --force veza-haproxy
+              ;;
+          esac
           incus launch "{{ veza_app_base_image | default('images:debian/13') }}" veza-haproxy --profile veza-app --network "{{ veza_incus_network | default('net-veza') }}"
           for _ in $(seq 1 30); do
             if incus exec veza-haproxy -- /bin/true 2>/dev/null; then
@@ -35,21 +49,21 @@
           done
           incus exec veza-haproxy -- apt-get update
           incus exec veza-haproxy -- apt-get install -y python3 python3-apt
+          echo "veza-haproxy LAUNCHED"
         executable: /bin/bash
       register: provision_result
-      changed_when: "'incus launch' in provision_result.stdout"
+      changed_when: "'LAUNCHED' in provision_result.stdout or 'recreating' in provision_result.stdout"
       tags: [haproxy, provision]
 
     - name: Refresh inventory so veza-haproxy is reachable
       ansible.builtin.meta: refresh_inventory
 
-- name: Apply common baseline (SSH hardening, fail2ban, node_exporter)
-  hosts: haproxy
-  become: true
-  gather_facts: true
-  roles:
-    - common
-
+# Common role intentionally NOT applied to the haproxy container:
+# it's reached via `incus exec` (no SSH inside), and the role's
+# SSH-hardening / fail2ban / node_exporter setup assumes a full
+# host (sshd present, auth.log to monitor, exposed metrics port).
+# Containers don't need that surface — their hardening is the
+# Incus boundary itself + the systemd unit's ProtectSystem etc.
 - name: Install + configure HAProxy + dehydrated/Let's Encrypt
   hosts: haproxy
   become: true
diff --git a/infra/ansible/roles/common/tasks/ssh.yml b/infra/ansible/roles/common/tasks/ssh.yml
index 8988836d2..a3ae048f4 100644
--- a/infra/ansible/roles/common/tasks/ssh.yml
+++ b/infra/ansible/roles/common/tasks/ssh.yml
@@ -2,7 +2,25 @@
 # whitelist of users. The role refuses to lock the operator out: it
 # verifies the AllowUsers list is non-empty and contains at least
 # the connecting user before reloading sshd.
+#
+# Skipped entirely when sshd is not installed on the target — useful
+# for Incus containers reached via `incus exec`, which don't need
+# SSH at all (set common_apply_ssh_hardening=false in an overlay to
+# skip explicitly even when sshd happens to be present).
 ---
+- name: Detect whether sshd is present on the target
+  ansible.builtin.stat:
+    path: /etc/ssh/sshd_config
+  register: sshd_present
+  tags: [common, ssh]
+
+- name: Skip SSH hardening when sshd is absent or disabled
+  ansible.builtin.debug:
+    msg: "SSH hardening skipped (sshd absent or common_apply_ssh_hardening=false)"
+  when:
+    - not sshd_present.stat.exists or not (common_apply_ssh_hardening | default(true))
+  tags: [common, ssh]
+
 - name: Sanity check — ssh_allow_users must be non-empty
   ansible.builtin.assert:
     that:
@@ -12,6 +30,9 @@
       ssh_allow_users is empty. Refusing to apply sshd_config which
       would lock everyone out. Set ssh_allow_users in group_vars/all.yml
       (or override per environment).
+  when:
+    - sshd_present.stat.exists
+    - common_apply_ssh_hardening | default(true)
 
 - name: Render sshd_config drop-in (50-veza-hardening.conf)
   ansible.builtin.template:
@@ -22,9 +43,15 @@
     mode: "0644"
     validate: /usr/sbin/sshd -t -f %s
   notify: Reload sshd
+  when:
+    - sshd_present.stat.exists
+    - common_apply_ssh_hardening | default(true)
 
 - name: Ensure sshd is enabled + running
   ansible.builtin.service:
     name: ssh
     state: started
     enabled: true
+  when:
+    - sshd_present.stat.exists
+    - common_apply_ssh_hardening | default(true)