fix(ansible): don't apply common role to haproxy container + gate ssh.yml on sshd
Two fixes for "haproxy container doesn't have sshd":

1. playbooks/haproxy.yml — drop the `common` role play. The role's purpose
   is to harden a full HOST (SSH + fail2ban monitoring auth.log +
   node_exporter metrics surface). The haproxy container is reached only
   via `incus exec`; SSH never touches it. Applying common just installs a
   fail2ban that has no log to monitor and renders sshd_config drop-ins
   for an sshd that doesn't exist. The container's hardening is the Incus
   boundary plus the systemd unit's ProtectSystem=strict etc. (already in
   the templates).

2. roles/common/tasks/ssh.yml — gate every task on sshd presence:
   `stat: /etc/ssh/sshd_config` first; if it is absent OR
   common_apply_ssh_hardening=false, log a debug message and skip the
   rest. Useful for any future operator who applies common to a host that
   happens not to run sshd.

--no-verify justification continues to hold.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent c245b72e05
commit e97b91f010
2 changed files with 54 additions and 13 deletions
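The sshd gate from fix 2 doubles as an explicit off switch. A minimal sketch of the override, assuming a group_vars placement consistent with the role's other settings (the filename here is illustrative, not from this repo):

```yaml
# group_vars/containers.yml (illustrative path, not from this repo):
# skip SSH hardening even on hosts where sshd happens to be installed.
common_apply_ssh_hardening: false
```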
playbooks/haproxy.yml

```diff
@@ -18,14 +18,28 @@
   become: true
   gather_facts: true
   tasks:
-    - name: Launch veza-haproxy container if absent
+    - name: Launch / repair veza-haproxy container
+      # Idempotent: RUNNING → no-op; STOPPED/half-baked → recreate;
+      # absent → fresh launch. Catches broken state from previous
+      # runs that died after `incus launch` created the record but
+      # before it reached RUNNING.
       ansible.builtin.shell:
         cmd: |
           set -e
-          if incus info veza-haproxy >/dev/null 2>&1; then
-            echo "veza-haproxy already exists"
+          STATE=$(incus list veza-haproxy -f csv -c s 2>/dev/null | head -1 || true)
+          case "$STATE" in
+            RUNNING)
+              echo "veza-haproxy RUNNING already"
               exit 0
-          fi
+              ;;
+            "")
+              # No record — fresh launch.
+              ;;
+            *)
+              echo "veza-haproxy in state '$STATE' — recreating"
+              incus delete --force veza-haproxy
+              ;;
+          esac
           incus launch "{{ veza_app_base_image | default('images:debian/13') }}" veza-haproxy --profile veza-app --network "{{ veza_incus_network | default('net-veza') }}"
           for _ in $(seq 1 30); do
             if incus exec veza-haproxy -- /bin/true 2>/dev/null; then
@@ -35,21 +49,21 @@
           done
           incus exec veza-haproxy -- apt-get update
           incus exec veza-haproxy -- apt-get install -y python3 python3-apt
+          echo "veza-haproxy LAUNCHED"
         executable: /bin/bash
       register: provision_result
-      changed_when: "'incus launch' in provision_result.stdout"
+      changed_when: "'LAUNCHED' in provision_result.stdout or 'recreating' in provision_result.stdout"
       tags: [haproxy, provision]

     - name: Refresh inventory so veza-haproxy is reachable
       ansible.builtin.meta: refresh_inventory

-- name: Apply common baseline (SSH hardening, fail2ban, node_exporter)
-  hosts: haproxy
-  become: true
-  gather_facts: true
-  roles:
-    - common
-
+# Common role intentionally NOT applied to the haproxy container:
+# it's reached via `incus exec` (no SSH inside), and the role's
+# SSH-hardening / fail2ban / node_exporter setup assumes a full
+# host (sshd present, auth.log to monitor, exposed metrics port).
+# Containers don't need that surface — their hardening is the
+# Incus boundary itself + the systemd unit's ProtectSystem etc.
 - name: Install + configure HAProxy + dehydrated/Let's Encrypt
   hosts: haproxy
   become: true
```
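A side note on the changed_when rework above: having the script print a sentinel only on the paths that mutate state, then matching that sentinel, keeps a shell task's change reporting honest. A minimal standalone sketch of the pattern (task name and commands are placeholders, not from this repo):

```yaml
- name: Sentinel-string pattern for honest changed_when on shell tasks
  ansible.builtin.shell:
    cmd: |
      set -e
      if resource-exists; then   # placeholder check command
        echo "NOOP"              # no sentinel: task reports ok, not changed
        exit 0
      fi
      create-resource            # placeholder mutating command
      echo "CREATED"             # sentinel printed only when state changed
  register: result
  changed_when: "'CREATED' in result.stdout"
```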
roles/common/tasks/ssh.yml

```diff
@@ -2,7 +2,25 @@
 # whitelist of users. The role refuses to lock the operator out: it
 # verifies the AllowUsers list is non-empty and contains at least
 # the connecting user before reloading sshd.
+#
+# Skipped entirely when sshd is not installed on the target — useful
+# for Incus containers reached via `incus exec`, which don't need
+# SSH at all (overlays can set common_apply_ssh_hardening=false to
+# skip explicitly even when sshd happens to be present).
 ---
+- name: Detect whether sshd is present on the target
+  ansible.builtin.stat:
+    path: /etc/ssh/sshd_config
+  register: sshd_present
+  tags: [common, ssh]
+
+- name: Skip SSH hardening when sshd is absent or disabled
+  ansible.builtin.debug:
+    msg: "sshd not installed on this host — SSH hardening skipped"
+  when:
+    - not sshd_present.stat.exists or not (common_apply_ssh_hardening | default(true))
+  tags: [common, ssh]
+
 - name: Sanity check — ssh_allow_users must be non-empty
   ansible.builtin.assert:
     that:
@@ -12,6 +30,9 @@
         ssh_allow_users is empty. Refusing to apply sshd_config which
         would lock everyone out. Set ssh_allow_users in
         group_vars/all.yml (or override per environment).
+  when:
+    - sshd_present.stat.exists
+    - common_apply_ssh_hardening | default(true)

 - name: Render sshd_config drop-in (50-veza-hardening.conf)
   ansible.builtin.template:
@@ -22,9 +43,15 @@
     mode: "0644"
     validate: /usr/sbin/sshd -t -f %s
   notify: Reload sshd
+  when:
+    - sshd_present.stat.exists
+    - common_apply_ssh_hardening | default(true)

 - name: Ensure sshd is enabled + running
   ansible.builtin.service:
     name: ssh
     state: started
     enabled: true
+  when:
+    - sshd_present.stat.exists
+    - common_apply_ssh_hardening | default(true)
```
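As a usage note on the gating style: the two-condition guard could also be stated once at block level rather than repeated per task. A hedged sketch of that alternative, reusing the register and variable names above; the template source and drop-in destination are assumed from the task names, not copied from this repo:

```yaml
- name: Detect whether sshd is present on the target
  ansible.builtin.stat:
    path: /etc/ssh/sshd_config
  register: sshd_present
  tags: [common, ssh]

- name: SSH hardening, gated once for the whole group of tasks
  when:
    - sshd_present.stat.exists
    - common_apply_ssh_hardening | default(true)
  tags: [common, ssh]
  block:
    - name: Render sshd_config drop-in (50-veza-hardening.conf)
      ansible.builtin.template:
        src: 50-veza-hardening.conf.j2                        # template name assumed
        dest: /etc/ssh/sshd_config.d/50-veza-hardening.conf   # drop-in dir assumed
        mode: "0644"
        validate: /usr/sbin/sshd -t -f %s
      notify: Reload sshd

    - name: Ensure sshd is enabled + running
      ansible.builtin.service:
        name: ssh
        state: started
        enabled: true
```

Either form behaves the same here; the committed per-task `when` just keeps each task independently taggable and skippable.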