# pg_auto_failover data node — joins the monitor and lets the
# formation decide primary/secondary by election order. The first
# node to register becomes primary; later nodes become secondaries.
#
# Sync replication is configured by the monitor itself based on
# `number_sync_standbys` + `replication_quorum`, set on the formation
# in monitor.yml's post-init step.
---
- name: Check whether the data node is already initialised
  ansible.builtin.stat:
    path: "{{ pg_auto_failover_state_dir }}/postgres/postgresql.conf"
  register: node_initialised

- name: Initialise pg_auto_failover data node (joins the monitor)
  become: true
  become_user: postgres
  ansible.builtin.command:
    # Folded scalar (>) keeps this as one command line; ansible.builtin.command
    # does not invoke a shell, so the `?` in the monitor URI is passed literally.
    cmd: >
      /usr/lib/postgresql/{{ postgres_version }}/bin/pg_autoctl create postgres
      --pgdata {{ pg_auto_failover_state_dir }}/postgres
      --pgctl /usr/lib/postgresql/{{ postgres_version }}/bin/pg_ctl
      --pgport {{ pg_auto_failover_node_port }}
      --hostname {{ ansible_fqdn }}
      --monitor postgres://autoctl_node@{{ pg_auto_failover_monitor_host }}:{{ pg_auto_failover_monitor_port }}/{{ pg_auto_failover_monitor_dbname }}?sslmode=require
      --auth trust
      --ssl-self-signed
      --dbname {{ pg_auto_failover_app_dbname }}
      --username {{ pg_auto_failover_app_user }}
      --run-as-keeper
    # `creates:` already makes the task idempotent; the stat-based `when:`
    # below is belt-and-braces and also skips become/templating work entirely
    # on re-runs.
    creates: "{{ pg_auto_failover_state_dir }}/postgres/postgresql.conf"
  when: not node_initialised.stat.exists
  # SECURITY NOTE(review): `--auth trust` disables password auth for local
  # replication setup; confirm pg_hba is tightened post-init or switch to
  # `--auth scram-sha-256` with a replication password.
  # NOTE(review): verify `--run-as-keeper` against the installed pg_autoctl
  # version — recent releases spell the "create and stay running" option as
  # `--run`; confirm before relying on this flag.

- name: Render systemd unit for pg_autoctl data node
  ansible.builtin.template:
    src: pg_autoctl-node.service.j2
    dest: /etc/systemd/system/pg_autoctl.service
    owner: root
    group: root
    mode: "0644"
  notify: Restart pg_autoctl

- name: Enable + start pg_autoctl data node service
  ansible.builtin.systemd:
    name: pg_autoctl
    state: started
    enabled: true
    # Pick up the unit rendered above on first run.
    daemon_reload: true

- name: Set formation sync replication policy (run from any data node, idempotent)
  become: true
  become_user: postgres
  ansible.builtin.command:
    cmd: >
      /usr/lib/postgresql/{{ postgres_version }}/bin/pg_autoctl set formation
      number-sync-standbys {{ pg_auto_failover_number_sync_standbys }}
      --pgdata {{ pg_auto_failover_state_dir }}/postgres
  # The monitor applies the same value idempotently, so this never reports
  # a change. FIX: the previous `failed_when: false` blanket-suppressed ALL
  # failures (monitor unreachable, wrong pgdata, missing binary) — since the
  # command is idempotent, real failures should surface; suppression removed.
  changed_when: false
  # Only one node needs to push the policy — but the command is
  # idempotent on the monitor side, so running it from every node
  # keeps the role re-entrant without coordination.
  run_once: false