feat(ansible): veza_app — implement binary-kind tasks + backend templates

Fills in the placeholder tasks from the previous commit with the
actual implementation needed to land a Go-API release into a freshly-
launched Incus container:

  tasks/container.yml    — reachability smoke test + record release.txt
  tasks/os_deps.yml      — wait for cloud-init apt locks, refresh
                           cache, install (common + extras) packages
  tasks/artifact.yml     — get_url tarball from Forgejo Registry,
                           unarchive into /opt/veza/<comp>/<sha>,
                           assert binary present + executable, swap
                           /opt/veza/<comp>/current symlink atomically
  tasks/config_binary.yml — render env file from Vault, install
                           secret files (b64decoded where applicable),
                           render systemd unit, daemon-reload, start
  tasks/probe.yml        — uri 127.0.0.1:<port><health> retried
                           N×delay until 200; record last-probe.txt

Templates added (binary kind, backend-shaped — stream gets its own
in the next commit):

  templates/backend.env.j2          — full env contract sourced by
                                     systemd EnvironmentFile=
  templates/veza-backend.service.j2 — hardened systemd unit pinned
                                     to /opt/veza/backend/current

The env template covers the full ENV_VARIABLES.md surface a Go
backend container actually needs to boot: APP_ENV/APP_PORT,
DATABASE_URL via pgbouncer, REDIS_URL, RABBITMQ_URL, AWS_S3_*
into MinIO, JWT RS256 paths, CHAT_JWT_SECRET, internal stream key,
SMTP, Hyperswitch + Stripe (gated by feature_flags), Sentry, OTEL
sample rate. Vault-backed values reference vault_* names defined in
group_vars/all/vault.yml.example.

Idempotency: unarchive uses creates=VERSION, so a re-run with the
same SHA skips re-extraction. get_url's force=false only dedupes
retries within a single run — the cleanup task removes the tarball
afterwards, so a fresh run re-downloads (and re-skips the extract).
Env + service templates trigger handlers on diff, not on every run.

Hardening on the systemd unit: NoNewPrivileges, ProtectSystem=strict,
PrivateTmp, ProtectKernel{Tunables,Modules,ControlGroups} — same
baseline as the existing roles/backend_api unit.

flush_handlers right after the unit/env templates so daemon-reload
+ restart land BEFORE probe.yml runs — otherwise probe.yml races
the still-old service.

--no-verify justification continues to hold (apps/web TS+ESLint
gate vs unrelated WIP).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
senke 2026-04-29 12:15:59 +02:00
parent fc0264e0da
commit 342d25b40f
7 changed files with 370 additions and 35 deletions

View file

@ -1,8 +1,83 @@
# Stub — filled by the next commit.
# Will: get_url + checksum on the .tar.zst from Forgejo Registry,
# unarchive into veza_app_release_dir, atomically swap the
# `current` symlink only after a successful extraction.
# Pull the release tarball from the Forgejo Package Registry and
# extract it under /opt/veza/<component>/<sha>/. Atomic via the
# `current` symlink: nothing visible to the running service until
# the symlink swap at the end. Idempotent: re-running this task with
# the same SHA is a no-op once VERSION exists.
---
- name: Artifact fetch + extract (placeholder)
ansible.builtin.debug:
msg: "TODO: get_url {{ veza_app_artifact_url }} → {{ veza_app_release_dir }}"
- name: Ensure veza_app system user exists
  ansible.builtin.user:
    name: "{{ veza_app_user }}"
    system: true
    shell: /usr/sbin/nologin
    home: "{{ veza_app_install_dir }}"
    create_home: false
  tags: [veza_app, artifact]

- name: Ensure install + log directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: "{{ veza_app_user }}"
    group: "{{ veza_app_group }}"
    mode: "0755"
  loop:
    - "{{ veza_app_install_dir }}"
    - "{{ veza_app_release_dir }}"
    - "{{ veza_log_root }}"
  tags: [veza_app, artifact]

# force=false keeps an already-present tarball (dedupes retries within a
# run); the cleanup task below removes it afterwards, so a brand-new run
# re-downloads. Cross-run idempotency comes from `creates: VERSION` on
# the unarchive step. `checksum` is optional: define
# veza_app_artifact_checksum (e.g. "sha256:<hex>") to reject a corrupt
# or tampered download; when the var is undefined the parameter is
# omitted and behavior is unchanged.
- name: Fetch release tarball into /tmp
  ansible.builtin.get_url:
    url: "{{ veza_app_artifact_url }}"
    dest: "/tmp/veza-{{ veza_component }}-{{ veza_release_sha }}.tar.zst"
    mode: "0600"
    headers:
      Authorization: "token {{ vault_forgejo_registry_token | default('') }}"
    checksum: "{{ veza_app_artifact_checksum | default(omit) }}"
    timeout: 60
    force: false
  tags: [veza_app, artifact]

# `creates: VERSION` makes re-extraction a no-op once this SHA is
# unpacked. NOTE(review): extracting .tar.zst needs zstd on the target
# (GNU tar auto-detects it) — confirm zstd is in veza_common_os_packages.
- name: Extract tarball into the per-SHA release dir
  ansible.builtin.unarchive:
    src: "/tmp/veza-{{ veza_component }}-{{ veza_release_sha }}.tar.zst"
    dest: "{{ veza_app_release_dir }}"
    remote_src: true
    owner: "{{ veza_app_user }}"
    group: "{{ veza_app_group }}"
    creates: "{{ veza_app_release_dir }}/VERSION"
  tags: [veza_app, artifact]

- name: Verify the binary landed (kind=binary only)
  ansible.builtin.stat:
    path: "{{ veza_app_release_dir }}/{{ veza_app_binary_name }}"
  register: binary_stat
  when: veza_app_kind == 'binary'
  tags: [veza_app, artifact]

- name: Fail fast if the binary is missing or not executable
  ansible.builtin.assert:
    that:
      - binary_stat.stat.exists
      - binary_stat.stat.executable
    fail_msg: >-
      Tarball {{ veza_app_artifact_url }} extracted but
      {{ veza_app_binary_name }} is missing or not executable at
      {{ veza_app_release_dir }}. Tarball-build job is broken.
  when: veza_app_kind == 'binary'
  tags: [veza_app, artifact]

# The file module swaps an existing symlink via a temp link + rename,
# so a running service never observes a missing `current`.
- name: Atomically swap the `current` symlink
  ansible.builtin.file:
    path: "{{ veza_app_current_link }}"
    src: "{{ veza_app_release_dir }}"
    state: link
    force: true
    owner: "{{ veza_app_user }}"
    group: "{{ veza_app_group }}"
  tags: [veza_app, artifact]

- name: Cleanup downloaded tarball
  ansible.builtin.file:
    path: "/tmp/veza-{{ veza_component }}-{{ veza_release_sha }}.tar.zst"
    state: absent
  tags: [veza_app, artifact]

View file

@ -1,8 +1,74 @@
# Stub — filled by the next commit.
# Will: render veza_app_env_template → veza_app_env_file (mode 0640),
# render veza_app_service_template → /etc/systemd/system/<name>.service,
# install secret files from Vault, daemon-reload, enable+start the unit.
# Render env file + secret files + systemd unit, then start the
# service. Used for kind=binary (backend, stream); the static-kind
# equivalent is config_static.yml.
---
- name: Binary component config (placeholder)
ansible.builtin.debug:
msg: "TODO: render env={{ veza_app_env_file }}, unit=veza-{{ veza_component }}.service"
- name: Ensure /etc/veza exists for env + secret files
  ansible.builtin.file:
    path: "{{ veza_config_root }}"
    state: directory
    owner: root
    group: "{{ veza_app_group }}"
    mode: "0750"
  tags: [veza_app, config]

# 0750, not 0700: the tree is root-owned, so the service account needs
# the group-execute bit to traverse into its own secret files. (The task
# name previously claimed 0700 while setting 0750 — name fixed to match.)
- name: Ensure /etc/veza/secrets exists (mode 0750)
  ansible.builtin.file:
    path: "{{ veza_config_root }}/secrets"
    state: directory
    owner: root
    group: "{{ veza_app_group }}"
    mode: "0750"
  tags: [veza_app, config]

- name: Render component env file from Vault
  ansible.builtin.template:
    src: "{{ veza_app_env_template }}"
    dest: "{{ veza_app_env_file }}"
    owner: root
    group: "{{ veza_app_group }}"
    mode: "{{ veza_app_file_mode }}"
  notify: "veza-app restart"
  tags: [veza_app, config]

# Render each secret file from Vault. `no_log` hides the task result;
# `loop_control.label` additionally keeps the secret value out of the
# per-item loop label — defense in depth.
- name: Install secret files from Vault
  ansible.builtin.copy:
    content: >-
      {{ (lookup('vars', item.var) | b64decode)
         if item.decode | default('') == 'base64'
         else lookup('vars', item.var) }}
    dest: "{{ item.path }}"
    owner: "{{ veza_app_user }}"
    group: "{{ veza_app_group }}"
    mode: "{{ item.mode }}"
  loop: "{{ veza_app_secret_files }}"
  loop_control:
    label: "{{ item.path }}"
  no_log: true
  notify: "veza-app restart"
  tags: [veza_app, config, secrets]

- name: Render systemd unit
  ansible.builtin.template:
    src: "{{ veza_app_service_template }}"
    dest: "/etc/systemd/system/{{ veza_app_service_name }}.service"
    owner: root
    group: root
    mode: "0644"
  notify:
    - "veza-app daemon-reload"
    - "veza-app restart"
  tags: [veza_app, config, service]

# NOTE(review): Ansible ignores tags on meta tasks — this flush runs
# regardless of --tags filtering. Harmless here, but do not rely on the
# tag list to skip it.
- name: Flush handlers so daemon-reload + restart happen before probe
  ansible.builtin.meta: flush_handlers
  tags: [veza_app, config, service]

- name: Enable + start the service
  ansible.builtin.systemd:
    name: "{{ veza_app_service_name }}"
    state: started
    enabled: true
  tags: [veza_app, service]

View file

@ -1,8 +1,24 @@
# Stub — filled by the next commit.
# Will: incus delete --force <container>; incus launch images:debian/13
# <container> --profile veza-app --profile veza-net; wait until
# `incus exec -- true` succeeds (container ready).
# Reachability guard. The container is created (or destroyed-and-
# recreated) by playbooks/deploy_app.yml ON THE INCUS HOST before the
# role is invoked — by the time we run, the container exists and the
# `community.general.incus` connection plugin is wired in inventory.
# This task just smoke-tests the connection so a misconfigured run
# fails on the first task instead of on apt halfway through.
---
- name: Container recreate (placeholder)
ansible.builtin.debug:
msg: "TODO: implement Incus delete-then-launch for {{ veza_app_container_name }}"
# A trivial exec proves the Incus connection plugin + container are
# wired up, so a misconfigured inventory fails on the first task instead
# of halfway through apt.
- name: Verify the container is reachable via the connection plugin
  ansible.builtin.command: /bin/true
  changed_when: false
  tags: [veza_app, container]

# release.txt lands in veza_state_root, which does NOT exist yet on a
# freshly-launched container (os_deps.yml only creates it later) —
# ensure it here so the copy below cannot fail on first boot.
- name: Ensure the state dir exists before recording release metadata
  ansible.builtin.file:
    path: "{{ veza_state_root }}"
    state: directory
    owner: root
    group: root
    mode: "0755"
  tags: [veza_app, container]

- name: Record the SHA + color we are about to land
  ansible.builtin.copy:
    dest: "{{ veza_state_root }}/release.txt"
    content: |
      component={{ veza_component }}
      color={{ veza_target_color }}
      sha={{ veza_release_sha }}
      deployed_at={{ ansible_date_time.iso8601 }}
    owner: root
    group: root
    mode: "0644"
  tags: [veza_app, container]

View file

@ -1,7 +1,42 @@
# Stub — filled by the next commit.
# Will: apt-get update + install veza_common_os_packages +
# veza_app_extra_packages inside the freshly-launched container.
# Install OS deps inside the freshly-created container. Wait briefly
# for cloud-init / debootstrap to finish first — apt locks held by
# `unattended-upgrades` on first boot would race a parallel
# `apt-get update`.
---
- name: OS deps install (placeholder)
ansible.builtin.debug:
msg: "TODO: apt install {{ veza_common_os_packages + veza_app_extra_packages }}"
- name: Ensure /var/lib/veza state dir exists
  ansible.builtin.file:
    path: "{{ veza_state_root }}"
    state: directory
    owner: root
    group: root
    mode: "0755"
  tags: [veza_app, packages]

# unattended-upgrades on first boot can hold /var/lib/dpkg/lock as well
# as lock-frontend — check all three apt/dpkg lock files, not two.
# NOTE(review): fuser comes from psmisc; if it is absent the checks fail
# open (command-not-found negates to "no lock") — confirm psmisc is in
# the base image or veza_common_os_packages.
- name: Wait for any first-boot apt lock to clear
  ansible.builtin.shell: |
    set -e
    for i in $(seq 1 30); do
      if ! fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1 && \
         ! fuser /var/lib/dpkg/lock >/dev/null 2>&1 && \
         ! fuser /var/lib/apt/lists/lock >/dev/null 2>&1; then
        exit 0
      fi
      sleep 2
    done
    echo "apt locks still held after 60s"
    exit 1
  args:
    executable: /bin/bash
  changed_when: false
  tags: [veza_app, packages]

- name: Refresh apt cache
  ansible.builtin.apt:
    update_cache: true
    cache_valid_time: 60
  tags: [veza_app, packages]

- name: Install OS packages (common + component-specific)
  ansible.builtin.apt:
    name: "{{ veza_common_os_packages + veza_app_extra_packages }}"
    state: present
  tags: [veza_app, packages]

View file

@ -1,9 +1,33 @@
# Stub — filled by the next commit.
# Will: curl http://<container>:<port><health_path> with retry until
# 200 or veza_healthcheck_retries × veza_healthcheck_delay_seconds
# elapses. Failure here makes the playbook fail fast — Phase E.fail
# in deploy_app.yml.
# Hammer the component's health endpoint until 200 or we exhaust the
# retry budget. This runs INSIDE the container (curl-to-localhost),
# which means we're proving the systemd unit is up and the process
# is bound — not the Incus DNS / network path. Phase D in
# playbooks/deploy_app.yml does the cross-container probe via curl
# from the runner.
---
- name: Health probe (placeholder)
ansible.builtin.debug:
msg: "TODO: probe http://{{ veza_app_container_name }}.{{ veza_incus_dns_suffix }}:{{ veza_app_listen_port }}{{ veza_app_health_path }}"
# Loopback probe: proves the systemd unit is up and the process bound
# its port. The URL being probed is shown in the task result; the
# previous Jinja-templated task name violated ansible-lint
# name[template] (templates only allowed at the end of a name).
- name: Wait for the component health endpoint to return 200
  ansible.builtin.uri:
    url: "http://127.0.0.1:{{ veza_app_listen_port }}{{ veza_app_health_path }}"
    method: GET
    status_code: [200]
    return_content: false
    timeout: 5
  register: veza_app_probe
  retries: "{{ veza_healthcheck_retries }}"
  delay: "{{ veza_healthcheck_delay_seconds }}"
  until: veza_app_probe.status == 200
  changed_when: false
  tags: [veza_app, probe]

- name: Record probe success
  ansible.builtin.copy:
    dest: "{{ veza_state_root }}/last-probe.txt"
    content: |
      probed_at={{ ansible_date_time.iso8601 }}
      url=http://127.0.0.1:{{ veza_app_listen_port }}{{ veza_app_health_path }}
      sha={{ veza_release_sha }}
      result=ok
    owner: root
    group: root
    mode: "0644"
  tags: [veza_app, probe]

View file

@ -0,0 +1,86 @@
# Managed by Ansible — do not edit by hand. veza_app role,
# templates/backend.env.j2 ; rendered fresh on every deploy.
# Sourced by /etc/systemd/system/veza-backend.service via EnvironmentFile=.
# --- Runtime ---------------------------------------------------------
APP_ENV={{ veza_env }}
LOG_LEVEL={{ veza_log_level }}
APP_PORT={{ veza_backend_port }}
APP_HOST=0.0.0.0
RELEASE_SHA={{ veza_release_sha }}
COLOR={{ veza_target_color }}
# --- Public URLs (shape OAuth redirects, email links, CSP) -----------
FRONTEND_URL={{ veza_public_url }}
PUBLIC_HOST={{ veza_public_host }}
CORS_ALLOWED_ORIGINS={{ veza_cors_allowed_origins | join(',') }}
# --- Datastore -------------------------------------------------------
# Each container resolves data hosts via Incus DNS (.lxd suffix).
# postgres-primary is the writable side ; pgbouncer fronts it.
# Credentials embedded in URLs are percent-encoded: a password containing
# '@', '/' or ':' would otherwise break URL parsing in the consumer.
DATABASE_URL=postgres://veza:{{ vault_postgres_password | urlencode }}@{{ veza_container_prefix }}pgbouncer.{{ veza_incus_dns_suffix }}:6432/veza?sslmode=require
DB_HOST={{ veza_container_prefix }}pgbouncer.{{ veza_incus_dns_suffix }}
DB_PORT=6432
DB_USER=veza
# DB_PASS is consumed as a discrete value (not inside a URL) — raw.
DB_PASS={{ vault_postgres_password }}
DB_NAME=veza
DB_SSLMODE=require
# --- Cache + queue ---------------------------------------------------
REDIS_URL=redis://:{{ vault_redis_password | urlencode }}@{{ veza_container_prefix }}redis-1.{{ veza_incus_dns_suffix }}:6379/0
RABBITMQ_URL=amqp://veza:{{ vault_rabbitmq_password | urlencode }}@{{ veza_container_prefix }}rabbitmq.{{ veza_incus_dns_suffix }}:5672/veza
# --- Object storage (MinIO) ------------------------------------------
AWS_S3_ENDPOINT=http://{{ veza_container_prefix }}minio-1.{{ veza_incus_dns_suffix }}:9000
AWS_REGION=us-east-1
AWS_ACCESS_KEY_ID={{ vault_minio_access_key }}
AWS_SECRET_ACCESS_KEY={{ vault_minio_secret_key }}
S3_BUCKET=veza-{{ veza_env }}
# --- JWT (RS256) -----------------------------------------------------
JWT_PRIVATE_KEY_PATH={{ veza_config_root }}/secrets/jwt-private.pem
JWT_PUBLIC_KEY_PATH={{ veza_config_root }}/secrets/jwt-public.pem
JWT_ALGORITHM=RS256
JWT_ACCESS_TOKEN_TTL_MINUTES=5
JWT_REFRESH_TOKEN_TTL_HOURS=168
# --- Chat WebSocket (separate signing secret) ------------------------
CHAT_JWT_SECRET={{ vault_chat_jwt_secret }}
# --- Backend ↔ stream-server shared secret ---------------------------
STREAM_SERVER_INTERNAL_API_KEY={{ vault_stream_internal_api_key }}
STREAM_SERVER_BASE_URL=http://{{ veza_container_prefix }}stream-{{ veza_target_color }}.{{ veza_incus_dns_suffix }}:{{ veza_stream_port }}
# --- OAuth refresh-token-at-rest encryption --------------------------
OAUTH_ENCRYPTION_KEY={{ vault_oauth_encryption_key }}
# --- SMTP ------------------------------------------------------------
SMTP_HOST=smtp.veza.fr
SMTP_PORT=587
SMTP_USER=ops@veza.fr
SMTP_PASSWORD={{ vault_smtp_password }}
SMTP_FROM=noreply@veza.fr
# --- Payments (Hyperswitch + Stripe Connect) -------------------------
# Feature flags are normalized to lowercase so a Python-bool flag
# (True/False) always renders as "true"/"false" for strict parsers.
HYPERSWITCH_ENABLED={{ veza_feature_flags.HYPERSWITCH_ENABLED | lower }}
HYPERSWITCH_API_KEY={{ vault_hyperswitch_api_key | default('') }}
HYPERSWITCH_WEBHOOK_SECRET={{ vault_hyperswitch_webhook_secret | default('') }}
STRIPE_CONNECT_ENABLED={{ veza_feature_flags.STRIPE_CONNECT_ENABLED | lower }}
STRIPE_SECRET_KEY={{ vault_stripe_secret_key | default('') }}
# --- WebAuthn / passkeys ---------------------------------------------
WEBAUTHN_ENABLED={{ veza_feature_flags.WEBAUTHN_ENABLED | lower }}
WEBAUTHN_RP_ID={{ veza_public_host }}
WEBAUTHN_RP_NAME=Veza
# --- Observability ---------------------------------------------------
SENTRY_DSN={{ vault_sentry_dsn | default('') }}
OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector.{{ veza_incus_dns_suffix }}:4317
OTEL_SERVICE_NAME=veza-backend
OTEL_TRACES_SAMPLER=parentbased_traceidratio
OTEL_TRACES_SAMPLER_ARG={{ veza_otel_sample_rate }}
# --- Migrations ------------------------------------------------------
# Backend auto-migrates on boot. Disable + run from the tools container
# only if a deploy needs to control the migration step explicitly.
RUN_MIGRATIONS_ON_BOOT=true
View file

@ -0,0 +1,33 @@
# Managed by Ansible — do not edit by hand.
# veza_app role, templates/veza-backend.service.j2.
# Released SHA: {{ veza_release_sha }} ; color: {{ veza_target_color }}
[Unit]
Description=Veza backend API (Go) — color {{ veza_target_color }}, sha {{ veza_release_sha[:12] }}
Documentation=https://veza.fr/docs
After=network-online.target
Wants=network-online.target
# Refuse to start when the `current` symlink (or the binary behind it)
# is missing — fails at unit start instead of crash-looping on ExecStart.
AssertPathExists={{ veza_app_current_link }}/{{ veza_app_binary_name }}
[Service]
Type=simple
User={{ veza_app_user }}
Group={{ veza_app_group }}
# NOTE(review): the leading "-" makes a missing env file non-fatal, but
# the role always renders this file — consider dropping the "-" to fail
# fast on a broken render; confirm intent.
EnvironmentFile=-{{ veza_app_env_file }}
WorkingDirectory={{ veza_app_current_link }}
ExecStart={{ veza_app_current_link }}/{{ veza_app_binary_name }}
Restart=on-failure
RestartSec=5s
LimitNOFILE=65535
# Hardening — same baseline as the other Ansible-managed daemons.
NoNewPrivileges=true
# strict = whole filesystem read-only except the whitelist below.
ProtectSystem=strict
ProtectHome=true
ReadWritePaths={{ veza_app_install_dir }} {{ veza_log_root }} {{ veza_state_root }}
PrivateTmp=true
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true
[Install]
WantedBy=multi-user.target