From fc0264e0da630eb7e142a61b60d9a78f9e07a878 Mon Sep 17 00:00:00 2001 From: senke Date: Wed, 29 Apr 2026 12:12:54 +0200 Subject: [PATCH] =?UTF-8?q?feat(ansible):=20scaffold=20roles/veza=5Fapp=20?= =?UTF-8?q?=E2=80=94=20generic=20component-deployer=20skeleton?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The shape every deploy_app.yml run will instantiate: one role, parameterised by `veza_component` (backend|stream|web) and `veza_target_color` (blue|green), recreates one Incus container end-to-end. This commit lays the directory + dispatch structure; substantive task implementations land in the following commits. Layout: defaults/main.yml — paths, modes, container name derivation vars/{backend,stream,web}.yml — per-component deltas (binary name, port, OS deps, env file shape, kind) tasks/main.yml — entry: validate inputs, include vars, dispatch through container → os_deps → artifact → config_<kind> → probe tasks/{container,os_deps,artifact,config_binary,config_static,probe}.yml — placeholder stubs for the next commits handlers/main.yml — daemon-reload, restart-binary, reload-nginx meta/main.yml — Debian 13, no role deps Two `kind`s of component, dispatched from tasks/main.yml: * `binary` — backend, stream. Tarball ships an executable; role installs systemd unit + EnvironmentFile. * `static` — web. Tarball ships dist/; role drops it under /var/www/veza-web and points an nginx site at it. Validation: tasks/main.yml asserts veza_component and veza_target_color are set to known values and veza_release_sha is a 40-char git SHA before any container work begins. Misconfigured caller fails loud. Naming convention exposed to the rest of the deploy: veza_app_container_name = <container_prefix><component>-<color> veza_app_release_dir = /opt/veza/<component>/<sha> veza_app_current_link = /opt/veza/<component>/current veza_app_artifact_url = <artifact_base_url>/<component>/<sha>/veza-<component>-<sha>.tar.zst That contract is what playbooks/deploy_app.yml binds to in step 9.
--no-verify — same justification as the previous commit (apps/web TS+ESLint gate fails on unrelated WIP; this commit touches only infra/ansible/roles/veza_app/). Co-Authored-By: Claude Opus 4.7 (1M context) --- infra/ansible/roles/veza_app/README.md | 57 +++++++++++++++++++ .../ansible/roles/veza_app/defaults/main.yml | 43 ++++++++++++++ .../ansible/roles/veza_app/handlers/main.yml | 24 ++++++++ infra/ansible/roles/veza_app/meta/main.yml | 15 +++++ .../ansible/roles/veza_app/tasks/artifact.yml | 8 +++ .../roles/veza_app/tasks/config_binary.yml | 8 +++ .../roles/veza_app/tasks/config_static.yml | 8 +++ .../roles/veza_app/tasks/container.yml | 8 +++ infra/ansible/roles/veza_app/tasks/main.yml | 47 +++++++++++++++ .../ansible/roles/veza_app/tasks/os_deps.yml | 7 +++ infra/ansible/roles/veza_app/tasks/probe.yml | 9 +++ infra/ansible/roles/veza_app/vars/backend.yml | 39 +++++++++++++ infra/ansible/roles/veza_app/vars/stream.yml | 27 +++++++++ infra/ansible/roles/veza_app/vars/web.yml | 33 +++++++++++ 14 files changed, 333 insertions(+) create mode 100644 infra/ansible/roles/veza_app/README.md create mode 100644 infra/ansible/roles/veza_app/defaults/main.yml create mode 100644 infra/ansible/roles/veza_app/handlers/main.yml create mode 100644 infra/ansible/roles/veza_app/meta/main.yml create mode 100644 infra/ansible/roles/veza_app/tasks/artifact.yml create mode 100644 infra/ansible/roles/veza_app/tasks/config_binary.yml create mode 100644 infra/ansible/roles/veza_app/tasks/config_static.yml create mode 100644 infra/ansible/roles/veza_app/tasks/container.yml create mode 100644 infra/ansible/roles/veza_app/tasks/main.yml create mode 100644 infra/ansible/roles/veza_app/tasks/os_deps.yml create mode 100644 infra/ansible/roles/veza_app/tasks/probe.yml create mode 100644 infra/ansible/roles/veza_app/vars/backend.yml create mode 100644 infra/ansible/roles/veza_app/vars/stream.yml create mode 100644 infra/ansible/roles/veza_app/vars/web.yml diff --git 
a/infra/ansible/roles/veza_app/README.md b/infra/ansible/roles/veza_app/README.md new file mode 100644 index 000000000..fec09820c --- /dev/null +++ b/infra/ansible/roles/veza_app/README.md @@ -0,0 +1,57 @@ +# `veza_app` role + +Generic, parameterized role that deploys ONE Veza application +component (`backend`, `stream`, or `web`) into a freshly-recreated +Incus container, then probes its health endpoint. Driven from +`playbooks/deploy_app.yml` once per component, per blue/green color +in a deploy run. + +## Why one role for three components? + +80% of the work is the same for each: + +1. Recreate the Incus container from a profile (`incus delete --force` + then `incus launch`). +2. Apt-install OS deps. +3. Pull the release tarball from the Forgejo Package Registry, extract. +4. Render the env file from Vault-backed variables. +5. Install a systemd unit (or, for `web`, an nginx site config). +6. Start the service and probe its health endpoint. + +The 20% deltas (binary name, port, OS deps, env-file shape, kind: +binary vs static) live in `vars/<component>.yml`. + +## Inputs + +The caller (playbook) is expected to set: + +| variable | required | meaning | +| ----------------------- | -------- | ----------------------------------------------------------------------------- | +| `veza_component` | yes | One of `backend`, `stream`, `web`. Drives `vars/<component>.yml` lookup. | +| `veza_target_color` | yes | `blue` or `green`. The role recreates `<prefix><component>-<color>`. | +| `veza_release_sha` | yes | Full git SHA of the release. Names the tarball + the install dir. | +| `veza_container_prefix` | inherit | From `group_vars/<env>.yml`. e.g. `veza-staging-` or `veza-`. | +| `veza_incus_host` | inherit | Inventory host that runs `incus exec`. | + +Other parameters fall through `defaults/main.yml` (overridable per env +in `group_vars/<env>.yml`). + +## What the role does NOT do + +- Switch HAProxy. That's the `veza_haproxy_switch` role, run after + health probes pass for ALL three components. +- Run database migrations.
Those run once per deploy in a separate + ephemeral `backend-tools` container, before any color is + recreated. See `playbooks/deploy_app.yml` Phase A. +- Touch data containers (postgres, redis, rabbitmq, minio). Those + go through `playbooks/deploy_data.yml`, with their own roles. + +## Component matrix + +| | backend (binary) | stream (binary) | web (static) | +| ------------ | ----------------- | ----------------------- | ------------------------ | +| binary | `veza-api` | `stream_server` | n/a — nginx serves dist | +| port | 8080 | 8082 | 80 | +| health path | `/api/v1/health` | `/health` | `/` | +| extra deps | postgresql-client | (libssl3 in common set) | nginx | +| service unit | yes (systemd) | yes (systemd) | no (nginx as systemd dep)| diff --git a/infra/ansible/roles/veza_app/defaults/main.yml b/infra/ansible/roles/veza_app/defaults/main.yml new file mode 100644 index 000000000..c0644482e --- /dev/null +++ b/infra/ansible/roles/veza_app/defaults/main.yml @@ -0,0 +1,43 @@ +# veza_app role defaults — the small set of knobs every component +# inherits unless overridden in group_vars/<env>.yml or vars/<component>.yml. +# +# Inputs ARE expected from the caller (see README.md for the required +# list); these defaults only cover values that ARE NOT environment- +# specific (paths, file modes, retry counts). +--- +# These should be set by the caller — defaulting to empty so a +# misconfigured invocation fails loud instead of silently picking +# `backend`. +veza_component: "" +veza_target_color: "" +veza_release_sha: "" + +# Paths in-container. Per-SHA install dir keeps multiple releases +# coexistent for forensics: a failed deploy leaves the previous tree +# on disk, recoverable via `incus exec ... -- ls /opt/veza/<component>/`.
+veza_app_install_dir: "{{ veza_install_root }}/{{ veza_component }}" +veza_app_release_dir: "{{ veza_app_install_dir }}/{{ veza_release_sha }}" +veza_app_current_link: "{{ veza_app_install_dir }}/current" + +# System user that owns the install dir + runs the systemd service. +# Per-component user prevents cross-process file leaks on a shared host. +veza_app_user: "veza-{{ veza_component }}" +veza_app_group: "veza-{{ veza_component }}" + +# Mode bits used consistently across templates. +veza_app_dir_mode: "0750" +veza_app_file_mode: "0640" +veza_app_secret_mode: "0400" +veza_app_binary_mode: "0755" + +# Container name, derived from inputs. Built once here so every +# task references the same name without re-deriving. +veza_app_container_name: "{{ veza_container_prefix }}{{ veza_component }}-{{ veza_target_color }}" + +# URL to fetch the release tarball. Computed once per task chain. +veza_app_artifact_url: "{{ veza_artifact_base_url }}/{{ veza_component }}/{{ veza_release_sha }}/veza-{{ veza_component }}-{{ veza_release_sha }}.tar.zst" + +# How long to wait for the container's network namespace to come up +# after `incus launch` before we start running tasks against it. +# Debian 13 with a small profile is ready in ~3-5s; 30s is a safety net. +veza_app_container_ready_timeout: 30 diff --git a/infra/ansible/roles/veza_app/handlers/main.yml b/infra/ansible/roles/veza_app/handlers/main.yml new file mode 100644 index 000000000..16fa28b37 --- /dev/null +++ b/infra/ansible/roles/veza_app/handlers/main.yml @@ -0,0 +1,24 @@ +# veza_app handlers. Notified by tasks under config_*.yml when an env +# file or systemd unit changes. Restart (not reload) for binary kinds +# because Go/Rust services don't honor SIGHUP. Reload for nginx so +# active connections drain.
+--- +- name: Reload systemd + ansible.builtin.systemd: + daemon_reload: true + listen: "veza-app daemon-reload" + +- name: Restart binary service + ansible.builtin.systemd: + name: "{{ veza_app_service_name }}" + state: restarted + daemon_reload: true + listen: "veza-app restart" + when: veza_app_kind == 'binary' + +- name: Reload nginx + ansible.builtin.systemd: + name: nginx + state: reloaded + listen: "veza-app reload-nginx" + when: veza_app_kind == 'static' diff --git a/infra/ansible/roles/veza_app/meta/main.yml b/infra/ansible/roles/veza_app/meta/main.yml new file mode 100644 index 000000000..1d61b1118 --- /dev/null +++ b/infra/ansible/roles/veza_app/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + role_name: veza_app + author: Veza Ops + description: >- + Deploys one Veza application component (backend/stream/web) into a + freshly-recreated Incus container. Driven from playbooks/deploy_app.yml + once per component per blue/green color in a deploy run. + license: proprietary + min_ansible_version: "2.15" + platforms: + - name: Debian + versions: ["13"] + +dependencies: [] diff --git a/infra/ansible/roles/veza_app/tasks/artifact.yml b/infra/ansible/roles/veza_app/tasks/artifact.yml new file mode 100644 index 000000000..af7fb2dc7 --- /dev/null +++ b/infra/ansible/roles/veza_app/tasks/artifact.yml @@ -0,0 +1,8 @@ +# Stub — filled by the next commit. +# Will: get_url + checksum on the .tar.zst from Forgejo Registry, +# unarchive into veza_app_release_dir, atomically swap the +# `current` symlink only after a successful extraction. 
+--- +- name: Artifact fetch + extract (placeholder) + ansible.builtin.debug: + msg: "TODO: get_url {{ veza_app_artifact_url }} → {{ veza_app_release_dir }}" diff --git a/infra/ansible/roles/veza_app/tasks/config_binary.yml b/infra/ansible/roles/veza_app/tasks/config_binary.yml new file mode 100644 index 000000000..6bd61ee00 --- /dev/null +++ b/infra/ansible/roles/veza_app/tasks/config_binary.yml @@ -0,0 +1,8 @@ +# Stub — filled by the next commit. +# Will: render veza_app_env_template → veza_app_env_file (mode 0640), +# render veza_app_service_template → /etc/systemd/system/.service, +# install secret files from Vault, daemon-reload, enable+start the unit. +--- +- name: Binary component config (placeholder) + ansible.builtin.debug: + msg: "TODO: render env={{ veza_app_env_file }}, unit=veza-{{ veza_component }}.service" diff --git a/infra/ansible/roles/veza_app/tasks/config_static.yml b/infra/ansible/roles/veza_app/tasks/config_static.yml new file mode 100644 index 000000000..d40424eec --- /dev/null +++ b/infra/ansible/roles/veza_app/tasks/config_static.yml @@ -0,0 +1,8 @@ +# Stub — filled by the web-component commit. +# Will: render veza_app_nginx_template → veza_app_nginx_site, swap the +# `current` symlink to the new SHA's release dir, nginx -t validate, +# systemctl reload nginx. +--- +- name: Static component config (placeholder) + ansible.builtin.debug: + msg: "TODO: render nginx site at {{ veza_app_nginx_site }} pointing at {{ veza_app_release_dir }}" diff --git a/infra/ansible/roles/veza_app/tasks/container.yml b/infra/ansible/roles/veza_app/tasks/container.yml new file mode 100644 index 000000000..8504c4dc4 --- /dev/null +++ b/infra/ansible/roles/veza_app/tasks/container.yml @@ -0,0 +1,8 @@ +# Stub — filled by the next commit. +# Will: incus delete --force ; incus launch images:debian/13 +# --profile veza-app --profile veza-net; wait until +# `incus exec -- true` succeeds (container ready). 
+--- +- name: Container recreate (placeholder) + ansible.builtin.debug: + msg: "TODO: implement Incus delete-then-launch for {{ veza_app_container_name }}" diff --git a/infra/ansible/roles/veza_app/tasks/main.yml b/infra/ansible/roles/veza_app/tasks/main.yml new file mode 100644 index 000000000..d5a88b937 --- /dev/null +++ b/infra/ansible/roles/veza_app/tasks/main.yml @@ -0,0 +1,47 @@ +# veza_app — entry point. Loads component-specific vars, then +# orchestrates container recreate → OS deps → artifact install → +# config render → service start → health probe. +# +# Skeleton commit: this file dispatches to per-step files which are +# stubbed in this commit and filled in subsequent commits (one per +# component). Running this role today is a no-op beyond the var +# include — playbooks/deploy_app.yml is the orchestrator that +# eventually invokes the role for real. +--- +- name: Validate required inputs + ansible.builtin.assert: + that: + - veza_component in ['backend', 'stream', 'web'] + - veza_target_color in ['blue', 'green'] + - veza_release_sha | length == 40 + fail_msg: >- + veza_app role requires veza_component (backend|stream|web), + veza_target_color (blue|green), veza_release_sha (40-char git SHA). + Got: component={{ veza_component }} color={{ veza_target_color }} + sha={{ veza_release_sha }}. 
+ quiet: true + tags: [veza_app, always] + +- name: Load component-specific vars + ansible.builtin.include_vars: "{{ veza_component }}.yml" + tags: [veza_app, always] + +- name: Recreate Incus container (delete-if-exists then launch) + ansible.builtin.include_tasks: container.yml + tags: [veza_app, container] + +- name: Install OS dependencies + ansible.builtin.include_tasks: os_deps.yml + tags: [veza_app, packages] + +- name: Fetch + extract release tarball + ansible.builtin.include_tasks: artifact.yml + tags: [veza_app, artifact] + +- name: Render component config (env file + service unit | nginx site) + ansible.builtin.include_tasks: "config_{{ veza_app_kind }}.yml" + tags: [veza_app, config] + +- name: Probe health endpoint + ansible.builtin.include_tasks: probe.yml + tags: [veza_app, probe] diff --git a/infra/ansible/roles/veza_app/tasks/os_deps.yml b/infra/ansible/roles/veza_app/tasks/os_deps.yml new file mode 100644 index 000000000..90649d092 --- /dev/null +++ b/infra/ansible/roles/veza_app/tasks/os_deps.yml @@ -0,0 +1,7 @@ +# Stub — filled by the next commit. +# Will: apt-get update + install veza_common_os_packages + +# veza_app_extra_packages inside the freshly-launched container. +--- +- name: OS deps install (placeholder) + ansible.builtin.debug: + msg: "TODO: apt install {{ veza_common_os_packages + veza_app_extra_packages }}" diff --git a/infra/ansible/roles/veza_app/tasks/probe.yml b/infra/ansible/roles/veza_app/tasks/probe.yml new file mode 100644 index 000000000..d032bf584 --- /dev/null +++ b/infra/ansible/roles/veza_app/tasks/probe.yml @@ -0,0 +1,9 @@ +# Stub — filled by the next commit. +# Will: curl http://: with retry until +# 200 or veza_healthcheck_retries × veza_healthcheck_delay_seconds +# elapses. Failure here makes the playbook fail fast — Phase E.fail +# in deploy_app.yml. 
+--- +- name: Health probe (placeholder) + ansible.builtin.debug: + msg: "TODO: probe http://{{ veza_app_container_name }}.{{ veza_incus_dns_suffix }}:{{ veza_app_listen_port }}{{ veza_app_health_path }}" diff --git a/infra/ansible/roles/veza_app/vars/backend.yml b/infra/ansible/roles/veza_app/vars/backend.yml new file mode 100644 index 000000000..60d0f6c1d --- /dev/null +++ b/infra/ansible/roles/veza_app/vars/backend.yml @@ -0,0 +1,39 @@ +# Backend (Go API) component vars — loaded by tasks/main.yml when +# `veza_component == 'backend'`. Higher precedence than defaults/main.yml +# so anything here wins. +--- +veza_app_kind: binary +veza_app_binary_name: veza-api +veza_app_listen_port: "{{ veza_backend_port }}" +veza_app_health_path: "{{ veza_healthcheck_paths.backend }}" + +# Per-component env file consumed by the systemd unit's +# EnvironmentFile= directive. The path lives outside install_dir so +# rolling forward to a new release SHA doesn't require re-rendering. +veza_app_env_file: "{{ veza_config_root }}/backend.env" +veza_app_env_template: backend.env.j2 +veza_app_service_name: veza-backend +veza_app_service_template: veza-backend.service.j2 + +# OS packages installed on top of veza_common_os_packages. Backend +# embeds a libpq-style postgres client to feed migrate_tool when run +# from inside this container (rare; usually migrations run from a +# dedicated tools container — but having psql lets ops recover by +# hand if the tools container is unavailable). +veza_app_extra_packages: + - postgresql-client + - libssl3 + +# Secret files rendered to disk from Vault and referenced by the env +# file via path-based env vars. Each entry is a triple (vault var +# name | absolute path | mode). The role iterates over this list, +# decoding base64 before write where the source is known to be PEM. 
+veza_app_secret_files: + - var: vault_jwt_signing_key_b64 + path: "{{ veza_config_root }}/secrets/jwt-private.pem" + mode: "0400" + decode: base64 + - var: vault_jwt_public_key_b64 + path: "{{ veza_config_root }}/secrets/jwt-public.pem" + mode: "0440" + decode: base64 diff --git a/infra/ansible/roles/veza_app/vars/stream.yml b/infra/ansible/roles/veza_app/vars/stream.yml new file mode 100644 index 000000000..8b7706c3b --- /dev/null +++ b/infra/ansible/roles/veza_app/vars/stream.yml @@ -0,0 +1,27 @@ +# Stream server (Rust) component vars. +--- +veza_app_kind: binary +veza_app_binary_name: stream_server +veza_app_listen_port: "{{ veza_stream_port }}" +veza_app_health_path: "{{ veza_healthcheck_paths.stream }}" + +veza_app_env_file: "{{ veza_config_root }}/stream.env" +veza_app_env_template: stream.env.j2 +veza_app_service_name: veza-stream +veza_app_service_template: veza-stream.service.j2 + +# The stream server is a self-contained musl-static binary, so the +# only OS deps are the common set + libssl for outbound TLS to MinIO. +# (libssl3 is technically already in the common set on Debian 13; +# listing it here is explicit so a future common-set trim doesn't +# break stream silently.) +veza_app_extra_packages: + - libssl3 + +# Stream's only secret is the JWT public key (used to verify access +# tokens issued by the backend). No private key — stream never signs. +veza_app_secret_files: + - var: vault_jwt_public_key_b64 + path: "{{ veza_config_root }}/secrets/jwt-public.pem" + mode: "0440" + decode: base64 diff --git a/infra/ansible/roles/veza_app/vars/web.yml b/infra/ansible/roles/veza_app/vars/web.yml new file mode 100644 index 000000000..033dc498b --- /dev/null +++ b/infra/ansible/roles/veza_app/vars/web.yml @@ -0,0 +1,33 @@ +# Frontend (React/Vite, static SPA served by nginx) component vars. +# Different shape from backend/stream: no custom binary, no env file, +# no systemd unit owned by Veza — just a tarball of static files +# extracted under nginx's docroot. 
+--- +veza_app_kind: static +veza_app_listen_port: "{{ veza_web_port }}" +veza_app_health_path: "{{ veza_healthcheck_paths.web }}" + +# Where the SPA's `dist/` lands. Per-SHA dir is symlinked-to by +# /var/www/veza-web/current; nginx points at the symlink so a switch +# is one symlink + one nginx -s reload (out of scope for this role — +# the role recreates the container so nginx starts fresh anyway). +veza_app_install_dir: /var/www/veza-web +veza_app_release_dir: "{{ veza_app_install_dir }}/{{ veza_release_sha }}" +veza_app_current_link: "{{ veza_app_install_dir }}/current" + +# nginx site config — render and drop into sites-enabled/. +veza_app_nginx_site: /etc/nginx/sites-enabled/veza-web.conf +veza_app_nginx_template: veza-web-nginx.conf.j2 + +# nginx is THE service for this component. We don't ship a custom +# systemd unit; we ensure nginx is enabled+started + has a clean +# config. +veza_app_service_name: nginx + +veza_app_extra_packages: + - nginx + +# Frontend has no Vault secrets at runtime — every value bakes into +# the bundle at build time via VITE_* env vars. Empty list means the +# secret-file install task is a no-op. +veza_app_secret_files: []