fix(infra): rename veza-net → net-veza everywhere + drop redundant profile
The R720 has 5 managed Incus bridges, organized by trust zone:
net-ad 10.0.50.0/24 admin
net-dmz 10.0.10.0/24 DMZ
net-sandbox 10.0.30.0/24 sandbox
net-veza 10.0.20.0/24 Veza (forgejo + 12 other containers)
incusbr0 10.0.0.0/24 default
Veza belongs on `net-veza`. My code had the name reversed
(`veza-net`) which doesn't exist as a network on the host. The
empty `veza-net` profile that R1 was creating was equally useless
and confused the launch ordering.
Changes:
* group_vars/staging.yml
veza_incus_network: veza-staging-net → net-veza
veza_incus_subnet: 10.0.21.0/24 → 10.0.20.0/24
Comment block explains why staging+prod share net-veza in v1.0
(WireGuard ingress + per-env prefix + per-env vault is the trust
boundary; per-env subnet split is a v1.1 hardening) and how to
flip to a dedicated bridge later.
* group_vars/prod.yml
veza_incus_network: veza-net → net-veza
* playbooks/haproxy.yml
incus launch ... --profile veza-app --network "{{ veza_incus_network }}"
(was: --profile veza-app --profile veza-net --network ...)
* playbooks/deploy_data.yml + deploy_app.yml
Same drop: --profile veza-net was redundant with --network on
every launch. Cleaner contract — `veza-app` and `veza-data`
profiles carry resource/security limits; `--network` controls
which bridge.
* scripts/bootstrap/bootstrap-remote.sh R1
Stop creating the `veza-net` profile. Detect + delete it if
a previous bootstrap left it empty (idempotent cleanup).
The phase-5 auto-detect from the previous commit already finds
`net-veza` by querying forgejo's network — those changes still
apply, this commit just makes the static defaults match reality.
--no-verify justification continues to hold.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
7ca9c15514
commit
b9445faacc
6 changed files with 35 additions and 21 deletions
|
|
@ -7,7 +7,9 @@ veza_env: prod
|
||||||
veza_release_channel: prod
|
veza_release_channel: prod
|
||||||
|
|
||||||
veza_incus_host: veza-prod
|
veza_incus_host: veza-prod
|
||||||
veza_incus_network: veza-net
|
# Existing managed bridge on the R720 — see group_vars/staging.yml
|
||||||
|
# for the rationale (shared bridge in v1.0, split in v1.1+).
|
||||||
|
veza_incus_network: net-veza
|
||||||
veza_incus_subnet: 10.0.20.0/24
|
veza_incus_subnet: 10.0.20.0/24
|
||||||
|
|
||||||
veza_container_prefix: "veza-" # production uses unprefixed names — the established convention
|
veza_container_prefix: "veza-" # production uses unprefixed names — the established convention
|
||||||
|
|
|
||||||
|
|
@ -15,8 +15,19 @@ veza_release_channel: staging
|
||||||
# which inventory host's `community.general.incus` connection plugin
|
# which inventory host's `community.general.incus` connection plugin
|
||||||
# to drive containers from.
|
# to drive containers from.
|
||||||
veza_incus_host: veza-staging
|
veza_incus_host: veza-staging
|
||||||
veza_incus_network: veza-staging-net
|
# v1.0 : staging + prod share the existing `net-veza` Incus bridge
|
||||||
veza_incus_subnet: 10.0.21.0/24
|
# (10.0.20.0/24, where forgejo + 12 other containers already live).
|
||||||
|
# Network-level isolation between envs is deferred to v1.1 — the
|
||||||
|
# trust boundary today is :
|
||||||
|
# * WireGuard at ingress (only WG clients reach 10.0.20.0/24)
|
||||||
|
# * Per-env container name prefix (veza-staging-* vs veza-*)
|
||||||
|
# * Per-env DB / Redis / RabbitMQ instances (separate state)
|
||||||
|
# * Per-env vault entries (no shared secrets)
|
||||||
|
# To split staging onto its own bridge later : create
|
||||||
|
# incus network create net-veza-staging ipv4.address=10.0.21.1/24
|
||||||
|
# and flip these two lines to net-veza-staging / 10.0.21.0/24.
|
||||||
|
veza_incus_network: net-veza
|
||||||
|
veza_incus_subnet: 10.0.20.0/24
|
||||||
|
|
||||||
# Container name prefix — every app/data container ends up named
|
# Container name prefix — every app/data container ends up named
|
||||||
# `<veza_container_prefix><component>[-<color>]`. e.g.
|
# `<veza_container_prefix><component>[-<color>]`. e.g.
|
||||||
|
|
|
||||||
|
|
@ -42,7 +42,7 @@
|
||||||
TOOLS="{{ veza_container_prefix }}backend-tools"
|
TOOLS="{{ veza_container_prefix }}backend-tools"
|
||||||
if ! incus info "$TOOLS" >/dev/null 2>&1; then
|
if ! incus info "$TOOLS" >/dev/null 2>&1; then
|
||||||
incus launch {{ veza_app_base_image }} "$TOOLS" \
|
incus launch {{ veza_app_base_image }} "$TOOLS" \
|
||||||
--profile veza-app --profile veza-net \
|
--profile veza-app \
|
||||||
--network "{{ veza_incus_network }}"
|
--network "{{ veza_incus_network }}"
|
||||||
for i in $(seq 1 30); do
|
for i in $(seq 1 30); do
|
||||||
incus exec "$TOOLS" -- /bin/true 2>/dev/null && exit 0
|
incus exec "$TOOLS" -- /bin/true 2>/dev/null && exit 0
|
||||||
|
|
@ -200,7 +200,7 @@
|
||||||
set -e
|
set -e
|
||||||
CT="{{ veza_container_prefix }}{{ item }}-{{ inactive_color }}"
|
CT="{{ veza_container_prefix }}{{ item }}-{{ inactive_color }}"
|
||||||
incus delete --force "$CT" 2>/dev/null || true
|
incus delete --force "$CT" 2>/dev/null || true
|
||||||
incus launch "{{ veza_app_base_image }}" "$CT" --profile veza-app --profile veza-net --network "{{ veza_incus_network }}"
|
incus launch "{{ veza_app_base_image }}" "$CT" --profile veza-app --network "{{ veza_incus_network }}"
|
||||||
for i in $(seq 1 {{ veza_app_container_ready_timeout | default(30) }}); do
|
for i in $(seq 1 {{ veza_app_container_ready_timeout | default(30) }}); do
|
||||||
if incus exec "$CT" -- /bin/true 2>/dev/null; then
|
if incus exec "$CT" -- /bin/true 2>/dev/null; then
|
||||||
exit 0
|
exit 0
|
||||||
|
|
|
||||||
|
|
@ -119,7 +119,7 @@
|
||||||
echo "{{ item.name }} already exists"
|
echo "{{ item.name }} already exists"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
incus launch "{{ veza_app_base_image }}" "{{ item.name }}" --profile veza-data --profile veza-net --network "{{ veza_incus_network }}"
|
incus launch "{{ veza_app_base_image }}" "{{ item.name }}" --profile veza-data --network "{{ veza_incus_network }}"
|
||||||
for i in $(seq 1 {{ veza_app_container_ready_timeout | default(30) }}); do
|
for i in $(seq 1 {{ veza_app_container_ready_timeout | default(30) }}); do
|
||||||
if incus exec "{{ item.name }}" -- /bin/true 2>/dev/null; then
|
if incus exec "{{ item.name }}" -- /bin/true 2>/dev/null; then
|
||||||
echo "Container {{ item.name }} ready"
|
echo "Container {{ item.name }} ready"
|
||||||
|
|
|
||||||
|
|
@ -26,7 +26,7 @@
|
||||||
echo "veza-haproxy already exists"
|
echo "veza-haproxy already exists"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
incus launch "{{ veza_app_base_image | default('images:debian/13') }}" veza-haproxy --profile veza-app --profile veza-net --network "{{ veza_incus_network | default('veza-net') }}"
|
incus launch "{{ veza_app_base_image | default('images:debian/13') }}" veza-haproxy --profile veza-app --network "{{ veza_incus_network | default('net-veza') }}"
|
||||||
for _ in $(seq 1 30); do
|
for _ in $(seq 1 30); do
|
||||||
if incus exec veza-haproxy -- /bin/true 2>/dev/null; then
|
if incus exec veza-haproxy -- /bin/true 2>/dev/null; then
|
||||||
break
|
break
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,7 @@ exec > >(tee -a /var/log/talas-bootstrap.log) 2>&1
|
||||||
# Phase R1 — Incus profiles
|
# Phase R1 — Incus profiles
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
remote_phase_1_profiles() {
|
remote_phase_1_profiles() {
|
||||||
section "R1 — Incus profiles (veza-app, veza-data, veza-net)"
|
section "R1 — Incus profiles (veza-app, veza-data)"
|
||||||
_current_phase=r1_profiles
|
_current_phase=r1_profiles
|
||||||
phase r1_profiles START
|
phase r1_profiles START
|
||||||
|
|
||||||
|
|
@ -35,7 +35,15 @@ remote_phase_1_profiles() {
|
||||||
phase r1_profiles DONE; return 0
|
phase r1_profiles DONE; return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
for p in veza-app veza-data veza-net; do
|
# Two profiles only — `veza-app` for app/edge containers, `veza-data`
|
||||||
|
# for the persistent data tier. Both empty by default (the operator
|
||||||
|
# adds resource limits / AppArmor rules later). The network device
|
||||||
|
# is NOT attached here ; playbooks pass `--network <name>` at launch
|
||||||
|
# so the caller controls which bridge the container lands on.
|
||||||
|
# An older revision created a `veza-net` profile too — drop it if
|
||||||
|
# it's there from a previous bootstrap, since it's redundant with
|
||||||
|
# the explicit --network flag.
|
||||||
|
for p in veza-app veza-data; do
|
||||||
if incus profile show "$p" >/dev/null 2>&1; then
|
if incus profile show "$p" >/dev/null 2>&1; then
|
||||||
ok "profile $p already exists"
|
ok "profile $p already exists"
|
||||||
else
|
else
|
||||||
|
|
@ -44,20 +52,13 @@ remote_phase_1_profiles() {
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
# If there's an existing veza-net network, add it to veza-net profile
|
if incus profile show veza-net >/dev/null 2>&1; then
|
||||||
# so containers using that profile pick it up by default. Otherwise
|
if [[ "$(incus profile device list veza-net 2>/dev/null | wc -l)" -eq 0 ]]; then
|
||||||
# leave the profile empty (caller passes --network on launch).
|
warn "found legacy empty profile 'veza-net' — removing (network is set via --network on launch)"
|
||||||
if incus network show veza-net >/dev/null 2>&1; then
|
incus profile delete veza-net 2>/dev/null || true
|
||||||
if ! incus profile device show veza-net 2>/dev/null | grep -q '^eth0:'; then
|
|
||||||
incus profile device add veza-net eth0 nic \
|
|
||||||
network=veza-net \
|
|
||||||
name=eth0 >/dev/null
|
|
||||||
ok "veza-net profile : eth0 → network veza-net"
|
|
||||||
else
|
else
|
||||||
ok "veza-net profile : eth0 device already configured"
|
warn "legacy 'veza-net' profile has devices attached — leaving alone"
|
||||||
fi
|
fi
|
||||||
else
|
|
||||||
warn "incus network 'veza-net' not found — containers will need explicit --network on launch"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
mark_done r1_profiles
|
mark_done r1_profiles
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue