Compare commits

6 commits: a514f4986b...c323d37c30
| SHA1 |
|---|
| c323d37c30 |
| bf24a5e3ce |
| 947630e38f |
| 6a54268476 |
| 5f6625cc56 |
| 4298f0c26a |
7 changed files with 187 additions and 6 deletions
```diff
@@ -49,14 +49,19 @@ export const FEATURES = {
    * HLS Streaming
    * Backend endpoints: /api/v1/tracks/:id/hls/info, /api/v1/tracks/:id/hls/status
    *
-   * Default is `false` to match backend `HLS_STREAMING` env (off by default).
-   * When off, playback goes through `/api/v1/tracks/:id/stream` (MP3 range requests).
-   * Enable via VITE_FEATURE_HLS_STREAMING=true in environments where the backend
-   * transcoder is actually running.
+   * Default flipped to `true` in v1.0.10 polish to match backend
+   * `HLS_STREAMING=true` (Day 17 of the v1.0.9 sprint). Adaptive
+   * bitrate via HLS is the canonical playback path; MP3 range
+   * requests via `/api/v1/tracks/:id/stream` remain a fallback when
+   * the browser can't play HLS or the transcoder hasn't produced
+   * segments yet.
+   *
+   * Set VITE_FEATURE_HLS_STREAMING=false to opt out (unit-test envs
+   * without a transcoder, or to bisect playback regressions).
    */
   HLS_STREAMING: parseFeatureEnv(
     import.meta.env.VITE_FEATURE_HLS_STREAMING,
-    false,
+    true,
   ),
 
   /**
```
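The `parseFeatureEnv` helper itself is outside this diff; a minimal sketch of the behavior the comments imply (env string in, boolean out, falling back to the compiled-in default) might look like this, with the exact coercion rules assumed rather than confirmed:

```typescript
// Hypothetical sketch of parseFeatureEnv; the real helper is not part of
// this diff. Assumed rules: "true"/"1" enable, "false"/"0" disable, and
// anything else (including an unset env var) falls back to the default.
function parseFeatureEnv(raw: string | undefined, defaultValue: boolean): boolean {
  if (raw === undefined || raw === '') return defaultValue;
  const value = raw.trim().toLowerCase();
  if (value === 'true' || value === '1') return true;
  if (value === 'false' || value === '0') return false;
  return defaultValue;
}
```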
```diff
@@ -181,6 +181,21 @@ services:
       - AWS_ACCESS_KEY_ID=${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
       - AWS_SECRET_ACCESS_KEY=${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
       - AWS_REGION=${AWS_REGION:-us-east-1}
+      # v1.0.10 polish: enable the S3 stack and route track uploads through
+      # MinIO end-to-end. Without these two flags, defaults (local +
+      # disabled) win and the AWS_S3_* credentials above are inert. With
+      # blue/green active/active behind HAProxy, local-disk uploads on
+      # one pod are invisible to the other — S3 is required for HA.
+      - AWS_S3_ENABLED=true
+      - TRACK_STORAGE_BACKEND=s3
+      # WebRTC ICE servers — populated from the coturn service above.
+      # Empty TURN vars degrade to STUN-only (calls work peer-to-peer
+      # but fail behind symmetric NAT); the all-or-nothing rule in
+      # webrtc_config_handler.go means partial config is rejected.
+      - WEBRTC_STUN_URLS=stun:${WEBRTC_TURN_PUBLIC_IP:?WEBRTC_TURN_PUBLIC_IP must be set}:3478
+      - WEBRTC_TURN_URLS=turn:${WEBRTC_TURN_PUBLIC_IP}:3478,turns:${WEBRTC_TURN_PUBLIC_IP}:5349
+      - WEBRTC_TURN_USERNAME=${WEBRTC_TURN_USERNAME:?WEBRTC_TURN_USERNAME must be set}
+      - WEBRTC_TURN_CREDENTIAL=${WEBRTC_TURN_CREDENTIAL:?WEBRTC_TURN_CREDENTIAL must be set}
       - HLS_STREAMING=true
       - HLS_STORAGE_DIR=/data/hls
     volumes:
```
```diff
@@ -236,6 +251,21 @@ services:
       - AWS_ACCESS_KEY_ID=${S3_ACCESS_KEY:?S3_ACCESS_KEY must be set}
       - AWS_SECRET_ACCESS_KEY=${S3_SECRET_KEY:?S3_SECRET_KEY must be set}
       - AWS_REGION=${AWS_REGION:-us-east-1}
+      # v1.0.10 polish: enable the S3 stack and route track uploads through
+      # MinIO end-to-end. Without these two flags, defaults (local +
+      # disabled) win and the AWS_S3_* credentials above are inert. With
+      # blue/green active/active behind HAProxy, local-disk uploads on
+      # one pod are invisible to the other — S3 is required for HA.
+      - AWS_S3_ENABLED=true
+      - TRACK_STORAGE_BACKEND=s3
+      # WebRTC ICE servers — populated from the coturn service above.
+      # Empty TURN vars degrade to STUN-only (calls work peer-to-peer
+      # but fail behind symmetric NAT); the all-or-nothing rule in
+      # webrtc_config_handler.go means partial config is rejected.
+      - WEBRTC_STUN_URLS=stun:${WEBRTC_TURN_PUBLIC_IP:?WEBRTC_TURN_PUBLIC_IP must be set}:3478
+      - WEBRTC_TURN_URLS=turn:${WEBRTC_TURN_PUBLIC_IP}:3478,turns:${WEBRTC_TURN_PUBLIC_IP}:5349
+      - WEBRTC_TURN_USERNAME=${WEBRTC_TURN_USERNAME:?WEBRTC_TURN_USERNAME must be set}
+      - WEBRTC_TURN_CREDENTIAL=${WEBRTC_TURN_CREDENTIAL:?WEBRTC_TURN_CREDENTIAL must be set}
       - HLS_STREAMING=true
       - HLS_STORAGE_DIR=/data/hls
     volumes:
```
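The "all-or-nothing rule" these comments point at lives in the backend's webrtc_config_handler.go. As an illustrative sketch (TypeScript here rather than Go, and all names hypothetical), the check reduces to:

```typescript
// Illustrative only: the real validation is implemented in Go in
// webrtc_config_handler.go; field names here are hypothetical.
// TURN settings are all-or-nothing: either every WEBRTC_TURN_* var is
// set (full relay config) or all are empty (STUN-only). A partial set
// is rejected outright instead of being silently degraded.
interface IceEnv {
  stunUrls: string;       // WEBRTC_STUN_URLS
  turnUrls: string;       // WEBRTC_TURN_URLS
  turnUsername: string;   // WEBRTC_TURN_USERNAME
  turnCredential: string; // WEBRTC_TURN_CREDENTIAL
}

function validateIceEnv(env: IceEnv): void {
  const turnVars = [env.turnUrls, env.turnUsername, env.turnCredential];
  const populated = turnVars.filter((v) => v.trim() !== '').length;
  if (populated !== 0 && populated !== turnVars.length) {
    throw new Error('WEBRTC_TURN_* must be set together or not at all');
  }
}
```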
```diff
@@ -350,6 +380,59 @@ services:
     networks:
       - veza-network
 
+  # ============================================================================
+  # COTURN — TURN/STUN relay for WebRTC NAT traversal (v1.0.10 polish)
+  # ----------------------------------------------------------------------------
+  # Calls (1:1 audio/video) signal through chat WebSocket but the actual
+  # media stream needs a relay when both peers are behind symmetric NAT.
+  # Without this service, every call between users on corporate firewalls,
+  # mobile CGNAT or Incus default networking will silently fail with
+  # iceConnectionState=failed after ~30s.
+  #
+  # network_mode: host is REQUIRED — coturn allocates UDP ports in the
+  # 49152-65535 range for media relay, and Docker's NAT layer drops them.
+  # Host networking exposes the host's public IP directly, which is what
+  # WEBRTC_TURN_PUBLIC_IP must point at (so coturn advertises the right
+  # candidate to remote peers).
+  #
+  # The infra/coturn/README.md describes a parallel Incus-native deploy
+  # path; this compose service is the simpler dev/single-host option.
+  # If you run prod on multiple hosts behind a load balancer, prefer the
+  # Ansible/Incus path so coturn lives on a host with a stable public IP.
+  # ============================================================================
+  coturn:
+    image: coturn/coturn:4.6.2
+    container_name: veza_coturn
+    restart: unless-stopped
+    network_mode: host
+    command:
+      - "-n"
+      - "--listening-port=3478"
+      - "--tls-listening-port=5349"
+      - "--external-ip=${WEBRTC_TURN_PUBLIC_IP:?WEBRTC_TURN_PUBLIC_IP must be set (the public IP coturn advertises to peers)}"
+      - "--realm=${WEBRTC_TURN_REALM:-turn.veza.fr}"
+      - "--lt-cred-mech"
+      - "--user=${WEBRTC_TURN_USERNAME:?WEBRTC_TURN_USERNAME must be set}:${WEBRTC_TURN_CREDENTIAL:?WEBRTC_TURN_CREDENTIAL must be set}"
+      - "--min-port=49152"
+      - "--max-port=65535"
+      - "--no-cli"
+      - "--no-tlsv1"
+      - "--no-tlsv1_1"
+      - "--cert=/etc/coturn/cert.pem"
+      - "--pkey=/etc/coturn/key.pem"
+    volumes:
+      # Map the TLS cert dir read-only. Default points at a Let's Encrypt
+      # rotation managed outside this compose (certbot on the host or
+      # similar). Override TURN_CERT_DIR for self-signed dev certs.
+      - ${TURN_CERT_DIR:-/etc/letsencrypt/live/turn.veza.fr}:/etc/coturn:ro
+    healthcheck:
+      # nc -zu checks UDP/3478 is bound; doesn't validate auth but catches
+      # crashes / cert-load failures cleanly.
+      test: ["CMD-SHELL", "nc -zu localhost 3478 || exit 1"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+
   # ============================================================================
   # MONITORING - Alertmanager
   # Set SLACK_WEBHOOK_URL for Slack notifications. Works with Prometheus.
```
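On the client side, this ICE configuration ultimately feeds an `RTCPeerConnection`. A hedged sketch of that consumption follows, where the endpoint path and response shape are assumptions, while `RTCPeerConnection` and `RTCIceServer` are the standard browser APIs:

```typescript
// Sketch of how a browser client might consume the ICE config built from
// the env vars above. The /api/v1/webrtc/config path and response shape
// are assumptions, not confirmed by this diff. With TURN entries present,
// media relays through coturn whenever direct STUN-derived candidates
// fail (e.g. both peers behind symmetric NAT).
interface IceConfigResponse {
  stunUrls: string[];
  turnUrls: string[];
  turnUsername?: string;
  turnCredential?: string;
}

async function createPeerConnection(): Promise<RTCPeerConnection> {
  const res = await fetch('/api/v1/webrtc/config'); // hypothetical endpoint
  const cfg: IceConfigResponse = await res.json();

  const iceServers: RTCIceServer[] = [{ urls: cfg.stunUrls }];
  if (cfg.turnUrls.length > 0) {
    iceServers.push({
      urls: cfg.turnUrls,
      username: cfg.turnUsername,
      credential: cfg.turnCredential,
    });
  }
  return new RTCPeerConnection({ iceServers });
}
```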
```diff
@@ -77,6 +77,17 @@ services:
       - AWS_ACCESS_KEY_ID=${STAGING_S3_ACCESS_KEY:?STAGING_S3_ACCESS_KEY must be set}
       - AWS_SECRET_ACCESS_KEY=${STAGING_S3_SECRET_KEY:?STAGING_S3_SECRET_KEY must be set}
       - AWS_REGION=us-east-1
+      # v1.0.10 polish: enable the S3 stack and route track uploads through
+      # MinIO end-to-end. Without these two flags, defaults (local +
+      # disabled) win and the AWS_S3_* credentials above are inert.
+      - AWS_S3_ENABLED=true
+      - TRACK_STORAGE_BACKEND=s3
+      # WebRTC ICE — STUN-only by default in staging (no public TURN
+      # box). Set the WEBRTC_TURN_* envs externally to flip to relay.
+      - WEBRTC_STUN_URLS=${WEBRTC_STUN_URLS:-stun:stun.l.google.com:19302}
+      - WEBRTC_TURN_URLS=${WEBRTC_TURN_URLS:-}
+      - WEBRTC_TURN_USERNAME=${WEBRTC_TURN_USERNAME:-}
+      - WEBRTC_TURN_CREDENTIAL=${WEBRTC_TURN_CREDENTIAL:-}
       - HLS_STREAMING=true
       - HLS_STORAGE_DIR=/data/hls
     volumes:
```
```diff
@@ -29,6 +29,7 @@ all:
     forgejo-runner:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     # SHARED edge — one HAProxy on the R720 public 443. Serves
     # staging + prod + forgejo.talas.group simultaneously. Same
@@ -38,6 +39,7 @@ all:
     veza-haproxy:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_app_backend:
       children:
@@ -46,6 +48,7 @@ all:
     veza_app_backend_tools:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_app_backend_blue:
       hosts:
@@ -62,6 +65,7 @@ all:
     veza_app_stream_green:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_app_stream_blue:
       hosts:
@@ -75,6 +79,7 @@ all:
    veza_app_web_green:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_app_web_blue:
       hosts:
@@ -90,6 +95,7 @@ all:
     veza_data_minio:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_data_postgres:
       hosts:
```
```diff
@@ -47,6 +47,7 @@ all:
     forgejo-runner:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     # SHARED edge — one HAProxy on the R720 public 443. Serves
     # staging + prod + forgejo.talas.group simultaneously, Host-based
@@ -58,6 +59,7 @@ all:
     veza-haproxy:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     # The 6 app containers + 1 ephemeral tools container. deploy_app.yml
     # selects the inactive color dynamically from the haproxy
@@ -70,6 +72,7 @@ all:
     veza_app_backend_tools:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_app_backend_blue:
       hosts:
@@ -86,6 +89,7 @@ all:
     veza_app_stream_green:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_app_stream_blue:
       hosts:
@@ -99,6 +103,7 @@ all:
     veza_app_web_green:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_app_web_blue:
       hosts:
@@ -116,6 +121,7 @@ all:
     veza_data_minio:
       vars:
         ansible_connection: community.general.incus
+        ansible_incus_remote: "{{ veza_incus_remote_name | default('srv-102v') }}"
         ansible_python_interpreter: /usr/bin/python3
     veza_data_postgres:
       hosts:
```
```diff
@@ -54,7 +54,41 @@
   become: true
   gather_facts: true
   tasks:
-    - name: Ensure veza-{app,data} profiles exist (empty by default)
+    - name: Detect Incus storage pool actually used by forgejo
+      # Containers need a root disk device that references a storage pool.
+      # The host may have multiple pools, some of which are stale or
+      # unavailable. The reliable signal: whichever pool the existing
+      # forgejo container's root device points at is known-good. Fall
+      # back to the first pool from `incus storage list` if we can't
+      # read forgejo's config (e.g. fresh host without forgejo yet).
+      ansible.builtin.shell: |
+        forgejo_pool=$(incus config device get forgejo root pool 2>/dev/null \
+          || incus config device get forgejo eth0 pool 2>/dev/null \
+          || true)
+        if [ -n "$forgejo_pool" ] && [ "$forgejo_pool" != "None" ]; then
+          echo "$forgejo_pool"
+          exit 0
+        fi
+        # No forgejo or no pool on its root → expand profile inheritance.
+        # `incus config show forgejo --expanded` includes inherited devices.
+        forgejo_pool=$(incus config show forgejo --expanded 2>/dev/null \
+          | awk '/^  root:/{flag=1} flag && /^    pool:/{print $2; exit}' \
+          || true)
+        if [ -n "$forgejo_pool" ]; then
+          echo "$forgejo_pool"
+          exit 0
+        fi
+        # Last resort: first pool from `incus storage list`.
+        incus storage list -f csv 2>/dev/null | awk -F, 'NR==1{print $1; exit}'
+      register: storage_pool
+      changed_when: false
+      failed_when: storage_pool.stdout | trim == ""
+
+    - name: Show detected storage pool
+      ansible.builtin.debug:
+        msg: "Storage pool: {{ storage_pool.stdout | trim }}"
+
+    - name: Ensure veza-{app,data} profiles exist
       ansible.builtin.command: incus profile create {{ item }}
       register: profile_create
       failed_when: profile_create.rc != 0 and 'already exists' not in profile_create.stderr
```
```diff
@@ -63,6 +97,31 @@
         - veza-app
         - veza-data
 
+    - name: Ensure each profile's root disk points at pool={{ storage_pool.stdout | trim }}
+      # If a root device already exists but on the WRONG pool (e.g. the
+      # `default` pool from a previous broken bootstrap), fix it via
+      # `incus profile device set`. Else add fresh.
+      ansible.builtin.shell: |
+        POOL="{{ storage_pool.stdout | trim }}"
+        existing=$(incus profile device get {{ item }} root pool 2>/dev/null || true)
+        if [ "$existing" = "$POOL" ]; then
+          echo "root device on $POOL already"
+          exit 0
+        fi
+        if [ -n "$existing" ]; then
+          # Device exists with wrong pool — correct it.
+          incus profile device set {{ item }} root pool "$POOL"
+          echo "root device repointed to $POOL"
+        else
+          incus profile device add {{ item }} root disk path=/ pool="$POOL"
+          echo "root device added on $POOL"
+        fi
+      register: profile_root
+      changed_when: "'already' not in profile_root.stdout"
+      loop:
+        - veza-app
+        - veza-data
+
     - name: Detect legacy empty veza-net profile
       ansible.builtin.command: incus profile show veza-net
       register: vnet_show
```
```diff
@@ -12,6 +12,17 @@
 R720_HOST=srv-102v
 R720_USER=senke
 
+# ---- Incus remote (laptop-side) ----------------------------------------------
+# Name of the incus remote on YOUR LAPTOP that points at the R720's
+# Incus daemon. Run `incus remote list` to confirm. The
+# community.general.incus connection plugin uses this remote to reach
+# containers via the R720's Incus API (TLS authenticated).
+# Set up once with:
+#   incus remote add <name> https://<R720_IP>:8443 --token <TRUST_TOKEN>
+# Override the default by exporting VEZA_INCUS_REMOTE_NAME in your shell
+# or appending it here.
+# VEZA_INCUS_REMOTE_NAME=srv-102v
+
 # ---- Forgejo API (for secret + variable provisioning) ------------------------
 # First-run, before HAProxy + LE certs are up: use the LAN IP on port 3000
 # directly. Forgejo serves a self-signed cert there, so set FORGEJO_INSECURE=1
```