Compare commits

..

No commits in common. "f615a50c42cb5816830d206a8fdbeee33b8f809d" and "5e1e2bd720c2c2120203db501363527f4af3ae72" have entirely different histories.

21 changed files with 220 additions and 703 deletions

View file

@ -32,7 +32,7 @@ export const AdminAuditLogsView: React.FC = () => {
try {
const data = await adminService.getAuditLogs({ page, limit: 20 });
setLogs(data.logs || []);
setTotal(data.pagination?.total || 0);
setTotal(data.pagination?.total_items || 0);
} catch (e) {
logger.error('Failed to fetch audit logs', { error: e });
} finally {

View file

@ -7,27 +7,31 @@ import { BanUserModal } from './modals/BanUserModal';
import { User } from '../../types';
import { Search, Shield, Activity, Users, Download, UserPlus, Loader2 } from 'lucide-react';
import { useToast } from '../../components/feedback/ToastProvider';
import { useGetUsers } from '@/services/generated/user/user';
import { userService } from '../../services/userService';
import { logger } from '@/utils/logger';
export const AdminUsersView: React.FC = () => {
const { addToast } = useToast();
const [search, setSearch] = useState('');
const [selectedUser, setSelectedUser] = useState<User | null>(null);
const [users, setUsers] = useState<User[]>([]);
// Use generated hook. The orval-generated response type wraps in a
// {data, status, headers} discriminated union, but the apiClient response
// interceptor (services/api/interceptors/response.ts) unwraps the
// {success, data} envelope before the mutator returns. So at runtime
// `usersData` IS the payload — cast accordingly.
const { data: usersData, isLoading: loading } = useGetUsers();
const [loading, setLoading] = useState(true);
const [selectedUser, setSelectedUser] = useState<User | null>(null);
useEffect(() => {
const payload = usersData as unknown as { users?: User[] } | undefined;
if (payload?.users) {
setUsers(payload.users);
}
}, [usersData]);
const loadUsers = async () => {
setLoading(true);
try {
const res = await userService.list();
setUsers(res.users);
} catch (e) {
logger.error('Failed to load users', { error: e });
addToast('Failed to load users', 'error');
} finally {
setLoading(false);
}
};
loadUsers();
}, []);
const handleBan = (duration: string) => {
if (!selectedUser) return;

View file

@ -1,6 +1,6 @@
import React, { useState, useCallback } from 'react';
import React, { useState, useEffect, useCallback } from 'react';
import { X, Info, AlertTriangle, AlertCircle } from 'lucide-react';
import { useGetApiV1AnnouncementsActive } from '@/services/generated/admin/admin';
import { adminService } from '@/services/adminService';
import { cn } from '@/lib/utils';
interface Announcement {
@ -36,15 +36,13 @@ const typeConfig: Record<string, { icon: React.ElementType; className: string }>
};
export function AnnouncementBanner() {
const [announcements, setAnnouncements] = useState<Announcement[]>([]);
const [dismissed, setDismissed] = useState<Set<string>>(loadDismissed);
const [showAll, setShowAll] = useState(false);
// Use generated hook. apiClient response interceptor unwraps the
// {success, data} envelope, so at runtime announcementsData is the
// payload directly — see services/api/interceptors/response.ts.
const { data: announcementsData } = useGetApiV1AnnouncementsActive();
const payload = announcementsData as unknown as { announcements?: Announcement[] } | undefined;
const announcements: Announcement[] = payload?.announcements ?? [];
useEffect(() => {
adminService.getActiveAnnouncements().then(setAnnouncements).catch(() => {});
}, []);
const dismiss = useCallback((id: string) => {
setDismissed((prev) => {
@ -54,7 +52,7 @@ export function AnnouncementBanner() {
});
}, []);
const visible = announcements.filter((a: Announcement) => !dismissed.has(a.id));
const visible = announcements.filter((a) => !dismissed.has(a.id));
if (visible.length === 0) return null;
const shown = showAll ? visible : visible.slice(0, 1);
@ -62,7 +60,7 @@ export function AnnouncementBanner() {
return (
<div className="space-y-2 px-4 pt-2">
{shown.map((a: Announcement) => {
{shown.map((a) => {
const config = typeConfig[a.type] ?? defaultConfig;
const Icon = config.icon;
return (

View file

@ -19,20 +19,8 @@ interface PlaylistDetailViewProps {
onBack: () => void;
}
/**
* Extended Playlist type for UI-specific fields used in this view
*/
interface ExtendedPlaylist extends Playlist {
creator: string;
userId: string;
likes: number;
isCollaborative: boolean;
duration: string;
followers: number;
}
// Mock Data Fetcher
const getPlaylistById = (id: string): ExtendedPlaylist => ({
const getPlaylistById = (id: string): any => ({
id,
title: 'Cyberpunk 2077 Vibes',
creator: 'Cyber_Producer',
@ -47,31 +35,16 @@ const getPlaylistById = (id: string): ExtendedPlaylist => ({
isCollaborative: false,
duration: '45 min',
followers: 850,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
user_id: 'u1',
tracks: Array.from({ length: 12 }).map((_, i) => ({
id: `t${i}`,
title: `Neon Track ${i + 1}`,
artist: 'Various Artists',
album: 'Compilation',
cover_url: '',
cover_art_path: '',
duration: '3:45',
durationSec: 225,
plays: 1000 + i * 100,
likes: 50 + i,
creator_id: 'u1',
file_path: '',
file_size: 0,
format: 'mp3',
play_count: 1000 + i * 100,
like_count: 50 + i,
is_public: true,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
status: 'completed',
stream_status: 'ready',
})),
});
@ -81,14 +54,14 @@ export const PlaylistDetailView: React.FC<PlaylistDetailViewProps> = ({
}) => {
const { addToast } = useToast();
const { playTrack } = useAudio();
const [playlist, setPlaylist] = useState<ExtendedPlaylist>(getPlaylistById(playlistId));
const [playlist, setPlaylist] = useState<any>(getPlaylistById(playlistId));
const [isEditing, setIsEditing] = useState(false);
const [tracks, setTracks] = useState<Track[]>(playlist.tracks || []);
const [draggedIndex, setDraggedIndex] = useState<number | null>(null);
const [dragOverIndex, setDragOverIndex] = useState<number | null>(null);
const handleUpdate = (data: Partial<Playlist>) => {
setPlaylist((prev) => ({ ...prev, ...data }));
setPlaylist((prev: any) => ({ ...prev, ...data }));
addToast('Playlist updated', 'success');
};
@ -133,7 +106,7 @@ export const PlaylistDetailView: React.FC<PlaylistDetailViewProps> = ({
<div className="flex flex-col md:flex-row gap-8 items-end mb-8 p-8 bg-card/40 rounded-2xl border-t border-border">
<div className="w-52 h-52 shadow-2xl shadow-sm rounded-lg overflow-hidden flex-shrink-0 group relative">
<OptimizedImage
src={playlist.cover_url ?? ''}
src={playlist.cover_url}
alt={playlist.title || 'Playlist cover'}
className="w-full h-full object-cover"
/>

View file

@ -1,4 +1,4 @@
import React, { useEffect } from 'react';
import React, { useEffect, useCallback } from 'react';
import { Card } from '../../ui/card';
import { Button } from '../../ui/button';
import { useTheme } from '../../../components/theme/ThemeProvider';
@ -15,8 +15,7 @@ import {
import { useToast } from '../../../components/feedback/ToastProvider';
import { Switch } from '../../ui/switch';
import { useAuth } from '@/features/auth/hooks/useAuth';
import { useGetUsersMePreferences, usePutUsersMePreferences } from '@/services/generated/user/user';
import type { VezaBackendApiInternalTypesPreferenceSettings } from '@/services/generated/model/vezaBackendApiInternalTypesPreferenceSettings';
import { userService } from '@/services/userService';
import { usePWA } from '@/hooks/usePWA';
import { Download } from 'lucide-react';
@ -56,20 +55,10 @@ export const AppearanceSettingsView: React.FC = () => {
const { canInstall, install, isInstalling } = usePWA();
const [showSidebar, setShowSidebar] = React.useState(true);
// Use generated hooks. apiClient response interceptor unwraps the
// {success, data} envelope so at runtime prefData IS the payload —
// see services/api/interceptors/response.ts.
const { data: prefData } = useGetUsersMePreferences({
query: {
enabled: isAuthenticated,
}
});
const updatePrefsMutation = usePutUsersMePreferences();
useEffect(() => {
const prefs = prefData as unknown as VezaBackendApiInternalTypesPreferenceSettings | undefined;
if (prefs) {
const loadPreferences = useCallback(async () => {
if (!isAuthenticated) return;
try {
const prefs = await userService.getPreferences();
const themeVal = prefs.theme as 'dark' | 'light' | 'system';
if (themeVal && ['dark', 'light', 'system'].includes(themeVal)) {
setTheme(themeVal);
@ -81,20 +70,24 @@ export const AppearanceSettingsView: React.FC = () => {
}
setAccentHue(prefs.accentHue ?? 220);
setFontSize(Math.min(20, Math.max(14, prefs.fontSize ?? 16)));
} catch {
/* ignore, use local state */
}
}, [prefData, setTheme, setContrast, setDensity, setAccentHue, setFontSize]);
}, [isAuthenticated, setTheme, setContrast, setDensity, setAccentHue, setFontSize]);
useEffect(() => {
loadPreferences();
}, [loadPreferences]);
const handleSave = async () => {
if (isAuthenticated) {
try {
await updatePrefsMutation.mutateAsync({
data: {
theme,
contrast,
density,
accentHue,
fontSize,
}
await userService.updatePreferences({
theme,
contrast,
density,
accentHue,
fontSize,
});
addToast('Appearance settings saved', 'success');
} catch {

View file

@ -1,7 +1,8 @@
import { useState, useEffect, useCallback } from 'react';
import { useAuth } from '@/features/auth/hooks/useAuth';
import { useToast } from '@/components/feedback/ToastProvider';
import { useGetUsersId, usePutUsersId } from '@/services/generated/user/user';
import { userService } from '@/services/userService';
import { logger } from '@/utils/logger';
import { getCroppedImg } from './cropUtils';
import type { EditProfileFormData, PixelCrop } from './types';
@ -27,53 +28,49 @@ export function useEditProfile() {
const [loading, setLoading] = useState(false);
const [formData, setFormData] = useState<EditProfileFormData>(initialFormData);
// Use generated hooks
const { data: profileData } = useGetUsersId(user?.id || '', {
query: {
enabled: !!user?.id,
}
});
const updateProfileMutation = usePutUsersId();
useEffect(() => {
// apiClient response interceptor unwraps the {success, data} envelope,
// so at runtime profileData is the payload directly. See
// services/api/interceptors/response.ts.
const payload = profileData as unknown as { profile?: Record<string, unknown> } | undefined;
const p = payload?.profile as Record<string, string | undefined> | undefined;
if (p) {
setFormData({
username: p.username || '',
first_name: p.first_name || '',
last_name: p.last_name || '',
bio: p.bio || '',
banner_url: p.banner_url ?? p.banner ?? '',
location: p.location || '',
gender: p.gender || 'Prefer not to say',
birthdate: p.birthdate || '',
});
if (p.avatar_url) setAvatar(p.avatar_url);
const bannerUrl = p.banner_url ?? p.banner;
if (bannerUrl) setBanner(bannerUrl);
}
}, [profileData]);
const fetchProfile = async () => {
if (!user) return;
try {
const res = await userService.getProfile(user.id);
const p = res.profile;
setFormData({
username: p.username || '',
first_name: p.first_name || '',
last_name: p.last_name || '',
bio: p.bio || '',
banner_url: (p as { banner_url?: string }).banner_url ?? (p as { banner?: string }).banner ?? '',
location: p.location || '',
gender: p.gender || 'Prefer not to say',
birthdate: p.birthdate || '',
});
if (p.avatar_url) setAvatar(p.avatar_url);
const bannerUrl = (p as { banner_url?: string }).banner_url ?? (p as { banner?: string }).banner;
if (bannerUrl) setBanner(bannerUrl);
} catch (e) {
logger.error('Failed to load profile settings', {
error: e instanceof Error ? e.message : String(e),
stack: e instanceof Error ? e.stack : undefined,
userId: user?.id,
});
addToast('Failed to load profile settings', 'error');
}
};
fetchProfile();
}, [user, addToast]);
const handleSave = useCallback(async () => {
if (!user) return;
setLoading(true);
try {
await updateProfileMutation.mutateAsync({
id: user.id,
data: formData as any,
});
await userService.updateProfile(user.id, formData);
addToast('Profile updated successfully', 'success');
} catch (e) {
addToast('Failed to update profile', 'error');
} finally {
setLoading(false);
}
}, [user, formData, addToast, updateProfileMutation]);
}, [user, formData, addToast]);
const handleFileChange = useCallback(
(e: React.ChangeEvent<HTMLInputElement>, type: 'avatar' | 'banner') => {

View file

@ -7,11 +7,7 @@ roles_path = ./roles
host_key_checking = False
retry_files_enabled = False
forks = 10
# YAML-formatted output via the default callback (community.general's
# `yaml` callback was removed in 12.0.0 ; the equivalent is the built-in
# default callback with result_format=yaml from ansible-core 2.13+).
stdout_callback = default
result_format = yaml
stdout_callback = yaml
# v1.0.9 Day 5: keep diffs visible by default — every changed file in
# `--check` mode prints its before/after so a dry-run review is useful.
nocows = 1

View file

@ -14,10 +14,8 @@
all:
hosts:
veza-prod:
# Same R720 as staging at v1.0 — separate Incus network keeps
# blast radius contained. Move to a dedicated host post-v1.1.
ansible_host: srv-102v
ansible_user: senke
ansible_host: 10.0.20.150
ansible_user: ansible
ansible_python_interpreter: /usr/bin/python3
children:
incus_hosts:

View file

@ -30,10 +30,8 @@
all:
hosts:
veza-staging:
# SSH config alias `srv-102v` resolves to the operator's R720 host.
# Override per-operator in host_vars/ if your alias differs.
ansible_host: srv-102v
ansible_user: senke
ansible_host: 10.0.20.150
ansible_user: ansible
ansible_python_interpreter: /usr/bin/python3
children:
incus_hosts:

View file

@ -2,36 +2,18 @@
# pick it up automatically.
#
# cp .env.example .env
# vim .env # NB: $EDITOR is unset by default in many shells
# ↑ use the editor name directly
# $EDITOR .env
# ---- R720 SSH target ---------------------------------------------------------
# If you use an SSH config Host alias (e.g. `srv-102v` in ~/.ssh/config),
# point R720_HOST at that alias and leave R720_USER empty so the alias's
# User= line wins.
R720_HOST=srv-102v
R720_USER=senke
R720_HOST=10.0.20.150
R720_USER=ansible
# ---- Forgejo API (for secret + variable provisioning) ------------------------
# First-run, before HAProxy + LE certs are up : use the LAN IP on port 3000
# directly. Forgejo serves a self-signed cert there, so set FORGEJO_INSECURE=1
# to skip cert verification on the API helper's curls.
FORGEJO_API_URL=https://10.0.20.105:3000
FORGEJO_INSECURE=1
# Once the edge HAProxy is up + Let's Encrypt has issued forgejo.talas.group :
# FORGEJO_API_URL=https://forgejo.talas.group
# FORGEJO_INSECURE=0
# Owner = the path segment between forgejo.talas.group/ and /veza in the URL
# of your repo. Run `git remote -v` to confirm — usually `senke` (user) or
# `talas` (org).
FORGEJO_OWNER=senke
FORGEJO_API_URL=https://forgejo.talas.group
FORGEJO_OWNER=talas
FORGEJO_REPO=veza
# Forgejo personal access token with scopes :
# write:admin — for runner registration token
# write:repository — for repo secrets/variables
# write:package — for the registry token created on the fly
# write:admin (for runner registration token)
# write:repository (for repo secrets/variables)
# write:package (for the registry token created on the fly)
# Generate at $FORGEJO_API_URL/-/user/settings/applications
FORGEJO_ADMIN_TOKEN=

View file

@ -8,14 +8,12 @@ asked to mutate.
| File | Where it runs | What it does |
|---|---|---|
| `lib.sh` | sourced by all | logging, error trap, idempotent state file, Forgejo API helpers (honours `FORGEJO_INSECURE=1`) |
| `lib.sh` | sourced by both | logging, error trap, idempotent state file, Forgejo API helpers |
| `bootstrap-local.sh` | dev workstation | drives the whole flow (preflight → vault → Forgejo → R720 → haproxy → summary) |
| `bootstrap-remote.sh` | R720 (over SSH) | Incus profiles, runner socket mount, runner labels |
| `verify-local.sh` | dev workstation | read-only checks of local state |
| `verify-remote.sh` | R720 | read-only checks of R720 state (run via `verify-remote-ssh.sh`) |
| `verify-remote-ssh.sh` | dev workstation | scp+ssh wrapper that runs `verify-remote.sh` on R720 |
| `enable-auto-deploy.sh` | dev workstation | restores `.forgejo/workflows/` if disabled, uncomments push: trigger |
| `reset-vault.sh` | dev workstation | recovery from a vault password mismatch (destructive — re-prompts) |
| `verify-remote.sh` | R720 | read-only checks of R720 state |
| `enable-auto-deploy.sh` | dev workstation | flips the deploy.yml gate from workflow_dispatch-only to push:main + tag:v* |
| `.env.example` | template | copy to `.env`, fill in, gitignored |
## State file
@ -80,43 +78,20 @@ ssh ansible@10.0.20.150 'sudo bash' < verify-remote.sh
## Troubleshooting
- **Phase 1 SSH fails** — verify `R720_HOST` + `R720_USER` in `.env`.
If you use an SSH config alias (e.g. `Host srv-102v` in
`~/.ssh/config`), set `R720_HOST=srv-102v` and either set
`R720_USER=` (empty, alias's User= wins) or match the alias's user.
Test manually : `ssh ${R720_USER}@${R720_HOST} /bin/true`.
- **Phase 2 `cannot decrypt vault.yml`** — the password in
`.vault-pass` doesn't match what was used to encrypt `vault.yml`.
- If you remember the original password, edit `.vault-pass`
(`echo "<correct password>" > infra/ansible/.vault-pass ; chmod 0400 …`).
- Otherwise : `./reset-vault.sh` — destructive, re-prompts for
everything.
- **Phase 3 `Forgejo API unreachable`** — Forgejo on
`https://10.0.20.105:3000` serves a self-signed cert. Set
`FORGEJO_INSECURE=1` in `.env`. Once the edge HAProxy is up + LE has
issued `forgejo.talas.group`, switch to that URL and clear
`FORGEJO_INSECURE`.
- **Phase 3 `repo not found`** — set `FORGEJO_OWNER` to the actual
org/user owning the repo. Confirm with `git remote -v` (the path
segment after `host:port/`).
- **Phase 4 SSH timeout / sudo prompt** — passwordless sudo needed
for the SSH user. Add to `/etc/sudoers.d/talas-bootstrap` :
org/user owning the repo (e.g., `senke` instead of `talas`).
- **Phase 4 SSH timeout**`sudo` may prompt for password ; configure
passwordless sudo for the SSH user, OR run remote bootstrap manually :
```
senke ALL=(ALL) NOPASSWD: /usr/bin/bash
scp scripts/bootstrap/{lib.sh,bootstrap-remote.sh} r720:/tmp/
ssh r720 'sudo FORGEJO_REGISTRATION_TOKEN=… bash /tmp/bootstrap-remote.sh'
```
Or run the remote half manually :
```
scp scripts/bootstrap/{lib.sh,bootstrap-remote.sh} srv-102v:/tmp/
ssh srv-102v 'sudo FORGEJO_REGISTRATION_TOKEN=<token> bash /tmp/bootstrap-remote.sh'
```
- **Phase 5 dehydrated fails** — port 80 must be reachable from
Internet for HTTP-01 (not blocked by ISP, NAT-forwarded). Test
from outside : `curl http://veza.fr/.well-known/acme-challenge/test`
should hit HAProxy's `letsencrypt_backend` (will 404, which is
fine ; what matters is reaching the R720).
- **`.forgejo/workflows/` is missing, only `workflows.disabled/` present** —
expected when the auto-trigger has been gated by renaming the dir.
`enable-auto-deploy.sh` restores it.
- **Phase 5 dehydrated fails** — check that port 80 reaches the R720
from Internet (not blocked by ISP, NAT-forwarded, etc.). dehydrated
needs HTTP-01 inbound. Test: from outside,
`curl http://veza.fr/.well-known/acme-challenge/test` should hit
HAProxy's letsencrypt_backend (will 404, which is fine ; what
matters is it reaches the R720).
## After bootstrap

View file

@ -51,89 +51,6 @@ VAULT_PASS="$REPO_ROOT/infra/ansible/.vault-pass"
TALAS_STATE_DIR="$REPO_ROOT/.git/talas-bootstrap"
TALAS_STATE_FILE="$TALAS_STATE_DIR/local.state"
# ============================================================================
# Vault autofill helpers (used by phase 2)
# ============================================================================
# Generate a URL-safe random string (alphanumeric only — no /=+ which
# break the sed replacement patterns and unquoted YAML values).
#
# $1 - desired length (default 32). Token is printed on stdout.
_rand_token() {
    local len=${1:-32}
    local out=""
    # base64 of N bytes yields ~1.33*N chars, some of which (+ / = and
    # newlines) are stripped by the filter. A single over-sized pass is
    # *almost* always enough, but not guaranteed — loop until we have
    # collected at least `len` chars, then truncate exactly.
    while (( ${#out} < len )); do
        out+=$(openssl rand -base64 $((len * 2)) 2>/dev/null | tr -dc 'A-Za-z0-9')
    done
    printf '%s' "${out:0:len}"
}
# Replace a single `vault_<key>: "<TODO ...>"` line with a generated value.
#
# $1 - file to edit in place
# $2 - key name (the YAML key before the colon)
# $3 - replacement value
#
# Idempotent : if the line is already non-TODO, the pattern does not
# match and the file is left untouched.
_autofill_field() {
    local file=$1 key=$2 value=$3
    # The sed REPLACEMENT side treats `\`, `&` and the delimiter (`|`
    # here) specially — escape all three, not just the delimiter.
    # Backslashes first, so the escapes we add below aren't re-escaped.
    local esc=${value//\\/\\\\}
    esc=${esc//&/\\&}
    esc=${esc//|/\\|}
    sed -i "s|^${key}: \"<TODO[^\"]*\"|${key}: \"${esc}\"|" "$file"
}
# Auto-generate the RS256 JWT keypair when either key still carries its
# <TODO> placeholder. No-op on a vault file that is already filled in.
#
# $1 - vault yml file to edit in place (via _autofill_field)
_autogen_jwt_keys() {
    local vault_file=$1
    # Bail out early unless the private-key placeholder is present.
    grep -q '<TODO: base64 of RS256 private PEM>' "$vault_file" || return 0
    info "generating RS256 JWT keypair"
    local pem_priv pem_pub b64_priv b64_pub
    pem_priv=$(openssl genrsa 4096 2>/dev/null) || die "openssl genrsa failed"
    pem_pub=$(printf '%s\n' "$pem_priv" | openssl rsa -pubout 2>/dev/null) || die "openssl rsa -pubout failed"
    # Vault stores both PEMs base64-encoded on a single line (-w0).
    b64_priv=$(printf '%s\n' "$pem_priv" | base64 -w0)
    b64_pub=$(printf '%s\n' "$pem_pub" | base64 -w0)
    _autofill_field "$vault_file" vault_jwt_signing_key_b64 "$b64_priv"
    _autofill_field "$vault_file" vault_jwt_public_key_b64 "$b64_pub"
    ok "JWT keys generated and inserted"
}
# Autofill every vault field whose value can be safely random-generated.
# Optional / external fields (smtp, hyperswitch, stripe, oauth_clients,
# sentry) are left as <TODO> for the operator to either fill or skip.
#
# $1 - vault yml file to edit in place
_autofill_vault_secrets() {
    local file=$1
    local filled=()
    # key:length table. 32-char entries are strong passwords/secrets ;
    # the MinIO access/secret keys use S3-style lengths (20/40), which
    # MinIO accepts as plain alphanumerics.
    local spec key len
    for spec in \
        vault_postgres_password:32 \
        vault_postgres_replication_password:32 \
        vault_redis_password:32 \
        vault_rabbitmq_password:32 \
        vault_minio_root_password:32 \
        vault_chat_jwt_secret:32 \
        vault_oauth_encryption_key:32 \
        vault_stream_internal_api_key:32 \
        vault_minio_access_key:20 \
        vault_minio_secret_key:40
    do
        key=${spec%:*}
        len=${spec#*:}
        # Only touch lines still carrying a <TODO placeholder — re-runs
        # never clobber a value that is already set.
        if grep -q "^${key}: \"<TODO" "$file"; then
            _autofill_field "$file" "$key" "$(_rand_token "$len")"
            filled+=("$key")
        fi
    done
    # MinIO root user — fixed username rather than a random token.
    if grep -q '^vault_minio_root_user: "<TODO' "$file"; then
        _autofill_field "$file" vault_minio_root_user "veza-admin"
        filled+=(vault_minio_root_user)
    fi
    if (( ${#filled[@]} > 0 )); then
        ok "auto-generated ${#filled[@]} secret(s) : ${filled[*]}"
    fi
}
# ============================================================================
# Phase 1 — preflight
# ============================================================================
@ -196,32 +113,28 @@ phase_2_vault() {
if [[ -f "$VAULT_YML" ]] && head -1 "$VAULT_YML" 2>/dev/null | grep -q '^\$ANSIBLE_VAULT'; then
info "vault.yml already encrypted — verifying password works"
[[ -f "$VAULT_PASS" ]] || die "vault.yml encrypted but $VAULT_PASS missing — re-create it manually"
elif [[ -f "$VAULT_YML" ]]; then
warn "vault.yml exists in PLAINTEXT — will encrypt now"
else
if [[ -f "$VAULT_YML" ]]; then
warn "vault.yml exists in PLAINTEXT — will autofill remaining <TODO> + encrypt"
else
info "rendering vault.yml from example"
cp "$VAULT_EXAMPLE" "$VAULT_YML"
info "rendering vault.yml from example"
cp "$VAULT_EXAMPLE" "$VAULT_YML"
warn "edit $VAULT_YML now to fill in <TODO> placeholders"
warn "(JWT keys are auto-generated below if you leave their <TODO> values)"
prompt_value _ "Press Enter when done editing"
# Auto-fill JWT keys if user left the TODO placeholders
if grep -q '<TODO: base64 of RS256 private PEM>' "$VAULT_YML"; then
info "generating RS256 JWT keypair"
local jwt_priv jwt_pub
jwt_priv=$(openssl genrsa 4096 2>/dev/null | base64 -w0)
jwt_pub=$(echo "$jwt_priv" | base64 -d | openssl rsa -pubout 2>/dev/null | base64 -w0)
sed -i "s|<TODO: base64 of RS256 private PEM>|$jwt_priv|" "$VAULT_YML"
sed -i "s|<TODO: base64 of RS256 public PEM>|$jwt_pub|" "$VAULT_YML"
ok "JWT keys generated and inserted"
fi
_autogen_jwt_keys "$VAULT_YML"
_autofill_vault_secrets "$VAULT_YML"
local remaining
remaining=$(grep -cE '<TODO' "$VAULT_YML" || true)
if (( remaining > 0 )); then
warn "$remaining <TODO> placeholders left (optional fields ; safe to leave or fill later)"
grep -n '<TODO' "$VAULT_YML" >&2
local cont
prompt_value cont "blank these out and continue ? (y/n)" "y"
if [[ "${cont,,}" == "y" ]]; then
# Replace any line whose value still has <TODO with empty string ;
# for nested fields under vault_oauth_clients, set sub-values to "".
sed -i 's|"<TODO[^"]*"|""|g' "$VAULT_YML"
ok "remaining placeholders blanked out"
else
die "edit $VAULT_YML manually then rerun PHASE=2 ./bootstrap-local.sh"
fi
if grep -qE '<TODO' "$VAULT_YML"; then
local remaining
remaining=$(grep -cE '<TODO' "$VAULT_YML")
die "$remaining <TODO> placeholders still in $VAULT_YML — fill them and rerun PHASE=2 ./bootstrap-local.sh"
fi
fi
@ -241,8 +154,7 @@ phase_2_vault() {
info "verifying we can decrypt"
if ! ansible-vault view --vault-password-file "$VAULT_PASS" "$VAULT_YML" >/dev/null 2>&1; then
TALAS_HINT="if you remember the password, edit $VAULT_PASS to match. Otherwise run scripts/bootstrap/reset-vault.sh to start over."
die "cannot decrypt $VAULT_YML with $VAULT_PASS — password mismatch"
die "cannot decrypt $VAULT_YML with $VAULT_PASS — password mismatch ?"
fi
ok "vault decryption verified"
@ -263,68 +175,34 @@ phase_3_forgejo() {
fi
require_env FORGEJO_ADMIN_TOKEN \
"create at $FORGEJO_API_URL/-/user/settings/applications (scopes: write:repository + write:package, optionally write:admin to auto-create registry tokens)"
"create at $FORGEJO_API_URL/-/user/settings/applications (scopes: write:admin, write:repository, write:package)"
local insecure=()
[[ "${FORGEJO_INSECURE:-0}" == "1" ]] && insecure=(-k)
info "checking Forgejo API reachability (no-auth /version probe)"
if ! curl -fsSL "${insecure[@]}" --max-time 10 \
"$FORGEJO_API_URL/api/v1/version" >/dev/null 2>&1; then
TALAS_HINT="check FORGEJO_API_URL ($FORGEJO_API_URL) ; for self-signed certs set FORGEJO_INSECURE=1 in .env ; verify WireGuard if URL is on the LAN"
die "Forgejo API unreachable"
info "checking Forgejo API reachability"
if ! curl -fsSL --max-time 10 \
-H "Authorization: token $FORGEJO_ADMIN_TOKEN" \
"$FORGEJO_API_URL/api/v1/user" >/dev/null 2>&1; then
TALAS_HINT="check FORGEJO_API_URL ($FORGEJO_API_URL) ; if no DNS yet, try FORGEJO_API_URL=http://10.0.20.105:3000"
die "Forgejo API unreachable or token invalid"
fi
ok "Forgejo API reachable"
ok "Forgejo API reachable, token valid"
info "checking repo $FORGEJO_OWNER/$FORGEJO_REPO + token has write access"
info "checking repo $FORGEJO_OWNER/$FORGEJO_REPO exists"
if ! forgejo_api GET "/repos/$FORGEJO_OWNER/$FORGEJO_REPO" >/dev/null 2>&1; then
TALAS_HINT="verify FORGEJO_OWNER + FORGEJO_REPO (currently $FORGEJO_OWNER/$FORGEJO_REPO) ; verify token scope includes read:repository"
die "repo $FORGEJO_OWNER/$FORGEJO_REPO not found or token lacks read:repository"
fi
ok "repo + token OK"
# FORGEJO_REGISTRY_TOKEN — set once, then leave alone. Re-runs of
# phase 3 don't re-prompt unless the secret has been deleted in
# Forgejo UI, OR the operator sets FORCE_FORGEJO_REPROMPT=1.
# NB: Forgejo doesn't expose GET /actions/secrets/<name> — we list
# all secrets and grep by name.
local _secret_exists=0
if forgejo_api GET "/repos/$FORGEJO_OWNER/$FORGEJO_REPO/actions/secrets" 2>/dev/null \
| jq -e '.[]? | select(.name == "FORGEJO_REGISTRY_TOKEN")' >/dev/null 2>&1; then
_secret_exists=1
fi
if [[ "${FORCE_FORGEJO_REPROMPT:-0}" != "1" ]] && (( _secret_exists == 1 )); then
ok "secret FORGEJO_REGISTRY_TOKEN already set (set FORCE_FORGEJO_REPROMPT=1 to replace)"
else
local registry_token=""
if [[ -n "${FORGEJO_REGISTRY_TOKEN:-}" ]]; then
info "using FORGEJO_REGISTRY_TOKEN from environment"
registry_token="$FORGEJO_REGISTRY_TOKEN"
else
info "trying to auto-create a registry token (needs write:admin scope on admin token)"
local resp
resp=$(forgejo_api POST "/users/$FORGEJO_OWNER/tokens" \
--data "$(jq -nc --arg n "veza-deploy-registry-$(date +%s)" \
--argjson s '["write:package", "read:package"]' \
'{name: $n, scopes: $s}')" 2>/dev/null \
|| true)
registry_token=$(echo "$resp" | jq -r '.sha1 // empty' 2>/dev/null || true)
if [[ -z "$registry_token" ]]; then
warn "auto-create failed (admin token lacks write:admin or sudo)"
warn "create the token manually :"
warn " $FORGEJO_API_URL/-/user/settings/applications"
warn " → 'Generate New Token' → name 'veza-deploy-registry'"
warn " → scopes: write:package, read:package"
prompt_password registry_token "paste the token value (input hidden)"
else
ok "auto-created registry token (${#registry_token} chars)"
fi
fi
forgejo_set_secret "$FORGEJO_OWNER" "$FORGEJO_REPO" FORGEJO_REGISTRY_TOKEN "$registry_token"
TALAS_HINT="set FORGEJO_OWNER + FORGEJO_REPO env vars (currently $FORGEJO_OWNER/$FORGEJO_REPO)"
die "repo $FORGEJO_OWNER/$FORGEJO_REPO not found"
fi
# Vault password is always re-set from the current .vault-pass — cheap,
# idempotent, and survives a re-run after rotation.
# Create a long-lived registry token via the API.
info "creating a registry token (write:package)"
local registry_token
registry_token=$(forgejo_api POST "/users/$FORGEJO_OWNER/tokens" \
--data "$(jq -nc --arg n "veza-deploy-registry-$(date +%s)" \
--argjson s '["write:package", "read:package"]' \
'{name: $n, scopes: $s}')" \
| jq -er '.sha1 // empty') \
|| die "could not create registry token via API ; create one manually at $FORGEJO_API_URL/-/user/settings/applications and re-run with FORGEJO_REGISTRY_TOKEN env var set"
forgejo_set_secret "$FORGEJO_OWNER" "$FORGEJO_REPO" FORGEJO_REGISTRY_TOKEN "$registry_token"
forgejo_set_secret "$FORGEJO_OWNER" "$FORGEJO_REPO" ANSIBLE_VAULT_PASSWORD "$(cat "$VAULT_PASS")"
forgejo_set_var "$FORGEJO_OWNER" "$FORGEJO_REPO" FORGEJO_REGISTRY_URL \
"$FORGEJO_API_URL/api/packages/$FORGEJO_OWNER/generic"
@ -357,39 +235,18 @@ phase_4_r720() {
require_file "$remote_script"
require_file "$remote_lib"
# SSH target string handles both "user@host" and pure "host" (when
# the alias's User= line is the source of truth).
local ssh_target
if [[ -n "${R720_USER:-}" ]]; then
ssh_target="${R720_USER}@${R720_HOST}"
else
ssh_target="${R720_HOST}"
fi
info "uploading lib.sh + bootstrap-remote.sh to $ssh_target:/tmp/talas-bootstrap/"
ssh "$ssh_target" "mkdir -p /tmp/talas-bootstrap" \
|| die "ssh mkdir failed (target: $ssh_target)"
scp -q "$remote_lib" "$remote_script" "$ssh_target:/tmp/talas-bootstrap/" \
|| die "scp failed (target: $ssh_target)"
ok "uploaded"
info "running bootstrap-remote.sh over ssh -t (TTY for sudo prompt)"
info " → if sudo asks for a password, type it once at the prompt below"
# ssh -t allocates a TTY so sudo can prompt for the password. Set vars
# via env=… so they're available inside the sudo'd script (sudo -E
# only preserves explicit pre-existing env vars, not ones set on the
# ssh command line). The /var/log/talas-bootstrap.log on R720 keeps
# a copy of the output even if the SSH stream gets cut.
if ! ssh -t "$ssh_target" \
"sudo env FORGEJO_REGISTRATION_TOKEN='$reg_token' \
FORGEJO_API_URL='$FORGEJO_API_URL' \
bash /tmp/talas-bootstrap/bootstrap-remote.sh"; then
TALAS_HINT="ssh to $ssh_target and tail /var/log/talas-bootstrap.log ; or set up passwordless sudo : echo '$R720_USER ALL=(ALL) NOPASSWD: /usr/bin/bash' | sudo tee /etc/sudoers.d/talas-bootstrap"
die "remote bootstrap failed"
fi
# Cleanup uploaded scripts.
ssh "$ssh_target" "rm -rf /tmp/talas-bootstrap" || true
info "streaming bootstrap-remote.sh over SSH (logs to /var/log/talas-bootstrap.log on R720)"
# Concatenate lib.sh + remote script so the remote bash sees both.
{
cat "$remote_lib"
echo
cat "$remote_script"
} | ssh "$R720_USER@$R720_HOST" \
"FORGEJO_REGISTRATION_TOKEN='$reg_token' \
FORGEJO_API_URL='$FORGEJO_API_URL' \
sudo -E bash -s" \
| tee >(grep -E '>>>PHASE:' >&2) \
|| die "remote bootstrap failed ; ssh to $R720_HOST and tail /var/log/talas-bootstrap.log"
mark_done r720
phase r720 DONE
@ -408,44 +265,10 @@ phase_5_haproxy() {
fi
cd "$REPO_ROOT/infra/ansible"
# Ansible collections needed by the haproxy/deploy playbooks.
# ansible.cfg sets stdout_callback=yaml which lives in
# community.general — without it, ansible-playbook errors out
# immediately ("Invalid callback for stdout specified: yaml").
info "ensuring ansible collections (community.general / .postgresql / .rabbitmq) are installed"
for col in community.general community.postgresql community.rabbitmq; do
if ! ansible-galaxy collection list "$col" 2>/dev/null | grep -q "^$col"; then
info "installing $col"
ansible-galaxy collection install "$col" >/dev/null \
|| die "ansible-galaxy collection install $col failed (network ? ~/.ansible/ writable ?)"
fi
done
ok "collections present"
# Compute SSH target the same way phase 4 does.
local ssh_target
if [[ -n "${R720_USER:-}" ]]; then
ssh_target="${R720_USER}@${R720_HOST}"
else
ssh_target="${R720_HOST}"
fi
# Detect if NOPASSWD sudo is configured ; if not, pass --ask-become-pass.
local become_flag=()
if ssh "$ssh_target" "sudo -n /bin/true" >/dev/null 2>&1; then
ok "passwordless sudo on R720 — running ansible without -K"
else
info "sudo on R720 needs a password — passing --ask-become-pass"
info " → ansible will prompt 'BECOME password:' below ; type your sudo password"
become_flag=(--ask-become-pass)
fi
info "running ansible-playbook playbooks/haproxy.yml (510 min)"
if ! ansible-playbook -i inventory/staging.yml playbooks/haproxy.yml \
--vault-password-file .vault-pass \
"${become_flag[@]}"; then
TALAS_HINT="check the ansible output above ; common issues : Incus profile missing, port 80 blocked from Internet, DNS not yet propagated, sudo password rejected"
--vault-password-file .vault-pass; then
TALAS_HINT="check the ansible output above ; common issues : Incus profile missing, port 80 blocked from Internet, DNS not yet propagated"
die "ansible-playbook haproxy.yml failed"
fi

View file

@ -99,36 +99,20 @@ remote_phase_2_runner_socket() {
sleep 3
fi
info "ensuring incus client binary is in the runner"
if incus exec forgejo-runner -- command -v incus >/dev/null 2>&1; then
ok "incus already in runner"
elif [[ -x /usr/bin/incus ]]; then
# Push the host's binary into the container — avoids apt repo
# issues (Debian 13 doesn't ship incus-client as a separate
# package, and the full `incus` package would also pull in the
# daemon which we don't want in a runner container).
info "pushing /usr/bin/incus from host into runner:/usr/local/bin/incus"
incus file push /usr/bin/incus forgejo-runner/usr/local/bin/incus --mode 0755
ok "incus binary pushed"
info "ensuring incus client is installed inside the runner"
if ! incus exec forgejo-runner -- command -v incus >/dev/null 2>&1; then
incus exec forgejo-runner -- apt-get update -qq
incus exec forgejo-runner -- apt-get install -y incus-client >/dev/null
ok "incus-client installed in runner"
else
die "no /usr/bin/incus on host AND none in runner — install incus on the host first"
ok "incus-client already in runner"
fi
info "smoke-test : runner can incus list"
if incus exec forgejo-runner -- incus list >/dev/null 2>&1; then
ok "runner has Incus access"
else
# Common cause : the runner's process can read /var/lib/incus/
# unix.socket only if it has the right gid. The socket is owned
# root:incus-admin (or equivalent) on the host. Inside the
# container we either run as root (works) or need to add the
# runner user to a group with the same gid as host's incus-admin.
# We don't try to fix that here — it's runner-process-specific.
warn "runner cannot incus list as default user"
warn "this may be normal if the systemd unit runs as root inside"
warn "the container ; if not, add the runner user to a group with"
warn "the same gid as the host's incus-admin group"
if ! incus exec forgejo-runner -- incus list >/dev/null 2>&1; then
die "runner cannot reach Incus socket — verify nesting + permissions"
fi
ok "runner has Incus access"
mark_done r2_runner_socket
phase r2_runner_socket DONE

View file

@ -1,15 +1,8 @@
#!/usr/bin/env bash
# enable-auto-deploy.sh — re-enable Forgejo Actions deploy workflow.
#
# Two scenarios :
# A. .forgejo/workflows.disabled/ exists (current state on this branch)
# → rename back to .forgejo/workflows/, then ensure deploy.yml's
# push: trigger is uncommented.
# B. .forgejo/workflows/deploy.yml exists with push: commented out
# → just uncomment.
#
# Run AFTER one successful workflow_dispatch run has proven the chain
# end-to-end.
# enable-auto-deploy.sh — flip the workflow_dispatch-only gate on
# .forgejo/workflows/deploy.yml back to push:main + tag:v*. Run this
# AFTER one successful manual workflow_dispatch run has proven the
# chain end-to-end.
set -Eeuo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@ -17,55 +10,43 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
trap_errors
REPO_ROOT=$(git -C "$SCRIPT_DIR" rev-parse --show-toplevel) || die "not in a git repo"
WF_DIR="$REPO_ROOT/.forgejo/workflows"
WF_DISABLED="$REPO_ROOT/.forgejo/workflows.disabled"
# --- Step 1 : if workflows are renamed-disabled, restore the directory. -------
if [[ -d "$WF_DISABLED" ]]; then
if [[ -d "$WF_DIR" ]]; then
die "BOTH $WF_DIR and $WF_DISABLED exist — manual cleanup needed"
fi
info "rename $WF_DISABLED$WF_DIR"
git -C "$REPO_ROOT" mv .forgejo/workflows.disabled .forgejo/workflows
ok "directory restored"
fi
DEPLOY_YML="$WF_DIR/deploy.yml"
DEPLOY_YML="$REPO_ROOT/.forgejo/workflows/deploy.yml"
require_file "$DEPLOY_YML"
# --- Step 2 : if push: trigger is commented, uncomment it. --------------------
if grep -qE '^[[:space:]]+push:$' "$DEPLOY_YML"; then
ok "auto-deploy trigger already active in deploy.yml"
else
if ! grep -qE '^[[:space:]]+# push:' "$DEPLOY_YML"; then
die "deploy.yml has neither active push: nor commented '# push:' — manual edit required"
fi
info "uncommenting push: + branches: + tags: in $DEPLOY_YML"
sed -i \
-e 's|^ # push: # GATED — uncomment after first| push:|' \
-e 's|^ # branches: \[main\] # successful workflow_dispatch run| branches: [main]|' \
-e "s|^ # tags: \\['v\\*'\\] # see RUNBOOK_DEPLOY_BOOTSTRAP.md| tags: ['v*']|" \
"$DEPLOY_YML"
if ! grep -qE '^[[:space:]]+push:$' "$DEPLOY_YML"; then
die "sed didn't apply — open $DEPLOY_YML and uncomment by hand"
fi
ok "trigger uncommented"
ok "auto-deploy already enabled"
exit 0
fi
# --- Step 3 : prompt to commit + push. ----------------------------------------
if ! grep -qE '^[[:space:]]+# push:' "$DEPLOY_YML"; then
die "deploy.yml has neither active push: nor commented '# push:' — manual edit required"
fi
info "uncommenting push: + branches: + tags: in $DEPLOY_YML"
# Conservative single-line replacements, indentation preserved.
sed -i \
-e 's|^ # push: # GATED — uncomment after first| push:|' \
-e 's|^ # branches: \[main\] # successful workflow_dispatch run| branches: [main]|' \
-e 's|^ # tags: \['"'"'v\*'"'"'\] # see RUNBOOK_DEPLOY_BOOTSTRAP.md| tags: ['"'"'v*'"'"']|' \
"$DEPLOY_YML"
# Verify.
if ! grep -qE '^[[:space:]]+push:$' "$DEPLOY_YML"; then
die "sed didn't apply — open $DEPLOY_YML and uncomment by hand"
fi
ok "edited $DEPLOY_YML"
info "diff:"
git -C "$REPO_ROOT" --no-pager diff -- "$WF_DIR" >&2 || true
git -C "$REPO_ROOT" --no-pager diff -- "$DEPLOY_YML" >&2
cat >&2 <<EOF
Next step :
cd $REPO_ROOT
git add .forgejo/
git commit --no-verify -m "feat(forgejo): re-enable auto-deploy"
git add .forgejo/workflows/deploy.yml
git commit --no-verify -m "feat(forgejo): re-enable auto-deploy on push:main + tag:v*"
git push origin main
The push itself triggers the first auto-deploy. Watch :
${FORGEJO_API_URL:-https://10.0.20.105:3000}/${FORGEJO_OWNER:-senke}/${FORGEJO_REPO:-veza}/actions
https://forgejo.talas.group/${FORGEJO_OWNER:-talas}/${FORGEJO_REPO:-veza}/actions
EOF

View file

@ -160,15 +160,9 @@ prompt_value() {
# ----- Forgejo API helper -----------------------------------------------------
# Requires: $FORGEJO_API_URL, $FORGEJO_ADMIN_TOKEN
# Honours $FORGEJO_INSECURE=1 to disable TLS verification (useful on
# first-run, before Let's Encrypt has issued the cert for
# forgejo.talas.group and the LAN URL https://10.0.20.105:3000 is
# self-signed).
forgejo_api() {
local method=$1 path=$2; shift 2
local insecure=()
[[ "${FORGEJO_INSECURE:-0}" == "1" ]] && insecure=(-k)
curl -fsSL "${insecure[@]}" --max-time 30 \
curl -fsSL --max-time 30 \
-X "$method" \
-H "Authorization: token ${FORGEJO_ADMIN_TOKEN:?FORGEJO_ADMIN_TOKEN unset}" \
-H "Accept: application/json" \
@ -189,22 +183,15 @@ forgejo_set_secret() {
forgejo_set_var() {
local owner=$1 repo=$2 name=$3 value=$4
# Forgejo API quirks (verified empirically against 1.21+ Gitea-fork) :
# * POST /actions/variables/<name> body {name, value} → 204 create
# * PUT /actions/variables/<name> body {name, value} → 204 update
# * POST /actions/variables (no <name> in URL) → 405
# Both the URL path AND the body's "name" field are required even
# though they're redundant — the Forgejo validator rejects body
# without "name". The stored field is "data" on read, but on write
# we send "value".
local body
body=$(jq -nc --arg n "$name" --arg v "$value" '{name: $n, value: $v}')
# Try update (PUT) ; if 404, create (POST).
if forgejo_api PUT "/repos/$owner/$repo/actions/variables/$name" --data "$body" >/dev/null 2>&1; then
ok "variable $name updated"
elif forgejo_api POST "/repos/$owner/$repo/actions/variables/$name" --data "$body" >/dev/null 2>&1; then
elif forgejo_api POST "/repos/$owner/$repo/actions/variables" --data "$body" >/dev/null 2>&1; then
ok "variable $name created"
else
die "failed to set variable $name (URL: $FORGEJO_API_URL/api/v1/repos/$owner/$repo/actions/variables/$name)"
die "failed to set variable $name"
fi
}

View file

@ -1,60 +0,0 @@
#!/usr/bin/env bash
# reset-vault.sh — recover from a vault password mismatch.
#
# Symptoms : `verify-local.sh` or `bootstrap-local.sh phase 2` reports
# "can decrypt vault.yml" failing — the password in .vault-pass doesn't
# match what was used to encrypt vault.yml. Common cause : typo when
# encrypting the first time, or rerunning the script with a different
# password.
#
# This script :
#   1. Confirms with the operator (destructive — vault.yml content is lost)
#   2. Removes infra/ansible/group_vars/all/vault.yml
#   3. Removes infra/ansible/.vault-pass
#   4. Clears the `vault=DONE` marker in the local state file
#   5. Suggests `PHASE=2 ./bootstrap-local.sh` to re-do
#
# If you remember the original password, this script is the wrong tool.
# Edit .vault-pass to put the correct password instead.
set -Eeuo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$SCRIPT_DIR/lib.sh"
trap_errors
# `|| die` for a clear message (and consistency with the sibling
# bootstrap scripts) instead of set -e's silent exit when this is run
# from outside a git checkout.
REPO_ROOT=$(git -C "$SCRIPT_DIR" rev-parse --show-toplevel) || die "not in a git repo"
VAULT_YML="$REPO_ROOT/infra/ansible/group_vars/all/vault.yml"
VAULT_PASS="$REPO_ROOT/infra/ansible/.vault-pass"
STATE_FILE="$REPO_ROOT/.git/talas-bootstrap/local.state"
warn "This script DELETES the encrypted vault.yml + .vault-pass."
warn "If you remember the encryption password, edit $VAULT_PASS"
warn "to match it instead of running this. The vault contents will"
warn "be LOST and you'll have to re-fill every secret from memory."
echo
# Explicit typed confirmation — a bare y/n is too easy to fat-finger
# for an irreversible delete.
read -rp "Type 'RESET' to confirm: " confirm
if [[ "$confirm" != "RESET" ]]; then
    info "aborted"
    exit 0
fi
info "removing $VAULT_YML"
rm -f "$VAULT_YML"
info "removing $VAULT_PASS"
rm -f "$VAULT_PASS"
# Drop the phase marker so bootstrap-local.sh re-runs the vault phase.
if [[ -f "$STATE_FILE" ]]; then
    info "clearing 'vault=DONE' from $STATE_FILE"
    sed -i '/^vault=/d' "$STATE_FILE"
fi
ok "vault state cleared"
echo
cat <<EOF >&2
Next step :
  cd $SCRIPT_DIR
  PHASE=2 ./bootstrap-local.sh
You will be re-prompted for the JWT keys (auto-generated) and the
vault password (memorize it this time !).
EOF

View file

@ -61,25 +61,10 @@ check "dig available" "command -v dig"
section "Repo state"
check "in repo root" "[[ -f $REPO_ROOT/CLAUDE.md ]]"
check "infra/ansible/ exists" "[[ -d $REPO_ROOT/infra/ansible ]]"
# .forgejo/workflows/ may be active OR renamed to .disabled/ — both are
# valid states. Active = auto-trigger may fire ; disabled = manual run
# only via re-enable script.
if [[ -d "$REPO_ROOT/.forgejo/workflows.disabled" ]]; then
check "deploy.yml present (under workflows.disabled/)" \
"[[ -f $REPO_ROOT/.forgejo/workflows.disabled/deploy.yml ]]"
info " → workflows are DISABLED (renamed to workflows.disabled/) ;"
info " re-enable with scripts/bootstrap/enable-auto-deploy.sh"
elif [[ -d "$REPO_ROOT/.forgejo/workflows" ]]; then
check "deploy.yml present" \
"[[ -f $REPO_ROOT/.forgejo/workflows/deploy.yml ]]"
check_with_hint "deploy.yml gated (no auto-trigger)" \
"! grep -E '^[[:space:]]+push:$' $REPO_ROOT/.forgejo/workflows/deploy.yml" \
"if you want auto-deploy, run scripts/bootstrap/enable-auto-deploy.sh"
else
err "neither .forgejo/workflows/ nor .forgejo/workflows.disabled/ found"
FAIL+=1
fi
check ".forgejo/workflows/deploy.yml" "[[ -f $REPO_ROOT/.forgejo/workflows/deploy.yml ]]"
check_with_hint "deploy.yml gated (no auto-trigger)" \
"! grep -E '^[[:space:]]+push:$' $REPO_ROOT/.forgejo/workflows/deploy.yml" \
"if you want auto-deploy, run scripts/bootstrap/enable-auto-deploy.sh"
section "Vault"
check "vault.yml.example exists" "[[ -f $REPO_ROOT/infra/ansible/group_vars/all/vault.yml.example ]]"
@ -116,26 +101,21 @@ done
if [[ -n "${FORGEJO_ADMIN_TOKEN:-}" ]]; then
section "Forgejo API + secrets/vars"
# Reuse the lib's API helper which honours FORGEJO_INSECURE=1.
_CURL_OPTS=()
[[ "${FORGEJO_INSECURE:-0}" == "1" ]] && _CURL_OPTS+=(-k)
# /version is auth-free → reachability only ; /repos/.. tests auth + scope.
check_with_hint "Forgejo API reachable" \
"curl -fsSL ${_CURL_OPTS[*]} --max-time 10 $FORGEJO_API_URL/api/v1/version" \
"set FORGEJO_API_URL ; for self-signed certs, set FORGEJO_INSECURE=1 in .env"
"curl -fsSL --max-time 10 -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/user" \
"set FORGEJO_API_URL ; if no DNS yet, FORGEJO_API_URL=http://10.0.20.105:3000"
check_with_hint "repo $FORGEJO_OWNER/$FORGEJO_REPO exists" \
"curl -fsSL ${_CURL_OPTS[*]} -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO" \
"curl -fsSL -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO" \
"set FORGEJO_OWNER + FORGEJO_REPO env vars"
check_with_hint "secret FORGEJO_REGISTRY_TOKEN exists" \
"curl -fsSL ${_CURL_OPTS[*]} -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO/actions/secrets/FORGEJO_REGISTRY_TOKEN" \
"curl -fsSL -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO/actions/secrets/FORGEJO_REGISTRY_TOKEN" \
"PHASE=3 ./bootstrap-local.sh"
check_with_hint "secret ANSIBLE_VAULT_PASSWORD exists" \
"curl -fsSL ${_CURL_OPTS[*]} -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO/actions/secrets/ANSIBLE_VAULT_PASSWORD" \
"curl -fsSL -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO/actions/secrets/ANSIBLE_VAULT_PASSWORD" \
"PHASE=3 ./bootstrap-local.sh"
check_with_hint "variable FORGEJO_REGISTRY_URL exists" \
"curl -fsSL ${_CURL_OPTS[*]} -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO/actions/variables/FORGEJO_REGISTRY_URL" \
"curl -fsSL -H 'Authorization: token $FORGEJO_ADMIN_TOKEN' $FORGEJO_API_URL/api/v1/repos/$FORGEJO_OWNER/$FORGEJO_REPO/actions/variables/FORGEJO_REGISTRY_URL" \
"PHASE=3 ./bootstrap-local.sh"
else
warn "FORGEJO_ADMIN_TOKEN not set — skipping API checks. Set it to run those."

View file

@ -1,36 +0,0 @@
#!/usr/bin/env bash
# verify-remote-ssh.sh — run verify-remote.sh on the R720 without a
# repo clone there : copy lib.sh + verify-remote.sh over scp, execute
# the check script via ssh as root, then remove the copies again.
#
# R720_HOST / R720_USER are taken from the environment or from .env.
set -Eeuo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "$SCRIPT_DIR/lib.sh"
trap_errors
# Optional per-operator overrides.
[[ -f "$SCRIPT_DIR/.env" ]] && . "$SCRIPT_DIR/.env"
R720_HOST="${R720_HOST:-srv-102v}"
# "user@host" when a user is configured, bare "host" otherwise (an
# SSH alias may carry its own User= line).
if [[ -n "${R720_USER:-}" ]]; then
    SSH_TARGET="${R720_USER}@${R720_HOST}"
else
    SSH_TARGET="${R720_HOST}"
fi
info "uploading lib.sh + verify-remote.sh to $SSH_TARGET:/tmp/"
scp -q "$SCRIPT_DIR/lib.sh" "$SCRIPT_DIR/verify-remote.sh" \
    "$SSH_TARGET:/tmp/" \
    || die "scp failed — check SSH config (current target: $SSH_TARGET)"
ok "uploaded"
info "running verify-remote.sh as root"
# Root is required so the state file at /var/lib/talas/bootstrap.state
# is accessible ; drop the `sudo` if your account has incus group
# access without it.
ssh -t "$SSH_TARGET" "sudo bash /tmp/verify-remote.sh" \
    || warn "verify-remote.sh exited non-zero — see output above"
info "cleaning up tmp files on $SSH_TARGET"
ssh "$SSH_TARGET" "sudo rm -f /tmp/lib.sh /tmp/verify-remote.sh" || true
ok "done"

View file

@ -8,7 +8,7 @@ import (
func TestSearchService_Search_NilClient(t *testing.T) {
svc := NewSearchService(nil, zap.NewNop())
_, err := svc.Search("test", nil, nil)
_, err := svc.Search("test", nil)
if err == nil {
t.Fatal("expected error when client is nil")
}

View file

@ -20,15 +20,6 @@ func NewAnnouncementHandler(svc *services.AnnouncementService) *AnnouncementHand
}
// List returns all announcements (admin)
// @Summary List all announcements
// @Description Get a list of all announcements, including expired ones. Admin only.
// @Tags Admin
// @Accept json
// @Produce json
// @Security BearerAuth
// @Success 200 {object} object{announcements=array}
// @Failure 401 {object} handlers.APIResponse "Unauthorized"
// @Router /api/v1/announcements [get]
func (h *AnnouncementHandler) List(c *gin.Context) {
list, err := h.svc.List(c.Request.Context())
if err != nil {
@ -39,16 +30,6 @@ func (h *AnnouncementHandler) List(c *gin.Context) {
}
// Create creates an announcement (admin)
// @Summary Create announcement
// @Description Create a new platform announcement. Admin only.
// @Tags Admin
// @Accept json
// @Produce json
// @Security BearerAuth
// @Param announcement body services.CreateAnnouncementRequest true "Announcement data"
// @Success 201 {object} models.Announcement
// @Failure 401 {object} handlers.APIResponse "Unauthorized"
// @Router /api/v1/announcements [post]
func (h *AnnouncementHandler) Create(c *gin.Context) {
userID, ok := GetUserIDUUID(c)
if !ok || userID == uuid.Nil {
@ -71,16 +52,6 @@ func (h *AnnouncementHandler) Create(c *gin.Context) {
}
// Delete deletes an announcement (admin)
// @Summary Delete announcement
// @Description Permanently delete an announcement. Admin only.
// @Tags Admin
// @Accept json
// @Produce json
// @Security BearerAuth
// @Param id path string true "Announcement ID"
// @Success 200 {object} object{message=string}
// @Failure 401 {object} handlers.APIResponse "Unauthorized"
// @Router /api/v1/announcements/{id} [delete]
func (h *AnnouncementHandler) Delete(c *gin.Context) {
idStr := c.Param("id")
id, err := uuid.Parse(idStr)
@ -97,13 +68,6 @@ func (h *AnnouncementHandler) Delete(c *gin.Context) {
}
// GetActive returns active announcements (public)
// @Summary Get active announcements
// @Description Get a list of currently active announcements. Public access.
// @Tags Admin
// @Accept json
// @Produce json
// @Success 200 {object} object{announcements=array}
// @Router /api/v1/announcements/active [get]
func (h *AnnouncementHandler) GetActive(c *gin.Context) {
list, err := h.svc.GetActive(c.Request.Context())
if err != nil {

View file

@ -20,15 +20,6 @@ func NewFeatureFlagHandler(svc *services.FeatureFlagService) *FeatureFlagHandler
}
// List returns all feature flags (admin)
// @Summary List all feature flags
// @Description Get a list of all feature flags and their current status. Admin only.
// @Tags Admin
// @Accept json
// @Produce json
// @Security BearerAuth
// @Success 200 {object} object{feature_flags=array}
// @Failure 401 {object} handlers.APIResponse "Unauthorized"
// @Router /api/v1/admin/feature-flags [get]
func (h *FeatureFlagHandler) List(c *gin.Context) {
list, err := h.svc.List(c.Request.Context())
if err != nil {
@ -39,17 +30,6 @@ func (h *FeatureFlagHandler) List(c *gin.Context) {
}
// Toggle enables or disables a feature flag (admin)
// @Summary Toggle feature flag
// @Description Enable or disable a specific feature flag. Admin only.
// @Tags Admin
// @Accept json
// @Produce json
// @Security BearerAuth
// @Param name path string true "Flag name"
// @Param data body object{enabled=boolean} true "Toggle data"
// @Success 200 {object} models.FeatureFlag
// @Failure 401 {object} handlers.APIResponse "Unauthorized"
// @Router /api/v1/admin/feature-flags/{name}/toggle [put]
func (h *FeatureFlagHandler) Toggle(c *gin.Context) {
name := c.Param("name")
if name == "" {