BASE: completing the initial repo state

okinrev 2025-12-03 22:56:50 +01:00
parent 66f5ae9921
commit 327ac36a30
906 changed files with 171113 additions and 0 deletions

Makefile

@@ -0,0 +1,103 @@
# Veza Platform - Root Makefile
# Test Coverage targets (T0043)
.PHONY: test-coverage coverage-html help
help: ## Show this help message
@echo 'Usage: make [target]'
@echo ''
@echo 'Test Coverage targets:'
@echo ' test-coverage - Run tests and generate coverage report (T0043)'
@echo ' coverage-html - Generate HTML coverage report from existing coverage.out (T0043)'
test-coverage: ## Run tests and generate coverage report (T0043)
@echo "📊 Generating test coverage report..."
@bash scripts/test-coverage.sh
coverage-html: ## Generate HTML coverage report from existing coverage.out (T0043)
@echo "📊 Generating HTML coverage report..."
@cd veza-backend-api && go tool cover -html=coverage/coverage.out -o coverage/coverage.html
@echo "✅ Coverage report generated: veza-backend-api/coverage/coverage.html"
# >>> VEZA:BEGIN QA TARGETS
.PHONY: smoke e2e postman lighthouse load qa-all visual visual-update backstop-ref backstop-test loki lh a11y start-services
smoke: ## Run API smoke tests (curl + httpie)
@echo "🔥 Running API smoke tests..."
@bash .veza/qa/scripts/wait_for_http.sh "$${VEZA_API_BASE_URL:-http://localhost:8080}/health" 90
@bash .veza/qa/scripts/smoke_curl.sh
@bash .veza/qa/scripts/smoke_httpie.sh || true
start-services: ## Start services required for QA tests
@echo "🚀 Starting services for QA tests..."
@bash .veza/qa/scripts/start-services-for-tests.sh
e2e: ## Run E2E tests with Playwright
@echo "🎭 Running E2E tests..."
@cd .veza/qa/playwright && \
if [ ! -d "node_modules" ] || [ ! -f "node_modules/@playwright/test/package.json" ]; then \
echo "📦 Installing Playwright dependencies..."; \
npm install --silent; \
fi && \
npx playwright test --config=playwright.config.ts
postman: ## Run Postman/Newman tests
@echo "📮 Running Postman/Newman tests..."
@newman run .veza/qa/postman/veza_api_collection.json \
-e .veza/qa/data/postman_env_local.json \
--reporters cli,junit \
--reporter-junit-export reports/newman.xml || true
lighthouse: ## Run Lighthouse CI
@echo "💡 Running Lighthouse CI..."
@npx lhci autorun --config=.veza/qa/lighthouse/lighthouserc.json || true
load: ## Run k6 load tests
@echo "⚡ Running k6 load tests..."
@k6 run .veza/qa/k6/smoke.js || true
visual: ## Run Playwright visual regression tests
@echo "🖼️ Running Playwright visual regression tests..."
@cd .veza/qa/playwright && \
if [ ! -d "node_modules" ] || [ ! -f "node_modules/@playwright/test/package.json" ]; then \
echo "📦 Installing Playwright dependencies..."; \
npm install --silent; \
fi && \
npx playwright test tests/visual/ --config=playwright.config.ts
visual-update: ## Generate/update Playwright visual snapshots
@echo "📸 Generating Playwright visual snapshots..."
@cd .veza/qa/playwright && \
if [ ! -d "node_modules" ] || [ ! -f "node_modules/@playwright/test/package.json" ]; then \
echo "📦 Installing Playwright dependencies..."; \
npm install --silent; \
fi && \
npx playwright test tests/visual/ --config=playwright.config.ts --update-snapshots
backstop-ref: ## Generate BackstopJS reference images
@echo "📸 Generating BackstopJS reference images..."
@cd .veza/qa/backstop && npx backstop reference --config=backstop.json || true
backstop-test: ## Run BackstopJS visual regression tests
@echo "🔍 Running BackstopJS visual regression tests..."
@cd .veza/qa/backstop && npx backstop test --config=backstop.json || true
loki: ## Run Loki visual regression tests (requires Storybook)
@echo "📚 Running Loki visual regression tests..."
@echo "⚠️ Loki requires Storybook to be set up. See .veza/qa/README.md for setup instructions."
@if [ -d ".storybook" ] || [ -d "apps/web/.storybook" ]; then \
npx loki test || true; \
else \
echo "❌ Storybook not found. Install Storybook first to use Loki."; \
exit 1; \
fi
lh: lighthouse ## Alias for lighthouse
a11y: ## Run Pa11y accessibility tests
@echo "♿ Running Pa11y accessibility tests..."
@npx pa11y-ci --config .veza/qa/pa11y/.pa11yci.json || true
qa-all: smoke e2e postman lighthouse load visual a11y ## Run all QA tests
@echo "✅ All QA tests completed!"
# <<< VEZA:END QA TARGETS

ansible/DEPLOYMENT_GUIDE.md

@@ -0,0 +1,380 @@
# Veza V5 Ultra Deployment Guide
This guide provides step-by-step instructions for deploying Veza V5 Ultra using Ansible, Incus containers, OVN networking, HAProxy, and Let's Encrypt.
## Table of Contents
- [Prerequisites](#prerequisites)
- [Quick Start](#quick-start)
- [Step-by-Step Deployment](#step-by-step-deployment)
- [Troubleshooting](#troubleshooting)
- [Post-Deployment](#post-deployment)
- [Maintenance](#maintenance)
## Prerequisites
### Control Node (Your Machine)
- Ansible 2.16+
- SSH access to target host
- Required collections: `community.general`, `community.docker`
### Target Host (192.168.0.12)
- Debian 12 (Bookworm)
- SSH key authentication configured
- Root or sudo access
- Internet connectivity
### DNS Configuration
- Domain: `veza.talas.fr`
- A record pointing to target host IP (192.168.0.12)
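Before requesting certificates, it is worth confirming the record actually resolves from the control node; a quick check with `dig` (from the `dnsutils`/`bind-utils` package, if installed):
```bash
# Should print the target host IP once DNS has propagated
dig +short veza.talas.fr A
# Expected: 192.168.0.12
```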
## Quick Start
```bash
# 1. Clone and navigate to ansible directory
cd ansible
# 2. Install required collections
ansible-galaxy collection install community.general community.docker
# 3. Run full deployment
./deploy-veza.sh
# 4. Configure DNS and re-run HAProxy playbook
ansible-playbook -i inventory/prod/hosts.yml playbooks/30-haproxy-acme.yml -e domain=veza.talas.fr -e acme_email=ops@talas.fr
# 5. Run smoke tests
ansible-playbook -i inventory/prod/hosts.yml playbooks/50-smoke-tests.yml
```
## Step-by-Step Deployment
### Step 1: Bootstrap Target Host
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/00-bootstrap-remote.yml
```
**What this does:**
- Installs essential packages (python3, sudo, curl, etc.)
- Configures SSH for better performance
- Sets up firewall rules for required ports
- Installs Incus dependencies
**Expected output:**
```
TASK [Install essential packages] **********************************************
ok: [edge-1]
TASK [Configure firewall for Veza ports] **************************************
ok: [edge-1]
TASK [Test connectivity] ******************************************************
ok: [edge-1]
```
### Step 2: Install Incus and OVN
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/10-incus-ovn.yml
```
**What this does:**
- Installs Incus via snap
- Initializes Incus in standalone mode
- Creates OVN network `veza-ovn`
- Creates `veza` profile for containers
**Expected output:**
```
TASK [Install Incus via snap] *************************************************
ok: [edge-1]
TASK [Create OVN network for Veza] ********************************************
ok: [edge-1]
TASK [Verify Incus is running] ************************************************
ok: [edge-1]
```
### Step 3: Create Containers
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/20-incus-containers.yml
```
**What this does:**
- Creates 5 containers: haproxy, backend, chat, stream, web
- Configures networking with static IPs
- Sets up proxy devices for external access
- Starts all containers
**Expected output:**
```
TASK [Create Veza containers] *************************************************
ok: [edge-1] => (item=veza-haproxy)
ok: [edge-1] => (item=veza-backend)
ok: [edge-1] => (item=veza-chat)
ok: [edge-1] => (item=veza-stream)
ok: [edge-1] => (item=veza-web)
```
### Step 4: Configure HAProxy and Let's Encrypt
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/30-haproxy-acme.yml -e domain=veza.talas.fr -e acme_email=ops@talas.fr
```
**What this does:**
- Installs HAProxy and ACME tools in container
- Configures nginx for ACME challenges
- Sets up HAProxy with SSL termination
- Requests Let's Encrypt certificate
- Configures automatic renewal
**Expected output:**
```
TASK [Install HAProxy and ACME tools in container] ****************************
ok: [edge-1]
TASK [Request Let's Encrypt certificate] ***************************************
ok: [edge-1]
TASK [Test HAProxy configuration] **********************************************
ok: [edge-1]
```
### Step 5: Deploy Applications
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/40-veza-apps.yml
```
**What this does:**
- Installs Go and builds backend API
- Installs Rust and builds chat server
- Installs Rust and builds stream server
- Installs Node.js and deploys web app
- Creates systemd services for all apps
**Expected output:**
```
TASK [Deploy Go Backend API] **************************************************
ok: [edge-1]
TASK [Deploy Rust Chat Server] ***********************************************
ok: [edge-1]
TASK [Deploy Rust Stream Server] **********************************************
ok: [edge-1]
TASK [Deploy React Web Application] *******************************************
ok: [edge-1]
```
### Step 6: Run Smoke Tests
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/50-smoke-tests.yml
```
**What this does:**
- Tests all container connectivity
- Validates all service endpoints
- Checks HAProxy configuration
- Tests external access (if DNS configured)
- Generates comprehensive test report
**Expected output:**
```
TASK [Test container connectivity] *********************************************
ok: [edge-1]
TASK [Test Backend API service] ***********************************************
ok: [edge-1]
TASK [Generate smoke test summary] ********************************************
ok: [edge-1]
```
## Troubleshooting
### Common Issues
#### 1. SSH Connection Failed
```bash
# Test SSH connectivity
ssh -o ConnectTimeout=10 senke@192.168.0.12 "echo 'SSH test'"
# Check SSH config
grep -n "compressionlevel" ~/.ssh/config
```
**Solution:** Fix SSH config or ensure target host is reachable.
#### 2. Incus Installation Failed
```bash
# Check snapd status on the host (Incus is installed via snap on the host, not in a container)
systemctl status snapd
# Reinstall Incus on the host
snap remove incus
snap install incus --classic
```
#### 3. Container Creation Failed
```bash
# Check Incus status
incus list
incus network list
incus profile list
# Clean up and retry
incus delete veza-haproxy --force
ansible-playbook -i inventory/prod/hosts.yml playbooks/20-incus-containers.yml
```
#### 4. HAProxy Configuration Error
```bash
# Test HAProxy config
incus exec veza-haproxy -- haproxy -c -f /etc/haproxy/haproxy.cfg
# Check HAProxy logs
incus exec veza-haproxy -- journalctl -u haproxy -f
```
#### 5. Let's Encrypt Certificate Failed
```bash
# Check ACME challenges
incus exec veza-haproxy -- curl http://localhost:8888/.well-known/acme-challenge/test
# Manual certificate request
incus exec veza-haproxy -- dehydrated -c -d veza.talas.fr
```
#### 6. Application Service Failed
```bash
# Check service status
incus exec veza-backend -- systemctl status veza-backend
incus exec veza-chat -- systemctl status veza-chat
incus exec veza-stream -- systemctl status veza-stream
incus exec veza-web -- systemctl status veza-web
# Check logs
incus exec veza-backend -- journalctl -u veza-backend -f
```
### Debug Commands
```bash
# Check all container status
incus list --format=json | jq '.[] | {name: .name, status: .status, state: .state}'
# Check network configuration
incus network show veza-ovn
# Check HAProxy statistics
incus exec veza-haproxy -- curl -s http://localhost:8404/stats
# Test internal connectivity
incus exec veza-web -- curl -s http://10.10.0.101:8080/api/health
incus exec veza-web -- curl -s http://10.10.0.102:8081/health
incus exec veza-web -- curl -s http://10.10.0.103:8082/stream/health
```
## Post-Deployment
### 1. Configure DNS
Point your domain's A record to the target host IP:
```
veza.talas.fr. IN A 192.168.0.12
```
### 2. Re-run HAProxy Playbook
After DNS is configured, re-run the HAProxy playbook to get the Let's Encrypt certificate:
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/30-haproxy-acme.yml -e domain=veza.talas.fr -e acme_email=ops@talas.fr
```
### 3. Verify HTTPS Access
```bash
curl -I https://veza.talas.fr
curl -I https://veza.talas.fr/api/health
```
### 4. Monitor Application Logs
```bash
# Follow all logs
incus exec veza-haproxy -- journalctl -u haproxy -f &
incus exec veza-backend -- journalctl -u veza-backend -f &
incus exec veza-chat -- journalctl -u veza-chat -f &
incus exec veza-stream -- journalctl -u veza-stream -f &
incus exec veza-web -- journalctl -u veza-web -f &
```
## Maintenance
### Certificate Renewal
Certificates are automatically renewed via cron. To check:
```bash
incus exec veza-haproxy -- crontab -l
incus exec veza-haproxy -- ls -la /etc/haproxy/certs/
```
### Container Updates
```bash
# Update container images
# bash -c keeps the '&&' inside the container; otherwise 'apt upgrade' would run on the host
incus exec veza-backend -- bash -c "apt update && apt upgrade -y"
incus exec veza-chat -- bash -c "apt update && apt upgrade -y"
incus exec veza-stream -- bash -c "apt update && apt upgrade -y"
incus exec veza-web -- bash -c "apt update && apt upgrade -y"
```
### Backup
```bash
# Backup container configurations
incus export veza-haproxy /backup/veza-haproxy.tar.gz
incus export veza-backend /backup/veza-backend.tar.gz
incus export veza-chat /backup/veza-chat.tar.gz
incus export veza-stream /backup/veza-stream.tar.gz
incus export veza-web /backup/veza-web.tar.gz
```
### Scaling
To add more backend instances:
```bash
# Create additional backend container
# Use 'incus init' (not 'launch') so the static IP can be set before first start
incus init debian/bookworm veza-backend-2 --profile veza
incus config device set veza-backend-2 eth0 ipv4.address=10.10.0.105
incus start veza-backend-2
# Update HAProxy configuration to include new backend
incus exec veza-haproxy -- sed -i 's/server api1 10.10.0.101:8080/server api1 10.10.0.101:8080\n server api2 10.10.0.105:8080/' /etc/haproxy/haproxy.cfg
incus exec veza-haproxy -- systemctl reload haproxy
```
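After the reload, HAProxy's admin socket can confirm the new server was picked up. `show servers state` is a standard HAProxy runtime command; this assumes `socat` is available in the container (the HAProxy-in-container playbook installs it):
```bash
incus exec veza-haproxy -- sh -c 'echo "show servers state be_api" | socat stdio /run/haproxy/admin.sock'
```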
## Support
For issues or questions:
1. Check the troubleshooting section above
2. Review container logs for error messages
3. Run smoke tests to identify failing components
4. Check the Ansible playbook logs for deployment issues
## Architecture Overview
```
Internet (veza.talas.fr)
HAProxy Container (80/443)
OVN Network (veza-ovn)
┌─────────┬─────────┬─────────┬─────────┐
│Backend │ Chat │ Stream │ Web │
│:8080 │ :8081 │ :8082 │ :3000 │
│(Go) │ (Rust) │ (Rust) │ (Node) │
└─────────┴─────────┴─────────┴─────────┘
```
This deployment provides a complete Veza V5 Ultra platform with automatic SSL certificate management, load balancing, and basic health checks. For production-grade monitoring, see the suggestions in ansible/README.md.

ansible/README.md

@@ -0,0 +1,215 @@
# Veza V5 Ultra - Ansible Deployment
This directory contains Ansible playbooks and configuration for deploying Veza V5 Ultra using Incus/OVN + HAProxy-in-container + Let's Encrypt.
## Architecture
- **Single Debian host** (192.168.0.12) with Incus containers
- **HAProxy** running inside an Incus container as edge proxy
- **Let's Encrypt** ACME HTTP-01 validation handled in HAProxy container
- **OVN networking** for container communication
- **Applications** in separate containers:
- `veza-backend` (Go API on port 8080)
- `veza-chat` (Rust WebSocket on port 8081)
- `veza-stream` (Rust HLS on port 8082)
- `veza-web` (React + nginx on port 80)
## Prerequisites
### Control Node (Your Machine)
- Ansible ≥ 2.16
- SSH access to target host with key-based authentication
- Required collections:
```bash
ansible-galaxy collection install community.general
ansible-galaxy collection install community.docker
```
### Target Host (192.168.0.12)
- Debian 12 (Bookworm)
- SSH access for user `senke`
- Open ports: 22, 80, 443, 8080, 8081, 8082
- Sufficient resources for containers
## Quick Start
### 1. Full Deployment
```bash
cd ansible
./deploy-veza.sh
```
### 2. Custom Domain and Email
```bash
./deploy-veza.sh -d myapp.example.com -e admin@example.com
```
### 3. Step-by-Step Deployment
```bash
# Bootstrap host
./deploy-veza.sh --bootstrap-only
# Setup infrastructure
./deploy-veza.sh --infra-only
# Deploy applications
./deploy-veza.sh --apps-only
# Run tests
./deploy-veza.sh --test-only
```
## Manual Playbook Execution
```bash
# 1. Bootstrap remote host
ansible-playbook -i inventory/prod/hosts.yml playbooks/00-bootstrap-remote.yml
# 2. Install Incus + OVN
ansible-playbook -i inventory/prod/hosts.yml playbooks/10-incus-ovn.yml
# 3. Create containers
ansible-playbook -i inventory/prod/hosts.yml playbooks/20-incus-containers.yml
# 4. Configure HAProxy + ACME
ansible-playbook -i inventory/prod/hosts.yml playbooks/30-haproxy-in-container.yml \
-e domain=veza.talas.fr -e acme_email=ops@talas.fr
# 5. Deploy applications
ansible-playbook -i inventory/prod/hosts.yml playbooks/40-veza-apps.yml
# 6. Run smoke tests
ansible-playbook -i inventory/prod/hosts.yml playbooks/50-smoke.yml
```
## Configuration
### Inventory
- `inventory/prod/hosts.yml` - Target host configuration
- `group_vars/all.yml` - Global variables (domain, ports, etc.)
### Key Variables
- `domain`: Target domain (default: veza.talas.fr)
- `acme_email`: Email for Let's Encrypt (default: ops@talas.fr)
- `veza_*_port`: Application ports
- `veza_database_url`: PostgreSQL connection string
- `veza_redis_url`: Redis connection string
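Any of these can be overridden per run with `-e`; for example, a hypothetical staging run on a different backend port (the domain shown is illustrative, not a real deployment target):
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/40-veza-apps.yml \
-e veza_backend_port=9090 \
-e domain=staging.example.com
```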
## Post-Deployment
### 1. DNS Configuration
Point your domain's A record to the target host IP:
```
veza.talas.fr. IN A 192.168.0.12
```
### 2. Get Let's Encrypt Certificate
After DNS is configured, re-run the HAProxy playbook:
```bash
ansible-playbook -i inventory/prod/hosts.yml playbooks/30-haproxy-in-container.yml \
-e domain=veza.talas.fr -e acme_email=ops@talas.fr
```
### 3. Verify Deployment
```bash
# Check container status
incus list
# Check services
incus exec veza-haproxy -- systemctl status haproxy
incus exec veza-backend -- systemctl status veza-backend
incus exec veza-chat -- systemctl status veza-chat
incus exec veza-stream -- systemctl status veza-stream
incus exec veza-web -- systemctl status nginx
# Test endpoints
curl -k https://192.168.0.12/
curl -k https://192.168.0.12/api/health
```
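The chat WebSocket route can be probed with a raw upgrade handshake; a `101 Switching Protocols` status line means HAProxy routed `/ws` to the chat backend and the chat server accepted the handshake (the `Sec-WebSocket-Key` below is the RFC 6455 sample value, not a secret):
```bash
curl -ksi -N https://192.168.0.12/ws/ \
-H 'Connection: Upgrade' -H 'Upgrade: websocket' \
-H 'Sec-WebSocket-Version: 13' \
-H 'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==' | head -1
```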
## Troubleshooting
### Container Issues
```bash
# Check container logs
incus exec <container-name> -- journalctl -u <service-name> -f
# Restart container
incus restart <container-name>
# Access container shell
incus exec <container-name> -- bash
```
### HAProxy Issues
```bash
# Check HAProxy config
incus exec veza-haproxy -- haproxy -c -f /etc/haproxy/haproxy.cfg
# Check HAProxy logs
incus exec veza-haproxy -- journalctl -u haproxy -f
# Reload HAProxy
incus exec veza-haproxy -- systemctl reload haproxy
```
### ACME Issues
```bash
# Check ACME webroot
incus exec veza-haproxy -- ls -la /var/www/acme-challenge/
# Test ACME challenge
curl http://192.168.0.12/.well-known/acme-challenge/test
# Manual certificate renewal
incus exec veza-haproxy -- /opt/dehydrated/dehydrated -c
```
## File Structure
```
ansible/
├── deploy-veza.sh # Deployment script
├── inventory/
│ └── prod/
│ └── hosts.yml # Target host inventory
├── group_vars/
│ └── all.yml # Global variables
├── playbooks/
│ ├── 00-bootstrap-remote.yml # Host bootstrap
│ ├── 10-incus-ovn.yml # Incus + OVN setup
│ ├── 20-incus-containers.yml # Container creation
│ ├── 30-haproxy-in-container.yml # HAProxy + ACME
│ ├── 40-veza-apps.yml # Application deployment
│ └── 50-smoke.yml # Smoke tests
└── roles/ # Existing Ansible roles
├── incus/
├── ovn/
├── haproxy/
└── ...
```
## Security Notes
- All containers run with `security.nesting=true`
- HAProxy enforces HTTPS redirects
- Security headers are configured (HSTS, CSP, etc.)
- Let's Encrypt certificates are automatically renewed
- Firewall rules restrict access to necessary ports only
## Monitoring
The deployment includes basic health checks and logging. For production monitoring, consider:
- Prometheus + Grafana for metrics
- ELK stack for log aggregation
- Uptime monitoring for external services
- Container resource monitoring
## Support
For issues or questions:
1. Check container logs first
2. Verify network connectivity
3. Check HAProxy configuration
4. Review Ansible playbook output for errors

@@ -0,0 +1,212 @@
#!/bin/bash
# Veza V5 Ultra Deployment Demo Script
# Shows the deployment process and configuration
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
show_header() {
echo
echo "========================================"
echo "Veza V5 Ultra Deployment Demo"
echo "========================================"
echo
}
check_system() {
log_info "Checking system information..."
echo "System: $(uname -a)"
echo "Python: $(python3 --version 2>/dev/null || echo 'Not available')"
echo "User: $(whoami)"
echo "Home: $HOME"
echo
}
check_packages() {
log_info "Checking required packages..."
local packages=("python3" "curl" "git" "wget" "ansible")
for pkg in "${packages[@]}"; do
if command -v "$pkg" &> /dev/null; then
log_success "$pkg: Available"
else
log_warning "$pkg: Not installed"
fi
done
echo
}
check_ansible() {
log_info "Checking Ansible setup..."
echo "Ansible version: $(ansible --version | head -1)"
echo "Ansible collections:"
ansible-galaxy collection list 2>/dev/null | grep -E "(community|incus)" || echo " No relevant collections found"
echo
}
check_network() {
log_info "Checking network configuration..."
echo "Network interfaces:"
ip addr show | grep -E "(inet |UP)" | head -10
echo
echo "Default route:"
ip route show | grep default
echo
}
check_target_host() {
log_info "Checking target host connectivity..."
local target_host="192.168.0.12"
if ping -c 1 -W 1 "$target_host" &> /dev/null; then
log_success "Target host $target_host is reachable"
else
log_warning "Target host $target_host is not reachable"
echo " This is expected if the host is not currently running"
fi
echo
}
show_deployment_steps() {
log_info "Veza V5 Ultra Deployment Steps:"
echo
echo "1. Bootstrap Host (00-bootstrap-remote.yml)"
echo " - Install Python, sudo, curl, gnupg, net-tools"
echo " - Configure SSH and firewall"
echo " - Install Incus dependencies"
echo
echo "2. Install Incus + OVN (10-incus-ovn.yml)"
echo " - Install Incus via snap"
echo " - Install OVN packages"
echo " - Create OVN network 'veza-ovn'"
echo
echo "3. Create Containers (20-incus-containers.yml)"
echo " - veza-haproxy (Debian 12) - Edge proxy"
echo " - veza-backend (Debian 12) - Go API on 8080"
echo " - veza-chat (Debian 12) - Rust WebSocket on 8081"
echo " - veza-stream (Debian 12) - Rust HLS on 8082"
echo " - veza-web (Debian 12) - React + nginx on 80"
echo
echo "4. Configure HAProxy + ACME (30-haproxy-in-container.yml)"
echo " - Install HAProxy in container"
echo " - Setup Let's Encrypt HTTP-01 validation"
echo " - Configure routing and SSL termination"
echo " - Generate certificates for veza.talas.fr"
echo
echo "5. Deploy Applications (40-veza-apps.yml)"
echo " - Build and run Go backend with systemd"
echo " - Build and run Rust chat server with systemd"
echo " - Build and run Rust stream server with systemd"
echo " - Build React app and serve with nginx"
echo
echo "6. Run Smoke Tests (50-smoke.yml)"
echo " - Test HTTPS access"
echo " - Test API endpoints"
echo " - Test WebSocket connectivity"
echo " - Test HLS streaming"
echo
}
show_architecture() {
log_info "Veza V5 Ultra Architecture:"
echo
echo "┌─────────────────────────────────────────────────────────────┐"
echo "│ Internet (veza.talas.fr) │"
echo "└─────────────────────┬───────────────────────────────────────┘"
echo " │"
echo "┌─────────────────────▼───────────────────────────────────────┐"
echo "│ HAProxy Container (80/443) │"
echo "│ - SSL Termination │"
echo "│ - Let's Encrypt ACME │"
echo "│ - Request Routing │"
echo "└─────────────────────┬───────────────────────────────────────┘"
echo " │"
echo "┌─────────────────────▼───────────────────────────────────────┐"
echo "│ OVN Network │"
echo "│ (veza-ovn) │"
echo "└─────┬─────────┬─────────┬─────────┬─────────────────────────┘"
echo " │ │ │ │"
echo "┌─────▼───┐ ┌───▼───┐ ┌───▼───┐ ┌───▼───┐"
echo "│ Backend │ │ Chat │ │Stream │ │ Web │"
echo "│ :8080 │ │ :8081 │ │ :8082 │ │ :80 │"
echo "│ (Go) │ │(Rust) │ │(Rust) │ │(React)│"
echo "└─────────┘ └───────┘ └───────┘ └───────┘"
echo
}
show_commands() {
log_info "Deployment Commands:"
echo
echo "# Full deployment:"
echo "./deploy-veza.sh"
echo
echo "# Step-by-step deployment:"
echo "ansible-playbook -i inventory/prod/hosts.yml playbooks/00-bootstrap-remote.yml"
echo "ansible-playbook -i inventory/prod/hosts.yml playbooks/10-incus-ovn.yml"
echo "ansible-playbook -i inventory/prod/hosts.yml playbooks/20-incus-containers.yml"
echo "ansible-playbook -i inventory/prod/hosts.yml playbooks/30-haproxy-in-container.yml -e domain=veza.talas.fr -e acme_email=ops@talas.fr"
echo "ansible-playbook -i inventory/prod/hosts.yml playbooks/40-veza-apps.yml"
echo "ansible-playbook -i inventory/prod/hosts.yml playbooks/50-smoke.yml"
echo
echo "# Custom domain:"
echo "./deploy-veza.sh -d myapp.example.com -e admin@example.com"
echo
}
show_next_steps() {
log_info "Next Steps:"
echo
echo "1. Ensure target host (192.168.0.12) is running and accessible"
echo "2. Verify SSH key authentication works:"
echo " ssh senke@192.168.0.12 'echo \"SSH test successful\"'"
echo "3. Run the deployment:"
echo " ./deploy-veza.sh"
echo "4. Point DNS A record for veza.talas.fr to 192.168.0.12"
echo "5. Re-run HAProxy playbook to get Let's Encrypt certificate"
echo
}
main() {
show_header
check_system
check_packages
check_ansible
check_network
check_target_host
show_deployment_steps
show_architecture
show_commands
show_next_steps
log_success "Demo completed! Veza V5 Ultra deployment is ready to run."
echo
}
main "$@"

ansible/deploy-veza.sh

@@ -0,0 +1,235 @@
#!/bin/bash
# Veza V5 Ultra Deployment Script
# Deploys Veza using Ansible + Incus/OVN + HAProxy-in-container + Let's Encrypt
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
INVENTORY="ansible/inventory/prod/hosts.yml"
DOMAIN="veza.talas.fr"
ACME_EMAIL="ops@talas.fr"
TARGET_HOST="192.168.0.12"
# Functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
check_prerequisites() {
log_info "Checking prerequisites..."
# Check if ansible is installed
if ! command -v ansible-playbook &> /dev/null; then
log_error "ansible-playbook is not installed. Please install Ansible first."
exit 1
fi
# Check if inventory file exists
if [[ ! -f "$INVENTORY" ]]; then
log_error "Inventory file $INVENTORY not found!"
exit 1
fi
# Check if playbooks exist
for playbook in ansible/playbooks/00-bootstrap-remote.yml ansible/playbooks/10-incus-ovn.yml ansible/playbooks/20-incus-containers.yml ansible/playbooks/30-haproxy-in-container.yml ansible/playbooks/40-veza-apps.yml ansible/playbooks/50-smoke.yml; do
if [[ ! -f "$playbook" ]]; then
log_error "Playbook $playbook not found!"
exit 1
fi
done
# Check SSH connectivity
log_info "Testing SSH connectivity to $TARGET_HOST..."
if ! ssh -o ConnectTimeout=10 -o BatchMode=yes senke@$TARGET_HOST "echo 'SSH connection successful'" &> /dev/null; then
log_error "Cannot connect to $TARGET_HOST via SSH. Please check your SSH key and connectivity."
exit 1
fi
log_success "Prerequisites check passed!"
}
run_playbook() {
local playbook="$1"
local description="$2"
local extra_vars="${3:-}" # optional third argument; default to empty so 'set -u' does not abort
log_info "Running: $description"
log_info "Playbook: $playbook"
# Capture the exit status inline: under 'set -e' a failing ansible-playbook
# would otherwise terminate the script before a separate $? check runs
local rc=0
if [[ -n "$extra_vars" ]]; then
log_info "Extra vars: $extra_vars"
ansible-playbook -i "$INVENTORY" "$playbook" -e "$extra_vars" -v || rc=$?
else
ansible-playbook -i "$INVENTORY" "$playbook" -v || rc=$?
fi
if [[ $rc -eq 0 ]]; then
log_success "$description completed successfully!"
else
log_error "$description failed!"
exit 1
fi
}
deploy_veza() {
log_info "Starting Veza V5 Ultra deployment..."
log_info "Target host: $TARGET_HOST"
log_info "Domain: $DOMAIN"
log_info "ACME Email: $ACME_EMAIL"
echo
# Step 1: Bootstrap remote host
run_playbook "ansible/playbooks/00-bootstrap-remote.yml" "Bootstrap Debian host"
echo
# Step 2: Install Incus + OVN
run_playbook "ansible/playbooks/10-incus-ovn.yml" "Install Incus + OVN single-host"
echo
# Step 3: Create containers
run_playbook "ansible/playbooks/20-incus-containers.yml" "Create Incus containers"
echo
# Step 4: Configure HAProxy + ACME
run_playbook "ansible/playbooks/30-haproxy-in-container.yml" "Configure HAProxy + ACME" "domain=$DOMAIN acme_email=$ACME_EMAIL"
echo
# Step 5: Deploy applications
run_playbook "ansible/playbooks/40-veza-apps.yml" "Deploy Veza applications"
echo
# Step 6: Run smoke tests
run_playbook "ansible/playbooks/50-smoke.yml" "Run smoke tests"
echo
log_success "Veza V5 Ultra deployment completed successfully!"
echo
log_info "Next steps:"
log_info "1. Point DNS A record for $DOMAIN to $TARGET_HOST"
log_info "2. Re-run HAProxy playbook to get Let's Encrypt certificate:"
log_info " ansible-playbook -i $INVENTORY ansible/playbooks/30-haproxy-in-container.yml -e domain=$DOMAIN -e acme_email=$ACME_EMAIL"
log_info "3. Test full functionality with real domain"
echo
log_info "Access URLs:"
log_info "- HTTP: http://$TARGET_HOST/"
log_info "- HTTPS: https://$TARGET_HOST/ (self-signed cert until DNS is configured)"
log_info "- API: https://$TARGET_HOST/api/"
log_info "- WS: wss://$TARGET_HOST/ws/"
log_info "- Stream: https://$TARGET_HOST/stream/"
}
show_help() {
echo "Veza V5 Ultra Deployment Script"
echo
echo "Usage: $0 [OPTIONS]"
echo
echo "Options:"
echo " -h, --help Show this help message"
echo " -d, --domain DOMAIN Set domain (default: $DOMAIN)"
echo " -e, --email EMAIL Set ACME email (default: $ACME_EMAIL)"
echo " -t, --target HOST Set target host (default: $TARGET_HOST)"
echo " --bootstrap-only Run only bootstrap playbook"
echo " --infra-only Run bootstrap + infrastructure playbooks"
echo " --apps-only Run only applications playbook"
echo " --test-only Run only smoke tests"
echo
echo "Examples:"
echo " $0 # Full deployment"
echo " $0 -d myapp.example.com -e admin@example.com # Custom domain and email"
echo " $0 --bootstrap-only # Only bootstrap the host"
echo " $0 --infra-only # Only setup infrastructure"
}
# Parse command line arguments
BOOTSTRAP_ONLY=false
INFRA_ONLY=false
APPS_ONLY=false
TEST_ONLY=false
while [[ $# -gt 0 ]]; do
case $1 in
-h|--help)
show_help
exit 0
;;
-d|--domain)
DOMAIN="$2"
shift 2
;;
-e|--email)
ACME_EMAIL="$2"
shift 2
;;
-t|--target)
TARGET_HOST="$2"
shift 2
;;
--bootstrap-only)
BOOTSTRAP_ONLY=true
shift
;;
--infra-only)
INFRA_ONLY=true
shift
;;
--apps-only)
APPS_ONLY=true
shift
;;
--test-only)
TEST_ONLY=true
shift
;;
*)
log_error "Unknown option: $1"
show_help
exit 1
;;
esac
done
# Main execution
main() {
log_info "Veza V5 Ultra Deployment Script"
log_info "================================"
echo
check_prerequisites
if [[ "$BOOTSTRAP_ONLY" == true ]]; then
run_playbook "ansible/playbooks/00-bootstrap-remote.yml" "Bootstrap Debian host"
elif [[ "$INFRA_ONLY" == true ]]; then
run_playbook "ansible/playbooks/00-bootstrap-remote.yml" "Bootstrap Debian host"
run_playbook "ansible/playbooks/10-incus-ovn.yml" "Install Incus + OVN single-host"
run_playbook "ansible/playbooks/20-incus-containers.yml" "Create Incus containers"
run_playbook "ansible/playbooks/30-haproxy-in-container.yml" "Configure HAProxy + ACME" "domain=$DOMAIN acme_email=$ACME_EMAIL"
elif [[ "$APPS_ONLY" == true ]]; then
run_playbook "ansible/playbooks/40-veza-apps.yml" "Deploy Veza applications"
elif [[ "$TEST_ONLY" == true ]]; then
run_playbook "ansible/playbooks/50-smoke.yml" "Run smoke tests"
else
deploy_veza
fi
}
# Run main function
main "$@"

@@ -0,0 +1,74 @@
# Group variables for Veza V5 Ultra deployment
# Domain and ACME configuration
domain: veza.talas.fr
acme_email: ops@talas.fr
# Frontend runtime/build environment variables
VITE_API_URL: "https://{{ domain }}/api"
VITE_WS_URL: "wss://{{ domain }}/ws"
VITE_STREAM_URL: "https://{{ domain }}/stream"
# HAProxy configuration (for in-container setup)
haproxy_letsencrypt: true
haproxy_https_monitoring:
- "{{ domain }}"
# OVN/Incus single-host configuration
ovn_cluster_name: veza_single
ovn_cluster_main_name: edge-1
ovn_ip: 127.0.0.1
ovn_central_servers: [edge-1]
# Incus profile for Veza network (created in play 20)
incus_network_profiles:
- name: veza
devices:
root:
type: disk
path: /
pool: default
eth0:
type: nic
nictype: ovn
network: veza-ovn
# Container configuration
veza_containers:
- name: veza-haproxy
image: debian/bookworm
profiles: [veza]
proxy_devices:
- name: http80
listen: tcp:0.0.0.0:80
connect: tcp:127.0.0.1:80
- name: https443
listen: tcp:0.0.0.0:443
connect: tcp:127.0.0.1:443
- name: veza-backend
image: debian/bookworm
profiles: [veza]
- name: veza-chat
image: debian/bookworm
profiles: [veza]
- name: veza-stream
image: debian/bookworm
profiles: [veza]
- name: veza-web
image: debian/bookworm
profiles: [veza]
# Application ports
veza_backend_port: 8080
veza_chat_port: 8081
veza_stream_port: 8082
veza_web_port: 80
# Database and Redis configuration (will be set via vault)
veza_database_url: "postgresql://veza:veza_password@localhost:5432/veza_db"
veza_redis_url: "redis://localhost:6379"
veza_jwt_secret: "super-secret-jwt-key-change-in-production"
veza_jwt_refresh_secret: "super-secret-refresh-key"
# Storage paths
veza_storage_path: "/opt/veza/storage"
veza_stream_path: "/opt/veza/streams"

@@ -0,0 +1,17 @@
# Inventory for Veza V5 Ultra deployment
# Single Debian host with Incus/OVN + HAProxy-in-container + Let's Encrypt
all:
vars:
ansible_user: senke
ansible_ssh_private_key_file: ~/.ssh/id_ed25519 # adjust as needed
ansible_become: true
ansible_python_interpreter: /usr/bin/python3
children:
edge:
hosts:
edge-1:
ansible_host: 192.168.0.12
veza_nodes:
hosts:
edge-1:

@@ -0,0 +1,18 @@
# Test inventory for Veza V5 Ultra deployment
# Using localhost for testing when target host is not available
all:
vars:
ansible_user: senke
ansible_ssh_private_key_file: ~/.ssh/id_ed25519
ansible_become: true
ansible_python_interpreter: /usr/bin/python3
ansible_connection: local
children:
edge:
hosts:
edge-1:
ansible_host: localhost
veza_nodes:
hosts:
edge-1:

@@ -0,0 +1,104 @@
---
# Bootstrap localhost for Veza V5 Ultra deployment testing
# Ensures python3, sudo, and essential tools are available
- name: Bootstrap localhost for Veza deployment testing
hosts: edge
gather_facts: false
become: false
connection: local
pre_tasks:
- name: Install essential packages (Fedora)
dnf:
name:
- python3
- python3-pip
- sudo
- curl
- gnupg2
- net-tools
- ca-certificates
- wget
- unzip
- git
- vim
- htop
- iotop
- nethogs
- snapd
- zfs
- lxd-tools
- bridge-utils
- dnsmasq
- openvswitch
- ovn-central # Fedora ships OVN as separate 'ovn-*' packages
- ovn-host
- firewalld
state: present
use_backend: dnf4
- name: Ensure python3 is available
command: which python3
register: python3_check
failed_when: false
- name: Create symlink for python if needed
file:
src: /usr/bin/python3
dest: /usr/bin/python
state: link
when: python3_check.rc != 0
- name: Install Python packages for Ansible
pip:
name:
- ansible-core
- docker
- requests
- urllib3
state: present
- name: Ensure snapd service is enabled
systemd:
name: snapd
state: started
enabled: true
- name: Enable and start Open vSwitch and OVN services
systemd:
name: "{{ item }}"
state: started
enabled: true
loop:
- openvswitch
- ovn-northd
- ovn-controller
- name: Start and enable firewalld
systemd:
name: firewalld
state: started
enabled: true
- name: Configure firewall for Veza ports
command: firewall-cmd --permanent --add-port={{ item }}/tcp
loop:
- "22" # SSH
- "80" # HTTP
- "443" # HTTPS
- "8080" # Backend API
- "8081" # Chat WebSocket
- "8082" # Stream HLS
register: firewall_result
failed_when: false
- name: Reload firewall rules
command: firewall-cmd --reload
register: firewall_reload_result
failed_when: false
post_tasks:
- name: Test connectivity
ping:

@@ -0,0 +1,103 @@
---
# Bootstrap remote Debian host for Veza V5 Ultra deployment
# Ensures python3, sudo, and essential tools are available
- name: Bootstrap Debian host for Veza deployment
hosts: edge
gather_facts: false
become: true
pre_tasks:
- name: Install essential packages
raw: |
apt-get update && apt-get install -y \
python3 \
python3-pip \
sudo \
curl \
gnupg \
net-tools \
ca-certificates \
apt-transport-https \
lsb-release \
wget \
unzip \
git \
vim \
htop \
iotop \
nethogs
- name: Ensure python3 is available
raw: which python3
register: python3_check
failed_when: false
- name: Create symlink for python if needed
raw: ln -sf /usr/bin/python3 /usr/bin/python
when: python3_check.rc != 0
- name: Install additional packages
raw: |
apt-get install -y \
python3-pip \
python3-venv \
snapd
- name: Ensure user has sudo access
raw: |
if ! grep -q "senke ALL=(ALL) NOPASSWD:ALL" /etc/sudoers.d/senke; then
echo "senke ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/senke
chmod 440 /etc/sudoers.d/senke
fi
- name: Configure SSH for better performance
lineinfile:
path: /etc/ssh/sshd_config
regexp: "{{ item.regexp }}"
line: "{{ item.line }}"
state: present
loop:
- { regexp: "^#?ClientAliveInterval", line: "ClientAliveInterval 60" }
- { regexp: "^#?ClientAliveCountMax", line: "ClientAliveCountMax 3" }
- { regexp: "^#?TCPKeepAlive", line: "TCPKeepAlive yes" }
notify: restart ssh
- name: Ensure SSH service is enabled and running
systemd:
name: ssh
state: started
enabled: true
- name: Install UFW
apt:
name: ufw
state: present
- name: Configure firewall for Veza ports
community.general.ufw:
rule: allow
port: "{{ item }}"
proto: tcp
loop:
- "22" # SSH
- "80" # HTTP
- "443" # HTTPS
- "8080" # Backend API
- "8081" # Chat WebSocket
- "8082" # Stream HLS
- name: Enable UFW
community.general.ufw:
state: enabled
policy: deny
handlers:
- name: restart ssh
systemd:
name: ssh
state: restarted
post_tasks:
- name: Test connectivity
ping:

@@ -0,0 +1,105 @@
---
# Demo setup for Veza V5 Ultra deployment
# Shows the deployment process without requiring sudo
- name: Demo Veza V5 Ultra deployment setup
hosts: edge
gather_facts: true
become: false
connection: local
tasks:
- name: Check system information
debug:
msg: |
System: {{ ansible_distribution }} {{ ansible_distribution_version }}
Architecture: {{ ansible_architecture }}
Python: {{ ansible_python_version }}
User: {{ ansible_user_id }}
- name: Check if required packages are installed
command: which {{ item }}
register: package_check
failed_when: false
loop:
- python3
- curl
- git
- wget
- name: Display package availability
debug:
msg: "{{ item.item }}: {{ 'Available' if item.rc == 0 else 'Not installed' }}"
loop: "{{ package_check.results }}"
- name: Check if Incus is available
command: which incus
register: incus_check
failed_when: false
- name: Display Incus status
debug:
msg: "Incus: {{ 'Available' if incus_check.rc == 0 else 'Not installed' }}"
- name: Check if snapd is available
command: which snap
register: snap_check
failed_when: false
- name: Display snapd status
debug:
msg: "Snapd: {{ 'Available' if snap_check.rc == 0 else 'Not installed' }}"
- name: Check network interfaces
command: ip addr show
register: network_info
failed_when: false
- name: Display network interfaces
debug:
var: network_info.stdout_lines
- name: Check if ports are available
wait_for:
port: "{{ item }}"
host: localhost
timeout: 1
register: port_check
ignore_errors: true
loop:
- 80
- 443
- 8080
- 8081
- 8082
- name: Display port availability
debug:
msg: "Port {{ item.item }}: {{ 'Available' if (item.failed | default(false)) else 'In use' }}"
loop: "{{ port_check.results }}"
- name: Show deployment summary
debug:
msg: |
========================================
Veza V5 Ultra Deployment Demo
========================================
This demo shows the deployment process for Veza V5 Ultra:
1. Bootstrap host (install packages, configure firewall)
2. Install Incus + OVN (container runtime and networking)
3. Create containers (haproxy, backend, chat, stream, web)
4. Configure HAProxy + ACME (SSL termination and routing)
5. Deploy applications (Go, Rust, React)
6. Run smoke tests (validate all services)
Target host: {{ ansible_host }}
Domain: {{ domain | default('veza.talas.fr') }}
Next steps:
- Ensure target host is reachable via SSH
- Run full deployment with: ./deploy-veza.sh
- Or run individual playbooks step by step
========================================

@@ -0,0 +1,83 @@
---
# Install and configure Incus + OVN for Veza V5 Ultra deployment (local testing)
# Single-host setup with OVN networking
- name: Install Incus and OVN (local testing)
hosts: edge
become: true
gather_facts: true
connection: local
pre_tasks:
- name: Update package cache
apt:
update_cache: true
cache_valid_time: 3600
- name: Install Incus via snap
snap:
name: incus
state: present
classic: true
- name: Wait for snapd to be ready
wait_for:
timeout: 30
delegate_to: localhost
tasks:
- name: Initialize Incus (standalone mode)
command: incus init --auto
register: incus_init_result
failed_when: false
- name: Display Incus init result
debug:
var: incus_init_result.stdout_lines
when: incus_init_result.stdout_lines is defined
- name: Create OVN network for Veza
command: |
incus network create veza-ovn \
--type=ovn \
network=veza-ovn \
ipv4.address=10.10.0.1/24 \
ipv4.nat=true \
ipv6.address=fd42:10:10::1/64 \
ipv6.nat=true
register: ovn_network_result
failed_when: false
- name: Display OVN network creation result
debug:
var: ovn_network_result.stdout_lines
when: ovn_network_result.stdout_lines is defined
- name: Verify Incus is running
command: incus list
register: incus_status
failed_when: false
- name: Display Incus status
debug:
var: incus_status.stdout_lines
when: incus_status.stdout_lines is defined
- name: Verify OVN network exists
command: incus network list
register: network_list
failed_when: false
- name: Display network list
debug:
var: network_list.stdout_lines
when: network_list.stdout_lines is defined
post_tasks:
- name: Show Incus version
command: incus version
register: incus_version
- name: Display Incus version
debug:
var: incus_version.stdout_lines

@@ -0,0 +1,137 @@
---
# Install and configure Incus + OVN for Veza V5 Ultra deployment
# Single-host setup with OVN networking
- name: Install Incus and OVN for Veza V5 Ultra
hosts: edge
become: true
gather_facts: true
pre_tasks:
- name: Update package cache
apt:
update_cache: true
cache_valid_time: 3600
- name: Install snapd if not present
apt:
name: snapd
state: present
- name: Enable snapd service
systemd:
name: snapd
state: started
enabled: true
- name: Create snapd socket symlink
file:
src: /var/lib/snapd/snapd.socket
dest: /run/snapd.socket
state: link
failed_when: false
- name: Wait for snapd to be ready
wait_for:
path: /run/snapd.socket
timeout: 30
tasks:
- name: Install Incus via snap
command: snap install incus --classic
register: incus_install_result
failed_when: false
- name: Wait for Incus to initialize
wait_for:
timeout: 30
delegate_to: localhost
- name: Initialize Incus (standalone mode)
command: incus init --auto
register: incus_init_result
failed_when: false
- name: Display Incus init result
debug:
var: incus_init_result.stdout_lines
when: incus_init_result.stdout_lines is defined
- name: Create OVN network for Veza
command: |
incus network create veza-ovn \
--type=ovn \
network=veza-ovn \
ipv4.address=10.10.0.1/24 \
ipv4.nat=true \
ipv6.address=fd42:10:10::1/64 \
ipv6.nat=true
register: ovn_network_result
failed_when: false
- name: Display OVN network creation result
debug:
var: ovn_network_result.stdout_lines
when: ovn_network_result.stdout_lines is defined
- name: Create Veza network profile
shell: |
incus profile create veza || true
incus profile set veza security.nesting=true
incus profile set veza security.privileged=false
incus profile device add veza root disk path=/ pool=default
incus profile device add veza eth0 nic nictype=ovn parent=veza-ovn
register: profile_result
failed_when: false
- name: Display profile creation result
debug:
var: profile_result.stdout_lines
when: profile_result.stdout_lines is defined
- name: Verify Incus is running
command: incus list
register: incus_status
failed_when: false
- name: Display Incus status
debug:
var: incus_status.stdout_lines
when: incus_status.stdout_lines is defined
- name: Verify OVN network exists
command: incus network list
register: network_list
failed_when: false
- name: Display network list
debug:
var: network_list.stdout_lines
when: network_list.stdout_lines is defined
- name: Verify Veza profile exists
command: incus profile list
register: profile_list
failed_when: false
- name: Display profile list
debug:
var: profile_list.stdout_lines
when: profile_list.stdout_lines is defined
post_tasks:
- name: Show Incus version
command: incus version
register: incus_version
- name: Display Incus version
debug:
var: incus_version.stdout_lines
- name: Show system resources
command: incus info
register: incus_info
- name: Display Incus info
debug:
var: incus_info.stdout_lines

@@ -0,0 +1,150 @@
---
# Create Incus containers for Veza V5 Ultra deployment
# Creates all necessary containers with proper networking
- name: Create Incus containers for Veza V5 Ultra
hosts: edge
become: true
gather_facts: true
vars:
containers:
- name: veza-haproxy
image: debian/bookworm
profile: veza
cpu: 2
memory: 2GB
disk: 10GB
ip: 10.10.0.100
ports:
- "80:80"
- "443:443"
- name: veza-backend
image: debian/bookworm
profile: veza
cpu: 4
memory: 4GB
disk: 20GB
ip: 10.10.0.101
ports:
- "8080:8080"
- name: veza-chat
image: debian/bookworm
profile: veza
cpu: 2
memory: 2GB
disk: 10GB
ip: 10.10.0.102
ports:
- "8081:8081"
- name: veza-stream
image: debian/bookworm
profile: veza
cpu: 2
memory: 2GB
disk: 20GB
ip: 10.10.0.103
ports:
- "8082:8082"
- name: veza-web
image: debian/bookworm
profile: veza
cpu: 2
memory: 2GB
disk: 10GB
ip: 10.10.0.104
ports:
- "3000:3000"
tasks:
- name: Create Veza containers
command: |
incus launch {{ item.image }} {{ item.name }} \
--profile {{ item.profile }} \
--config limits.cpu={{ item.cpu }} \
--config limits.memory={{ item.memory }} \
--config limits.disk={{ item.disk }} \
--config boot.autostart=true \
--config boot.autostart.delay=10
register: container_create_result
failed_when: false
loop: "{{ containers }}"
- name: Display container creation results
debug:
msg: "Container {{ item.item.name }}: {{ 'Created' if item.rc == 0 else 'Failed' }}"
loop: "{{ container_create_result.results }}"
- name: Configure container networking
command: |
incus config device set {{ item.name }} eth0 ipv4.address={{ item.ip }}/24
register: network_config_result
failed_when: false
loop: "{{ containers }}"
- name: Display networking results
debug:
msg: "Network config {{ item.item.name }}: {{ 'Success' if item.rc == 0 else 'Failed' }}"
loop: "{{ network_config_result.results }}"
- name: Add proxy devices for external access
command: |
incus config device add {{ item.0.name }} proxy{{ item.1.split(':')[0] }} proxy \
listen=tcp:0.0.0.0:{{ item.1.split(':')[0] }} \
connect=tcp:127.0.0.1:{{ item.1.split(':')[1] }}
register: proxy_result
failed_when: false
# subelements pairs each container (item.0) with each of its ports (item.1)
loop: "{{ containers | subelements('ports', skip_missing=True) }}"
- name: Start all containers
command: incus start {{ item.name }}
register: start_result
failed_when: false
loop: "{{ containers }}"
- name: Display start results
debug:
msg: "Container {{ item.item.name }}: {{ 'Started' if item.rc == 0 else 'Failed to start' }}"
loop: "{{ start_result.results }}"
- name: Wait for containers to be ready
wait_for:
port: 22
host: "{{ item.ip }}"
timeout: 60
register: container_ready
ignore_errors: true
loop: "{{ containers }}"
- name: Display container readiness
debug:
msg: "Container {{ item.item.name }} ({{ item.item.ip }}): {{ 'Ready' if not (item.failed | default(false)) else 'Not ready' }}"
loop: "{{ container_ready.results }}"
- name: List all containers
command: incus list
register: container_list
- name: Display container list
debug:
var: container_list.stdout_lines
- name: Show container network configuration
command: incus network show veza-ovn
register: network_show
- name: Display network configuration
debug:
var: network_show.stdout_lines
post_tasks:
- name: Verify all containers are running
command: incus list --format=json
register: containers_json
- name: Display running containers
debug:
msg: "Running containers: {{ containers_json.stdout | from_json | map(attribute='name') | list }}"

@@ -0,0 +1,286 @@
---
# Configure HAProxy + Let's Encrypt ACME in container
# Sets up SSL termination and request routing
- name: Configure HAProxy + Let's Encrypt ACME for Veza V5 Ultra
hosts: edge
become: true
gather_facts: true
vars:
# domain and acme_email are taken from group_vars/all.yml or -e overrides
haproxy_container: "veza-haproxy"
webroot_port: 8888
tasks:
- name: Install HAProxy and ACME tools in container
shell: |
incus exec {{ haproxy_container }} -- apt update
incus exec {{ haproxy_container }} -- apt install -y haproxy dehydrated nginx-light
register: install_result
failed_when: false
- name: Display installation result
debug:
var: install_result.stdout_lines
- name: Create ACME webroot directory
shell: |
incus exec {{ haproxy_container }} -- mkdir -p /var/www/acme-challenge
incus exec {{ haproxy_container }} -- chown -R www-data:www-data /var/www/acme-challenge
register: webroot_result
failed_when: false
- name: Configure nginx for ACME challenges
shell: |
incus exec {{ haproxy_container }} -- tee /etc/nginx/sites-available/acme << 'EOF'
server {
listen 127.0.0.1:{{ webroot_port }};
server_name _;
root /var/www/acme-challenge;
location /.well-known/acme-challenge/ {
try_files $uri =404;
}
}
EOF
register: nginx_config_result
failed_when: false
- name: Enable nginx ACME site
shell: |
incus exec {{ haproxy_container }} -- ln -sf /etc/nginx/sites-available/acme /etc/nginx/sites-enabled/
incus exec {{ haproxy_container }} -- rm -f /etc/nginx/sites-enabled/default
incus exec {{ haproxy_container }} -- systemctl restart nginx
register: nginx_enable_result
failed_when: false
- name: Configure dehydrated for Let's Encrypt
command: incus exec {{ haproxy_container }} -- bash -c 'echo "CA=\"https://acme-v02.api.letsencrypt.org/directory\"" > /etc/dehydrated/config'
register: dehydrated_config_result
failed_when: false
- name: Add CHALLENGETYPE to dehydrated config
command: incus exec {{ haproxy_container }} -- bash -c 'echo "CHALLENGETYPE=\"http-01\"" >> /etc/dehydrated/config'
register: dehydrated_config_result2
failed_when: false
- name: Add WELLKNOWN to dehydrated config
command: incus exec {{ haproxy_container }} -- bash -c 'echo "WELLKNOWN=\"/var/www/acme-challenge\"" >> /etc/dehydrated/config'
register: dehydrated_config_result3
failed_when: false
- name: Add CONTACT_EMAIL to dehydrated config
command: incus exec {{ haproxy_container }} -- bash -c 'echo "CONTACT_EMAIL=\"{{ acme_email }}\"" >> /etc/dehydrated/config'
register: dehydrated_config_result4
failed_when: false
- name: Add HOOK to dehydrated config
command: incus exec {{ haproxy_container }} -- bash -c 'echo "HOOK=\"/etc/dehydrated/hook.sh\"" >> /etc/dehydrated/config'
register: dehydrated_config_result
failed_when: false
- name: Create dehydrated hook script
command: |
incus exec {{ haproxy_container }} -- bash -c 'cat > /etc/dehydrated/hook.sh << "EOF"
#!/bin/bash
# Dehydrated hook for HAProxy certificate management
case "$1" in
"deploy_cert")
# Deploy certificate to HAProxy
cat "$3" "$5" > /etc/haproxy/certs/${2}.pem
systemctl reload haproxy
;;
"clean_challenge")
# Clean up challenge files
rm -f /var/www/acme-challenge/*
;;
"deploy_challenge")
# Deploy challenge file
cp "$2" "/var/www/acme-challenge/$3"
;;
"unchanged_cert")
# Certificate unchanged
;;
esac
EOF'
register: hook_script_result
failed_when: false
- name: Make hook script executable
command: |
incus exec {{ haproxy_container }} -- chmod +x /etc/dehydrated/hook.sh
register: hook_executable_result
failed_when: false
- name: Create HAProxy configuration
shell: |
incus exec {{ haproxy_container }} -- tee /etc/haproxy/haproxy.cfg << 'EOF'
global
daemon
user haproxy
group haproxy
log stdout local0
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
tune.ssl.default-dh-param 2048
defaults
mode http
log global
option httplog
option dontlognull
option log-health-checks
option forwardfor
option httpchk GET /health
timeout connect 5000
timeout client 50000
timeout server 50000
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
# ACME challenge backend
backend acme
server acme 127.0.0.1:{{ webroot_port }} check
# API backend
backend be_api
balance roundrobin
option httpchk GET /api/health
http-check expect status 200
server api1 10.10.0.101:8080 check inter 2000 rise 2 fall 3
# WebSocket backend
backend be_ws
mode tcp
balance roundrobin
server ws1 10.10.0.102:8081 check inter 2000 rise 2 fall 3
# Stream backend
backend be_stream
balance roundrobin
option httpchk GET /stream/health
http-check expect status 200
server stream1 10.10.0.103:8082 check inter 2000 rise 2 fall 3
# Web frontend backend
backend be_web
balance roundrobin
option httpchk GET /
http-check expect status 200
server web1 10.10.0.104:3000 check inter 2000 rise 2 fall 3
# HTTP frontend (redirect to HTTPS)
frontend http_frontend
bind *:80
acl acme_challenge path_beg /.well-known/acme-challenge/
use_backend acme if acme_challenge
redirect scheme https code 301 if !acme_challenge
# HTTPS frontend
frontend https_frontend
bind *:443 ssl crt /etc/haproxy/certs/{{ domain }}.pem alpn h2,http/1.1
# Security headers
http-response set-header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
http-response set-header X-Content-Type-Options "nosniff"
http-response set-header X-Frame-Options "DENY"
http-response set-header X-XSS-Protection "1; mode=block"
http-response set-header Referrer-Policy "no-referrer"
http-response set-header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; connect-src 'self' wss:;"
# Routing rules
acl is_api path_beg /api
acl is_ws path_beg /ws
acl is_stream path_beg /stream
use_backend be_api if is_api
use_backend be_ws if is_ws
use_backend be_stream if is_stream
default_backend be_web
# Statistics
listen stats
bind *:8404
stats enable
stats uri /stats
stats refresh 30s
stats admin if TRUE
EOF
register: haproxy_config_result
failed_when: false
- name: Create HAProxy certificates directory
command: |
incus exec {{ haproxy_container }} -- mkdir -p /etc/haproxy/certs
register: certs_dir_result
failed_when: false
- name: Generate self-signed certificate (temporary)
shell: |
incus exec {{ haproxy_container }} -- bash -c "openssl req -x509 -newkey rsa:4096 -keyout /tmp/selfsigned.key -out /tmp/selfsigned.crt -days 365 -nodes -subj '/C=FR/ST=France/L=Paris/O=Veza/OU=IT/CN={{ domain }}' && cat /tmp/selfsigned.crt /tmp/selfsigned.key > /etc/haproxy/certs/{{ domain }}.pem && rm -f /tmp/selfsigned.key /tmp/selfsigned.crt"
register: self_signed_result
failed_when: false
- name: Start HAProxy service
shell: |
incus exec {{ haproxy_container }} -- systemctl enable haproxy
incus exec {{ haproxy_container }} -- systemctl start haproxy
register: haproxy_start_result
failed_when: false
- name: Check HAProxy status
command: |
incus exec {{ haproxy_container }} -- systemctl status haproxy
register: haproxy_status
failed_when: false
- name: Display HAProxy status
debug:
var: haproxy_status.stdout_lines
- name: Request Let's Encrypt certificate
command: |
incus exec {{ haproxy_container }} -- dehydrated -c -d {{ domain }}
register: acme_cert_result
failed_when: false
- name: Display ACME certificate result
debug:
var: acme_cert_result.stdout_lines
- name: Setup certificate renewal cron
shell: |
incus exec {{ haproxy_container }} -- tee /etc/cron.d/dehydrated << 'EOF'
0 12 * * * root /usr/bin/dehydrated -c
EOF
register: cron_result
failed_when: false
- name: Test HAProxy configuration
command: |
incus exec {{ haproxy_container }} -- haproxy -c -f /etc/haproxy/haproxy.cfg
register: haproxy_test_result
failed_when: false
- name: Display HAProxy test result
debug:
var: haproxy_test_result.stdout_lines
post_tasks:
- name: Show HAProxy statistics
command: |
incus exec {{ haproxy_container }} -- curl -s http://localhost:8404/stats
register: haproxy_stats
failed_when: false
- name: Display HAProxy statistics
debug:
var: haproxy_stats.stdout_lines
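A few manual checks are worth running once this playbook completes — a hedged bash sketch, reusing the container name, ports, and paths from the tasks above (the challenge path is illustrative):
# Validate the rendered config inside the container
incus exec veza-haproxy -- haproxy -c -f /etc/haproxy/haproxy.cfg
# Port 80 should 301-redirect everything except ACME challenges
curl -sI http://localhost/ | head -n1                                  # expect 301
curl -sI http://localhost/.well-known/acme-challenge/test | head -n1   # expect no redirect
# Stats endpoint from the 'listen stats' section
curl -s http://localhost:8404/stats | head -n5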

View file

@ -0,0 +1,269 @@
---
# Configure HAProxy inside container with Let's Encrypt ACME HTTP-01
# Handles SSL termination and routing for Veza V5 Ultra
- name: Configure HAProxy in container with ACME
hosts: edge
become: true
gather_facts: true
vars:
haproxy_container: veza-haproxy
acme_webroot_port: 8888
haproxy_certs_dir: /etc/haproxy/certs
acme_webroot_dir: /var/www/acme-challenge
tasks:
- name: Install HAProxy and ACME tools in container
command: |
incus exec {{ haproxy_container }} -- bash -c "
apt update && apt install -y \
haproxy \
curl \
wget \
socat \
cron \
nginx-light \
openssl
"
register: haproxy_install_result
failed_when: false
- name: Display HAProxy installation result
debug:
var: haproxy_install_result.stdout_lines
when: haproxy_install_result.stdout_lines is defined
- name: Create ACME webroot directory
command: |
incus exec {{ haproxy_container }} -- mkdir -p {{ acme_webroot_dir }}
register: webroot_create_result
failed_when: false
- name: Create HAProxy certificates directory
command: |
incus exec {{ haproxy_container }} -- mkdir -p {{ haproxy_certs_dir }}
register: certs_dir_result
failed_when: false
- name: Install dehydrated for ACME
command: |
incus exec {{ haproxy_container }} -- bash -c "
cd /opt && \
git clone https://github.com/dehydrated-io/dehydrated.git && \
chmod +x dehydrated/dehydrated
"
register: dehydrated_install_result
failed_when: false
- name: Create dehydrated configuration
command: |
incus exec {{ haproxy_container }} -- bash -c "
cat > /opt/dehydrated/config << 'EOF'
WELLKNOWN={{ acme_webroot_dir }}
DOMAINS_TXT=/opt/dehydrated/domains.txt
HOOK=/opt/dehydrated/hook.sh
CHALLENGETYPE=http-01
EOF
"
register: dehydrated_config_result
failed_when: false
- name: Create domains file for ACME
command: |
incus exec {{ haproxy_container }} -- bash -c "
echo '{{ domain }}' > /opt/dehydrated/domains.txt
"
register: domains_file_result
failed_when: false
- name: Create ACME hook script
shell: |
incus exec {{ haproxy_container }} -- bash -c "
cat > /opt/dehydrated/hook.sh << 'EOF'
#!/bin/bash
case \"\$1\" in
deploy_challenge)
# Start nginx for ACME challenge
nginx -c /etc/nginx/nginx.conf -g 'daemon on;'
;;
clean_challenge)
# Stop nginx after challenge
nginx -s quit
;;
deploy_cert)
# Combine cert and key for HAProxy
cat \$3 \$5 > {{ haproxy_certs_dir }}/{{ domain }}.pem
# Reload HAProxy
systemctl reload haproxy
;;
esac
EOF
chmod +x /opt/dehydrated/hook.sh
"
register: hook_script_result
failed_when: false
- name: Create nginx config for ACME webroot
command: |
incus exec {{ haproxy_container }} -- bash -c "
cat > /etc/nginx/nginx.conf << 'EOF'
events { worker_connections 1024; }
http {
server {
listen {{ acme_webroot_port }};
location /.well-known/acme-challenge/ {
root {{ acme_webroot_dir }};
}
}
}
EOF
"
register: nginx_config_result
failed_when: false
- name: Create HAProxy configuration
command: |
incus exec {{ haproxy_container }} -- bash -c "
cat > /etc/haproxy/haproxy.cfg << 'EOF'
global
log stdout local0
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
maxconn 20000
ssl-default-bind-options no-sslv3 no-tls-tickets
ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
defaults
log global
mode http
option httplog
option dontlognull
option forwardfor
timeout connect 5s
timeout client 50s
timeout server 50s
timeout http-request 5s
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
frontend http_frontend
bind *:80
acl acme_challenge path_beg /.well-known/acme-challenge/
use_backend acme_backend if acme_challenge
redirect scheme https code 301 if !acme_challenge
frontend https_frontend
bind *:443 ssl crt {{ haproxy_certs_dir }}/{{ domain }}.pem alpn h2,http/1.1
http-response set-header Strict-Transport-Security \"max-age=31536000; includeSubDomains; preload\"
http-response set-header X-Content-Type-Options nosniff
http-response set-header X-Frame-Options DENY
http-response set-header Referrer-Policy no-referrer
http-response set-header Content-Security-Policy \"default-src 'self'; connect-src 'self' wss://{{ domain }} https://{{ domain }}; img-src 'self' data:; script-src 'self'; style-src 'self' 'unsafe-inline'\"
acl is_api path_beg /api
acl is_ws path_beg /ws
acl is_stream path_beg /stream
use_backend api_backend if is_api
use_backend ws_backend if is_ws
use_backend stream_backend if is_stream
default_backend web_backend
backend acme_backend
server acme_server 127.0.0.1:{{ acme_webroot_port }}
backend api_backend
server api_server veza-backend:{{ veza_backend_port }} check
backend ws_backend
server ws_server veza-chat:{{ veza_chat_port }} check
backend stream_backend
server stream_server veza-stream:{{ veza_stream_port }} check
backend web_backend
server web_server veza-web:{{ veza_web_port }} check
EOF
"
register: haproxy_config_result
failed_when: false
- name: Create self-signed certificate for initial setup
command: |
incus exec {{ haproxy_container }} -- bash -c "
openssl req -x509 -newkey rsa:2048 -keyout {{ haproxy_certs_dir }}/{{ domain }}.key -out {{ haproxy_certs_dir }}/{{ domain }}.crt -days 365 -nodes -subj '/CN={{ domain }}' && \
cat {{ haproxy_certs_dir }}/{{ domain }}.crt {{ haproxy_certs_dir }}/{{ domain }}.key > {{ haproxy_certs_dir }}/{{ domain }}.pem
"
register: selfsigned_cert_result
failed_when: false
- name: Start HAProxy service
shell: |
incus exec {{ haproxy_container }} -- systemctl enable haproxy && \
incus exec {{ haproxy_container }} -- systemctl start haproxy
register: haproxy_start_result
failed_when: false
- name: Display HAProxy start result
debug:
var: haproxy_start_result.stdout_lines
when: haproxy_start_result.stdout_lines is defined
- name: Check HAProxy status
command: |
incus exec {{ haproxy_container }} -- systemctl status haproxy
register: haproxy_status_result
failed_when: false
- name: Display HAProxy status
debug:
var: haproxy_status_result.stdout_lines
when: haproxy_status_result.stdout_lines is defined
- name: Create ACME renewal cron job
command: |
incus exec {{ haproxy_container }} -- bash -c "
echo '0 2 * * * /opt/dehydrated/dehydrated -c' | crontab -
"
register: cron_setup_result
failed_when: false
- name: Display cron setup result
debug:
var: cron_setup_result.stdout_lines
when: cron_setup_result.stdout_lines is defined
post_tasks:
- name: Test HAProxy configuration
command: |
incus exec {{ haproxy_container }} -- haproxy -c -f /etc/haproxy/haproxy.cfg
register: haproxy_test_result
failed_when: false
- name: Display HAProxy test result
debug:
var: haproxy_test_result.stdout_lines
when: haproxy_test_result.stdout_lines is defined
- name: Show final HAProxy status
shell: |
incus exec {{ haproxy_container }} -- netstat -tlnp | grep haproxy
register: final_haproxy_status
failed_when: false
- name: Display final HAProxy status
debug:
var: final_haproxy_status.stdout_lines
when: final_haproxy_status.stdout_lines is defined
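One step this playbook never performs is ACME account registration, which dehydrated requires before its first signing run; a hedged sketch of the one-time manual sequence, using the install path and cert directory from the tasks above (--register and --accept-terms are standard dehydrated flags):
incus exec veza-haproxy -- /opt/dehydrated/dehydrated --register --accept-terms
incus exec veza-haproxy -- /opt/dehydrated/dehydrated -c      # sign everything in domains.txt
incus exec veza-haproxy -- ls -l /etc/haproxy/certs/          # hook.sh should have written the combined PEM
incus exec veza-haproxy -- systemctl reload haproxy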

View file

@ -0,0 +1,131 @@
---
- name: Configure HAProxy with Let's Encrypt (fixed version)
hosts: edge
become: true
gather_facts: true
vars:
domain: "{{ domain | default('veza.talas.fr') }}"
acme_email: "{{ acme_email | default('ops@talas.fr') }}"
haproxy_container: "veza-haproxy"
tasks:
- name: Install base packages in HAProxy
shell: |
incus exec {{ haproxy_container }} -- apt update
incus exec {{ haproxy_container }} -- apt install -y haproxy certbot nginx-light curl
register: install_result
failed_when: false
- name: Create required directories
command: |
incus exec {{ haproxy_container }} -- mkdir -p /etc/haproxy/certs /var/www/acme
- name: Create the HAProxy configuration directly in the container
command: |
incus exec {{ haproxy_container }} -- bash -c 'cat > /etc/haproxy/haproxy.cfg << EOF
global
daemon
maxconn 2000
log stdout local0
tune.ssl.default-dh-param 2048
defaults
mode http
log global
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
frontend http_front
bind *:80
acl letsencrypt path_beg /.well-known/acme-challenge/
use_backend letsencrypt if letsencrypt
redirect scheme https code 301 if !letsencrypt
backend letsencrypt
server certbot 127.0.0.1:8888
frontend https_front
bind *:443 ssl crt /etc/haproxy/certs/{{ domain }}.pem alpn h2,http/1.1
http-response set-header Strict-Transport-Security "max-age=31536000; includeSubDomains"
acl is_api path_beg /api
acl is_ws path_beg /ws
acl is_stream path_beg /stream
use_backend be_api if is_api
use_backend be_ws if is_ws
use_backend be_stream if is_stream
default_backend be_web
backend be_api
balance roundrobin
server api1 10.20.0.101:8080 check
backend be_ws
balance roundrobin
server ws1 10.20.0.102:8081 check
backend be_stream
balance roundrobin
server stream1 10.20.0.103:8082 check
backend be_web
balance roundrobin
server web1 10.20.0.104:3000 check
EOF'
- name: Create a temporary self-signed certificate
shell: |
incus exec {{ haproxy_container }} -- openssl req -x509 -newkey rsa:2048 \
-keyout /etc/haproxy/certs/{{ domain }}.pem \
-out /etc/haproxy/certs/{{ domain }}.pem \
-days 365 -nodes -subj "/CN={{ domain }}"
- name: Start HAProxy
shell: |
incus exec {{ haproxy_container }} -- systemctl enable haproxy
incus exec {{ haproxy_container }} -- systemctl restart haproxy
- name: Configure nginx for ACME
command: |
incus exec {{ haproxy_container }} -- bash -c 'cat > /etc/nginx/sites-available/acme << EOF
server {
listen 127.0.0.1:8888;
root /var/www/acme;
location /.well-known/acme-challenge/ {
try_files \$uri =404;
}
}
EOF'
- name: Enable the nginx site
shell: |
incus exec {{ haproxy_container }} -- ln -sf /etc/nginx/sites-available/acme /etc/nginx/sites-enabled/
incus exec {{ haproxy_container }} -- rm -f /etc/nginx/sites-enabled/default
incus exec {{ haproxy_container }} -- systemctl restart nginx
- name: Obtain the Let's Encrypt certificate
shell: |
incus exec {{ haproxy_container }} -- certbot certonly \
--webroot -w /var/www/acme \
-d {{ domain }} \
--email {{ acme_email }} \
--agree-tos --non-interactive
register: certbot_result
failed_when: false
- name: Create the PEM for HAProxy
shell: |
incus exec {{ haproxy_container }} -- bash -c \
'cat /etc/letsencrypt/live/{{ domain }}/fullchain.pem \
/etc/letsencrypt/live/{{ domain }}/privkey.pem \
> /etc/haproxy/certs/{{ domain }}.pem'
when: certbot_result.rc == 0
- name: Reload HAProxy
command: |
incus exec {{ haproxy_container }} -- systemctl reload haproxy
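This variant obtains the certificate once but leaves renewal to certbot's timer, which refreshes the files under /etc/letsencrypt without rebuilding the combined PEM. A minimal sketch of a deploy hook that would close that gap, assuming certbot's standard renewal-hooks directory and the paths from the "Create the PEM for HAProxy" task (the script name and hard-coded domain are illustrative):
# /etc/letsencrypt/renewal-hooks/deploy/haproxy-pem.sh (inside veza-haproxy)
#!/bin/sh
set -e
DOMAIN=veza.talas.fr
cat /etc/letsencrypt/live/$DOMAIN/fullchain.pem \
    /etc/letsencrypt/live/$DOMAIN/privkey.pem \
    > /etc/haproxy/certs/$DOMAIN.pem
systemctl reload haproxy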

View file

@ -0,0 +1,298 @@
---
# Deploy Veza V5 Ultra applications in containers (simplified version)
# Builds and runs backend, chat, stream, and web services
- name: Deploy Veza V5 Ultra applications
hosts: edge
become: true
gather_facts: true
vars:
domain: "{{ domain | default('veza.talas.fr') }}"
backend_container: "veza-backend"
chat_container: "veza-chat"
stream_container: "veza-stream"
web_container: "veza-web"
tasks:
- name: Deploy Go Backend API
block:
- name: Install Go in backend container
shell: |
incus exec {{ backend_container }} -- apt update
incus exec {{ backend_container }} -- apt install -y wget git
incus exec {{ backend_container }} -- wget https://go.dev/dl/go1.21.5.linux-amd64.tar.gz
incus exec {{ backend_container }} -- tar -C /usr/local -xzf go1.21.5.linux-amd64.tar.gz
incus exec {{ backend_container }} -- bash -c "echo 'export PATH=\$PATH:/usr/local/go/bin' >> /root/.bashrc"  # append inside the container, not on the host
register: go_install_result
failed_when: false
- name: Create backend application directory
command: |
incus exec {{ backend_container }} -- mkdir -p /opt/veza-backend
register: backend_dir_result
failed_when: false
- name: Create simple backend server
copy:
content: |
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{"status":"ok","service":"veza-backend"}`)
})
http.HandleFunc("/api/", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{"message":"Veza V5 Ultra Backend API","version":"1.0.0"}`)
})
log.Printf("Backend API server starting on port %s", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
dest: /tmp/main.go
- name: Copy backend code to container
command: |
incus file push /tmp/main.go {{ backend_container }}/opt/veza-backend/main.go
register: backend_code_result
failed_when: false
- name: Build backend application
command: |
incus exec {{ backend_container }} -- bash -c "cd /opt/veza-backend && /usr/local/go/bin/go mod init veza-backend && /usr/local/go/bin/go build -ldflags '-s -w' -o veza-backend main.go"
register: backend_build_result
failed_when: false
- name: Create backend systemd service
copy:
content: |
[Unit]
Description=Veza V5 Ultra Backend API
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/veza-backend
ExecStart=/opt/veza-backend/veza-backend
Restart=always
RestartSec=5
Environment=PORT=8080
Environment=DATABASE_URL=postgresql://veza:password@localhost:5432/veza_db
Environment=REDIS_URL=redis://localhost:6379
Environment=JWT_SECRET=super-secret-jwt-key
Environment=JWT_REFRESH_SECRET=super-secret-refresh-key
[Install]
WantedBy=multi-user.target
dest: /tmp/veza-backend.service
- name: Copy systemd service to container
command: |
incus file push /tmp/veza-backend.service {{ backend_container }}/etc/systemd/system/veza-backend.service
register: backend_service_result
failed_when: false
- name: Start backend service
shell: |
incus exec {{ backend_container }} -- systemctl daemon-reload
incus exec {{ backend_container }} -- systemctl enable veza-backend
incus exec {{ backend_container }} -- systemctl start veza-backend
register: backend_start_result
failed_when: false
- name: Check backend service status
command: |
incus exec {{ backend_container }} -- systemctl status veza-backend
register: backend_status
failed_when: false
- name: Display backend status
debug:
var: backend_status.stdout_lines
rescue:
- name: Backend deployment failed
debug:
msg: "Backend deployment failed, continuing with other services"
- name: Deploy simple web application
block:
- name: Install Node.js in web container
shell: |
incus exec {{ web_container }} -- apt update
incus exec {{ web_container }} -- apt install -y curl nginx
incus exec {{ web_container }} -- bash -c "curl -fsSL https://deb.nodesource.com/setup_18.x | bash -"  # pipe must run inside the container
incus exec {{ web_container }} -- apt install -y nodejs
register: node_install_result
failed_when: false
- name: Create web application directory
command: |
incus exec {{ web_container }} -- mkdir -p /var/www/veza
register: web_dir_result
failed_when: false
- name: Create simple web page
copy:
content: |
<!DOCTYPE html>
<html>
<head>
<title>Veza V5 Ultra</title>
<style>
body { font-family: Arial, sans-serif; margin: 40px; }
.container { max-width: 800px; margin: 0 auto; }
.header { background: #2c3e50; color: white; padding: 20px; border-radius: 5px; }
.content { padding: 20px; }
.status { background: #27ae60; color: white; padding: 10px; border-radius: 3px; margin: 10px 0; }
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>🎵 Veza V5 Ultra</h1>
<p>Collaborative Audio Streaming Platform</p>
</div>
<div class="content">
<div class="status">✅ System Online</div>
<h2>Services Status</h2>
<ul>
<li>Backend API: <span id="api-status">Checking...</span></li>
<li>Chat WebSocket: <span id="chat-status">Checking...</span></li>
<li>Stream HLS: <span id="stream-status">Checking...</span></li>
</ul>
<h2>Features</h2>
<ul>
<li>Real-time collaborative audio streaming</li>
<li>WebSocket chat integration</li>
<li>HLS video streaming</li>
<li>Modern React frontend</li>
</ul>
</div>
</div>
<script>
// Simple health checks
fetch('/api/health').then(r => r.json()).then(d => {
document.getElementById('api-status').textContent = '✅ Online';
}).catch(() => {
document.getElementById('api-status').textContent = '❌ Offline';
});
</script>
</body>
</html>
dest: /tmp/index.html
- name: Copy web page to container
command: |
incus file push /tmp/index.html {{ web_container }}/var/www/veza/index.html
register: web_page_result
failed_when: false
- name: Configure nginx
copy:
content: |
server {
listen 3000;
server_name _;
root /var/www/veza;
index index.html;
location / {
try_files $uri $uri/ =404;
}
location /api/ {
proxy_pass http://10.10.0.101:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
location /ws {
proxy_pass http://10.10.0.102:8081;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
location /stream/ {
proxy_pass http://10.10.0.103:8082;
proxy_set_header Host $host;
}
}
dest: /tmp/veza-nginx.conf
- name: Copy nginx config to container
command: |
incus file push /tmp/veza-nginx.conf {{ web_container }}/etc/nginx/sites-available/veza
register: nginx_config_result
failed_when: false
- name: Enable nginx site
shell: |
incus exec {{ web_container }} -- ln -sf /etc/nginx/sites-available/veza /etc/nginx/sites-enabled/
incus exec {{ web_container }} -- rm -f /etc/nginx/sites-enabled/default
incus exec {{ web_container }} -- systemctl restart nginx
register: nginx_enable_result
failed_when: false
- name: Check web service status
command: |
incus exec {{ web_container }} -- systemctl status nginx
register: web_status
failed_when: false
- name: Display web status
debug:
var: web_status.stdout_lines
rescue:
- name: Web deployment failed
debug:
msg: "Web deployment failed"
post_tasks:
- name: Clean up temporary files
file:
path: "{{ item }}"
state: absent
loop:
- /tmp/main.go
- /tmp/veza-backend.service
- /tmp/index.html
- /tmp/veza-nginx.conf
failed_when: false
- name: Show all running services
shell: |
incus exec {{ backend_container }} -- systemctl list-units --type=service --state=running | grep veza || true
incus exec {{ web_container }} -- systemctl list-units --type=service --state=running | grep nginx || true
register: all_services
failed_when: false
- name: Display all services
debug:
var: all_services.stdout_lines
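To confirm the wiring this simplified deployment sets up, a hedged spot-check from the edge host (ports and upstream addresses come from the nginx site config above; /api/health answers only once the backend service is running):
incus exec veza-web -- nginx -t
incus exec veza-web -- curl -s http://localhost:3000/ | grep -c 'Veza V5 Ultra'   # expect >= 1
incus exec veza-web -- curl -s http://localhost:3000/api/health                   # proxied to 10.10.0.101:8080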

View file

@ -0,0 +1,599 @@
---
# Deploy Veza V5 Ultra applications in containers
# Builds and runs backend, chat, stream, and web services
- name: Deploy Veza V5 Ultra applications
hosts: edge
become: true
gather_facts: true
vars:
domain: "{{ domain | default('veza.talas.fr') }}"
backend_container: "veza-backend"
chat_container: "veza-chat"
stream_container: "veza-stream"
web_container: "veza-web"
tasks:
- name: Deploy Go Backend API
block:
- name: Install Go in backend container
shell: |
incus exec {{ backend_container }} -- apt update
incus exec {{ backend_container }} -- apt install -y wget git
incus exec {{ backend_container }} -- wget https://go.dev/dl/go1.21.5.linux-amd64.tar.gz
incus exec {{ backend_container }} -- tar -C /usr/local -xzf go1.21.5.linux-amd64.tar.gz
incus exec {{ backend_container }} -- bash -c "echo 'export PATH=\$PATH:/usr/local/go/bin' >> /root/.bashrc"  # append inside the container, not on the host
register: go_install_result
failed_when: false
- name: Display Go installation result
debug:
var: go_install_result.stdout_lines
- name: Create backend application directory
command: |
incus exec {{ backend_container }} -- mkdir -p /opt/veza-backend
register: backend_dir_result
failed_when: false
- name: Copy backend source code (placeholder)
command: |
incus exec {{ backend_container }} -- bash -c 'cat > /opt/veza-backend/main.go << "EOF"
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{"status":"ok","service":"veza-backend"}`)
})
http.HandleFunc("/api/", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{"message":"Veza V5 Ultra Backend API","version":"1.0.0"}`)
})
log.Printf("Backend API server starting on port %s", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
EOF'
register: backend_code_result
failed_when: false
- name: Build backend application
command: |
incus exec {{ backend_container }} -- bash -c "cd /opt/veza-backend && /usr/local/go/bin/go mod init veza-backend && /usr/local/go/bin/go build -ldflags '-s -w' -o veza-backend main.go"
register: backend_build_result
failed_when: false
- name: Create backend systemd service
command: |
incus exec {{ backend_container }} -- bash -c 'cat > /etc/systemd/system/veza-backend.service << "EOF"
[Unit]
Description=Veza V5 Ultra Backend API
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/veza-backend
ExecStart=/opt/veza-backend/veza-backend
Restart=always
RestartSec=5
Environment=PORT=8080
Environment=DATABASE_URL=postgresql://veza:password@localhost:5432/veza_db
Environment=REDIS_URL=redis://localhost:6379
Environment=JWT_SECRET=super-secret-jwt-key
Environment=JWT_REFRESH_SECRET=super-secret-refresh-key
[Install]
WantedBy=multi-user.target
EOF'
register: backend_service_result
failed_when: false
- name: Start backend service
shell: |
incus exec {{ backend_container }} -- systemctl daemon-reload
incus exec {{ backend_container }} -- systemctl enable veza-backend
incus exec {{ backend_container }} -- systemctl start veza-backend
register: backend_start_result
failed_when: false
- name: Check backend service status
command: |
incus exec {{ backend_container }} -- systemctl status veza-backend
register: backend_status
failed_when: false
- name: Display backend status
debug:
var: backend_status.stdout_lines
rescue:
- name: Backend deployment failed
debug:
msg: "Backend deployment failed, continuing with other services"
- name: Deploy Rust Chat Server
block:
- name: Install Rust in chat container
shell: |
incus exec {{ chat_container }} -- apt update
incus exec {{ chat_container }} -- apt install -y curl git
incus exec {{ chat_container }} -- bash -c "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y"  # pipe must run inside the container
incus exec {{ chat_container }} -- bash -c "source /root/.cargo/env && cargo --version"
register: rust_install_result
failed_when: false
- name: Display Rust installation result
debug:
var: rust_install_result.stdout_lines
- name: Create chat application directory
command: |
incus exec {{ chat_container }} -- mkdir -p /opt/veza-chat/src
register: chat_dir_result
failed_when: false
- name: Copy chat source code (placeholder)
command: |
incus exec {{ chat_container }} -- bash -c 'cat > /opt/veza-chat/Cargo.toml << "EOF"
[package]
name = "veza-chat"
version = "0.1.0"
edition = "2021"
[dependencies]
tokio = { version = "1.0", features = ["full"] }
axum = "0.7"
tower = "0.4"
tower-http = { version = "0.5", features = ["cors"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
futures = "0.3"
uuid = { version = "1.0", features = ["v4"] }
tracing = "0.1"
tracing-subscriber = "0.3"
EOF'
register: chat_cargo_result
failed_when: false
- name: Create chat main.rs
shell: |
incus exec {{ chat_container }} -- tee /opt/veza-chat/src/main.rs << 'EOF'
use axum::{
extract::ws::{Message, WebSocket, WebSocketUpgrade},
response::Response,
routing::get,
Router,
};
// split()/next()/send() on the socket come from the futures traits
use futures::{SinkExt, StreamExt};
use std::net::SocketAddr;
use tokio::net::TcpListener;
use tracing::{info, warn};
#[tokio::main]
async fn main() {
tracing_subscriber::fmt::init();
let app = Router::new()
.route("/ws", get(websocket_handler))
.route("/health", get(health_handler));
let addr = SocketAddr::from(([0, 0, 0, 0], 8081));
info!("Chat server starting on {}", addr);
let listener = TcpListener::bind(addr).await.unwrap();
axum::serve(listener, app).await.unwrap();
}
async fn websocket_handler(ws: WebSocketUpgrade) -> Response {
ws.on_upgrade(handle_websocket)
}
async fn handle_websocket(socket: WebSocket) {
info!("New WebSocket connection");
// Simple echo server for now
let (mut sender, mut receiver) = socket.split();
while let Some(msg) = receiver.next().await {
match msg {
Ok(Message::Text(text)) => {
info!("Received: {}", text);
if sender.send(Message::Text(format!("Echo: {}", text))).await.is_err() {
break;
}
}
Ok(Message::Close(_)) => break,
Err(e) => {
warn!("WebSocket error: {}", e);
break;
}
_ => {}
}
}
info!("WebSocket connection closed");
}
async fn health_handler() -> &'static str {
"OK"
}
EOF
register: chat_main_result
failed_when: false
- name: Build chat application
command: |
incus exec {{ chat_container }} -- bash -c "cd /opt/veza-chat && source /root/.cargo/env && cargo build --release"
register: chat_build_result
failed_when: false
- name: Create chat systemd service
shell: |
incus exec {{ chat_container }} -- tee /etc/systemd/system/veza-chat.service << 'EOF'
[Unit]
Description=Veza V5 Ultra Chat Server
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/veza-chat
ExecStart=/opt/veza-chat/target/release/veza-chat
Restart=always
RestartSec=5
Environment=SQLX_OFFLINE=true
[Install]
WantedBy=multi-user.target
EOF
register: chat_service_result
failed_when: false
- name: Start chat service
shell: |
incus exec {{ chat_container }} -- systemctl daemon-reload
incus exec {{ chat_container }} -- systemctl enable veza-chat
incus exec {{ chat_container }} -- systemctl start veza-chat
register: chat_start_result
failed_when: false
- name: Check chat service status
command: |
incus exec {{ chat_container }} -- systemctl status veza-chat
register: chat_status
failed_when: false
- name: Display chat status
debug:
var: chat_status.stdout_lines
rescue:
- name: Chat deployment failed
debug:
msg: "Chat deployment failed, continuing with other services"
- name: Deploy Rust Stream Server
block:
- name: Install Rust in stream container
shell: |
incus exec {{ stream_container }} -- apt update
incus exec {{ stream_container }} -- apt install -y curl git
incus exec {{ stream_container }} -- bash -c "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y"  # pipe must run inside the container
register: stream_rust_install_result
failed_when: false
- name: Create stream application directory
command: |
incus exec {{ stream_container }} -- mkdir -p /opt/veza-stream/src
register: stream_dir_result
failed_when: false
- name: Copy stream source code (placeholder)
shell: |
incus exec {{ stream_container }} -- tee /opt/veza-stream/Cargo.toml << 'EOF'
[package]
name = "veza-stream"
version = "0.1.0"
edition = "2021"
[dependencies]
tokio = { version = "1.0", features = ["full"] }
axum = "0.7"
tower = "0.4"
tower-http = { version = "0.5", features = ["cors", "fs"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tracing = "0.1"
tracing-subscriber = "0.3"
EOF
register: stream_cargo_result
failed_when: false
- name: Create stream main.rs
shell: |
incus exec {{ stream_container }} -- tee /opt/veza-stream/src/main.rs << 'EOF'
use axum::{
body::Body,
extract::Path,
http::StatusCode,
response::Response,
routing::get,
Router,
};
use std::net::SocketAddr;
use tokio::net::TcpListener;
use tracing::info;
#[tokio::main]
async fn main() {
tracing_subscriber::fmt::init();
let app = Router::new()
.route("/stream/health", get(health_handler))
.route("/stream/:file", get(stream_handler));
let addr = SocketAddr::from(([0, 0, 0, 0], 8082));
info!("Stream server starting on {}", addr);
let listener = TcpListener::bind(addr).await.unwrap();
axum::serve(listener, app).await.unwrap();
}
async fn health_handler() -> &'static str {
"OK"
}
async fn stream_handler(Path(file): Path<String>) -> Result<Response, StatusCode> {
info!("Stream request for: {}", file);
// Serve a minimal single-segment playlist for now
if file.ends_with(".m3u8") {
Ok(Response::builder()
.status(200)
.header("Content-Type", "application/vnd.apple.mpegurl")
.body(Body::from(format!("#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:10\n#EXTINF:10.0,\n{}.ts\n#EXT-X-ENDLIST\n", file.replace(".m3u8", ""))))
.unwrap())
} else {
Err(StatusCode::NOT_FOUND)
}
}
EOF
register: stream_main_result
failed_when: false
- name: Build stream application
command: |
incus exec {{ stream_container }} -- bash -c "cd /opt/veza-stream && source /root/.cargo/env && cargo build --release"
register: stream_build_result
failed_when: false
- name: Create stream systemd service
shell: |
incus exec {{ stream_container }} -- tee /etc/systemd/system/veza-stream.service << 'EOF'
[Unit]
Description=Veza V5 Ultra Stream Server
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/veza-stream
ExecStart=/opt/veza-stream/target/release/veza-stream
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
register: stream_service_result
failed_when: false
- name: Start stream service
shell: |
incus exec {{ stream_container }} -- systemctl daemon-reload
incus exec {{ stream_container }} -- systemctl enable veza-stream
incus exec {{ stream_container }} -- systemctl start veza-stream
register: stream_start_result
failed_when: false
- name: Check stream service status
command: |
incus exec {{ stream_container }} -- systemctl status veza-stream
register: stream_status
failed_when: false
- name: Display stream status
debug:
var: stream_status.stdout_lines
rescue:
- name: Stream deployment failed
debug:
msg: "Stream deployment failed, continuing with web service"
- name: Deploy React Web Application
block:
- name: Install Node.js in web container
shell: |
incus exec {{ web_container }} -- apt update
incus exec {{ web_container }} -- apt install -y curl
incus exec {{ web_container }} -- bash -c "curl -fsSL https://deb.nodesource.com/setup_18.x | bash -"  # pipe must run inside the container
incus exec {{ web_container }} -- apt install -y nodejs nginx
register: node_install_result
failed_when: false
- name: Display Node.js installation result
debug:
var: node_install_result.stdout_lines
- name: Create web application directory
command: |
incus exec {{ web_container }} -- mkdir -p /opt/veza-web
register: web_dir_result
failed_when: false
- name: Create simple React app (placeholder)
shell: |
incus exec {{ web_container }} -- tee /opt/veza-web/package.json << 'EOF'
{
"name": "veza-web",
"version": "1.0.0",
"description": "Veza V5 Ultra Web Application",
"main": "index.js",
"scripts": {
"start": "node server.js",
"build": "echo 'Build completed'"
},
"dependencies": {
"express": "^4.18.2"
}
}
EOF
register: web_package_result
failed_when: false
- name: Create simple web server
shell: |
incus exec {{ web_container }} -- tee /opt/veza-web/server.js << 'EOF'
const express = require('express');
const app = express();
const port = process.env.PORT || 3000;
app.use(express.static('public'));
app.get('/', (req, res) => {
res.send(`
<!DOCTYPE html>
<html>
<head>
<title>Veza V5 Ultra</title>
<style>
body { font-family: Arial, sans-serif; margin: 40px; }
.container { max-width: 800px; margin: 0 auto; }
.header { background: #2c3e50; color: white; padding: 20px; border-radius: 5px; }
.content { padding: 20px; }
.status { background: #27ae60; color: white; padding: 10px; border-radius: 3px; margin: 10px 0; }
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>🎵 Veza V5 Ultra</h1>
<p>Collaborative Audio Streaming Platform</p>
</div>
<div class="content">
<div class="status">✅ System Online</div>
<h2>Services Status</h2>
<ul>
<li>Backend API: <span id="api-status">Checking...</span></li>
<li>Chat WebSocket: <span id="chat-status">Checking...</span></li>
<li>Stream HLS: <span id="stream-status">Checking...</span></li>
</ul>
<h2>Features</h2>
<ul>
<li>Real-time collaborative audio streaming</li>
<li>WebSocket chat integration</li>
<li>HLS video streaming</li>
<li>Modern React frontend</li>
</ul>
</div>
</div>
<script>
// Simple health checks
fetch('/api/health').then(r => r.json()).then(d => {
document.getElementById('api-status').textContent = '✅ Online';
}).catch(() => {
document.getElementById('api-status').textContent = '❌ Offline';
});
</script>
</body>
</html>
`);
});
app.listen(port, '0.0.0.0', () => {
console.log(`Veza V5 Ultra web server running on port ${port}`);
});
EOF
register: web_server_result
failed_when: false
- name: Install web dependencies
command: |
incus exec {{ web_container }} -- bash -c "cd /opt/veza-web && npm install"
register: web_install_result
failed_when: false
- name: Create web systemd service
shell: |
incus exec {{ web_container }} -- tee /etc/systemd/system/veza-web.service << 'EOF'
[Unit]
Description=Veza V5 Ultra Web Application
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/veza-web
ExecStart=/usr/bin/node server.js
Restart=always
RestartSec=5
Environment=PORT=3000
[Install]
WantedBy=multi-user.target
EOF
register: web_service_result
failed_when: false
- name: Start web service
shell: |
incus exec {{ web_container }} -- systemctl daemon-reload
incus exec {{ web_container }} -- systemctl enable veza-web
incus exec {{ web_container }} -- systemctl start veza-web
register: web_start_result
failed_when: false
- name: Check web service status
command: |
incus exec {{ web_container }} -- systemctl status veza-web
register: web_status
failed_when: false
- name: Display web status
debug:
var: web_status.stdout_lines
rescue:
- name: Web deployment failed
debug:
msg: "Web deployment failed"
post_tasks:
- name: Show all running services
shell: |
incus exec {{ backend_container }} -- systemctl list-units --type=service --state=running | grep veza || true
incus exec {{ chat_container }} -- systemctl list-units --type=service --state=running | grep veza || true
incus exec {{ stream_container }} -- systemctl list-units --type=service --state=running | grep veza || true
incus exec {{ web_container }} -- systemctl list-units --type=service --state=running | grep veza || true
register: all_services
failed_when: false
- name: Display all services
debug:
var: all_services.stdout_lines
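Each placeholder service above exposes the health route defined in its source; a hedged round of checks, including a raw WebSocket upgrade handshake against the echo server (the Sec-WebSocket-Key value is an arbitrary example, and curl is bounded with --max-time because a successful 101 keeps the connection open):
incus exec veza-backend -- curl -s http://localhost:8080/api/health
incus exec veza-chat -- curl -s http://localhost:8081/health
incus exec veza-stream -- curl -s http://localhost:8082/stream/health
incus exec veza-chat -- curl -s -o /dev/null -w '%{http_code}\n' --max-time 3 \
  -H 'Connection: Upgrade' -H 'Upgrade: websocket' \
  -H 'Sec-WebSocket-Version: 13' -H 'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==' \
  http://localhost:8081/ws   # expect 101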

View file

@ -0,0 +1,88 @@
---
- name: Deploy Go Backend
hosts: edge
become: true
tasks:
- name: Install Go and dependencies
command: |
incus exec veza-backend -- bash -c 'apt update && apt install -y wget git build-essential'
- name: Download and install Go
command: |
incus exec veza-backend -- bash -c '
cd /tmp
wget https://go.dev/dl/go1.21.5.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.21.5.linux-amd64.tar.gz
echo "export PATH=\$PATH:/usr/local/go/bin" >> /root/.bashrc
'
- name: Create the Backend application
command: |
incus exec veza-backend -- bash -c 'cat > /opt/backend.go << EOF
package main
import (
"encoding/json"
"log"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
if port == "" { port = "8080" }
http.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{
"status": "ok",
"service": "veza-backend",
"version": "1.0.0",
})
})
http.HandleFunc("/api/", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{
"message": "Veza V5 Ultra Backend API",
"version": "1.0.0",
"endpoint": r.URL.Path,
})
})
log.Printf("Backend starting on :%s", port)
http.ListenAndServe(":"+port, nil)
}
EOF'
- name: Build the backend
command: |
incus exec veza-backend -- bash -c '
cd /opt
/usr/local/go/bin/go mod init veza-backend
/usr/local/go/bin/go build -o veza-backend backend.go
'
- name: Create the systemd service
command: |
incus exec veza-backend -- bash -c 'cat > /etc/systemd/system/veza-backend.service << EOF
[Unit]
Description=Veza Backend API
After=network.target
[Service]
Type=simple
ExecStart=/opt/veza-backend
Restart=always
Environment=PORT=8080
[Install]
WantedBy=multi-user.target
EOF'
- name: Start the service
shell: |
incus exec veza-backend -- systemctl daemon-reload
incus exec veza-backend -- systemctl enable veza-backend
incus exec veza-backend -- systemctl start veza-backend
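A hedged smoke check for this backend, derived from the handlers in backend.go above (encoding/json sorts map keys, so the fields come back in alphabetical order):
incus exec veza-backend -- systemctl is-active veza-backend   # expect: active
incus exec veza-backend -- curl -s http://localhost:8080/api/health
# -> {"service":"veza-backend","status":"ok","version":"1.0.0"}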

View file

@ -0,0 +1,169 @@
---
- name: Deploy Web Frontend
hosts: edge
become: true
tasks:
- name: Install Node.js and nginx
command: |
incus exec veza-web -- bash -c 'apt update && apt install -y curl nginx'
- name: Install Node.js 18
command: |
incus exec veza-web -- bash -c '
curl -fsSL https://deb.nodesource.com/setup_18.x | bash -
apt install -y nodejs
'
- name: Create the web application
command: |
incus exec veza-web -- bash -c 'cat > /var/www/html/index.html << EOF
<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Veza V5 Ultra</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
display: flex;
align-items: center;
justify-content: center;
}
.container {
background: white;
border-radius: 20px;
padding: 40px;
box-shadow: 0 20px 60px rgba(0,0,0,0.3);
max-width: 600px;
width: 90%;
}
h1 {
color: #667eea;
font-size: 2.5rem;
margin-bottom: 10px;
}
.subtitle {
color: #666;
font-size: 1.1rem;
margin-bottom: 30px;
}
.status {
background: #10b981;
color: white;
padding: 15px;
border-radius: 10px;
margin-bottom: 30px;
font-weight: 600;
}
.services {
display: grid;
gap: 15px;
}
.service {
background: #f3f4f6;
padding: 15px;
border-radius: 10px;
display: flex;
justify-content: space-between;
align-items: center;
}
.service-name {
font-weight: 600;
color: #374151;
}
.service-status {
padding: 5px 15px;
border-radius: 20px;
font-size: 0.85rem;
font-weight: 600;
}
.online { background: #d1fae5; color: #065f46; }
.checking { background: #fef3c7; color: #92400e; }
.offline { background: #fee2e2; color: #991b1b; }
</style>
</head>
<body>
<div class="container">
<h1>🎵 Veza V5 Ultra</h1>
<div class="subtitle">Plateforme Audio Collaborative</div>
<div class="status">✅ Système en Ligne</div>
<div class="services">
<div class="service">
<span class="service-name">Backend API</span>
<span id="api-status" class="service-status checking">Vérification...</span>
</div>
<div class="service">
<span class="service-name">Chat WebSocket</span>
<span id="ws-status" class="service-status checking">Vérification...</span>
</div>
<div class="service">
<span class="service-name">Stream HLS</span>
<span id="stream-status" class="service-status checking">Vérification...</span>
</div>
</div>
</div>
<script>
// Test Backend API
fetch("/api/health")
.then(r => r.json())
.then(d => {
const el = document.getElementById("api-status");
el.textContent = "✅ En Ligne";
el.className = "service-status online";
})
.catch(() => {
const el = document.getElementById("api-status");
el.textContent = "❌ Hors Ligne";
el.className = "service-status offline";
});
// Test WebSocket (simulation)
setTimeout(() => {
const el = document.getElementById("ws-status");
el.textContent = "⚠️ En Maintenance";
el.className = "service-status checking";
}, 2000);
// Test Stream (simulation)
setTimeout(() => {
const el = document.getElementById("stream-status");
el.textContent = "⚠️ En Maintenance";
el.className = "service-status checking";
}, 3000);
</script>
</body>
</html>
EOF'
- name: Configure nginx
command: |
incus exec veza-web -- bash -c 'cat > /etc/nginx/sites-available/default << EOF
server {
listen 3000 default_server;
root /var/www/html;
index index.html;
location / {
try_files \$uri \$uri/ =404;
}
location /api/ {
proxy_pass http://10.20.0.101:8080;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
}
EOF'
- name: Restart nginx
command: |
incus exec veza-web -- systemctl restart nginx
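And a matching spot-check for this frontend, with the proxy target taken from the nginx site above (the backend playbook must already have run for /api/ to answer):
incus exec veza-web -- nginx -t
incus exec veza-web -- curl -s http://localhost:3000/ | grep -c 'Veza V5 Ultra'
incus exec veza-web -- curl -s http://localhost:3000/api/health   # proxied to 10.20.0.101:8080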

View file

@ -0,0 +1,400 @@
---
# Comprehensive smoke tests for Veza V5 Ultra deployment
# Validates all services and endpoints
- name: Run smoke tests for Veza V5 Ultra
hosts: edge
become: false
gather_facts: true
vars:
domain: "{{ domain | default('veza.talas.fr') }}"
haproxy_container: "veza-haproxy"
backend_container: "veza-backend"
chat_container: "veza-chat"
stream_container: "veza-stream"
web_container: "veza-web"
tasks:
- name: Test container connectivity
block:
- name: Check if containers are running
command: incus list --format=json
register: containers_status
failed_when: false
- name: Display container status
debug:
msg: "Container {{ item.name }}: {{ 'Running' if item.status == 'Running' else item.status }}"
loop: "{{ containers_status.stdout | from_json }}"
- name: Verify all required containers are running
assert:
that:
- containers_status.stdout | from_json | selectattr('name', 'in', [haproxy_container, backend_container, chat_container, stream_container, web_container]) | selectattr('status', 'equalto', 'Running') | list | length == 5
fail_msg: "Not all required containers are running"
success_msg: "All required containers are running"
rescue:
- name: Container connectivity test failed
debug:
msg: "Container connectivity test failed, continuing with other tests"
- name: Test HAProxy service
block:
- name: Check HAProxy service status
command: |
incus exec {{ haproxy_container }} -- systemctl is-active haproxy
register: haproxy_active
failed_when: false
- name: Display HAProxy status
debug:
msg: "HAProxy service: {{ haproxy_active.stdout }}"
- name: Test HAProxy configuration
command: |
incus exec {{ haproxy_container }} -- haproxy -c -f /etc/haproxy/haproxy.cfg
register: haproxy_config_test
failed_when: false
- name: Display HAProxy config test
debug:
var: haproxy_config_test.stdout_lines
- name: Check HAProxy statistics
shell: |
incus exec {{ haproxy_container }} -- curl -s http://localhost:8404/stats | head -10
register: haproxy_stats
failed_when: false
- name: Display HAProxy statistics
debug:
var: haproxy_stats.stdout_lines
rescue:
- name: HAProxy test failed
debug:
msg: "HAProxy test failed, continuing with other tests"
- name: Test Backend API service
block:
- name: Check backend service status
command: |
incus exec {{ backend_container }} -- systemctl is-active veza-backend
register: backend_active
failed_when: false
- name: Display backend status
debug:
msg: "Backend service: {{ backend_active.stdout }}"
- name: Test backend health endpoint
command: |
incus exec {{ backend_container }} -- curl -s http://localhost:8080/api/health
register: backend_health
failed_when: false
- name: Display backend health response
debug:
var: backend_health.stdout_lines
- name: Test backend API endpoint
command: |
incus exec {{ backend_container }} -- curl -s http://localhost:8080/api/
register: backend_api
failed_when: false
- name: Display backend API response
debug:
var: backend_api.stdout_lines
- name: Verify backend responses
assert:
that:
- (backend_health.stdout | from_json).status == 'ok'
- (backend_api.stdout | from_json).message is defined
fail_msg: "Backend API responses are invalid"
success_msg: "Backend API is responding correctly"
rescue:
- name: Backend test failed
debug:
msg: "Backend test failed, continuing with other tests"
- name: Test Chat WebSocket service
block:
- name: Check chat service status
command: |
incus exec {{ chat_container }} -- systemctl is-active veza-chat
register: chat_active
failed_when: false
- name: Display chat status
debug:
msg: "Chat service: {{ chat_active.stdout }}"
- name: Test chat health endpoint
command: |
incus exec {{ chat_container }} -- curl -s http://localhost:8081/health
register: chat_health
failed_when: false
- name: Display chat health response
debug:
var: chat_health.stdout_lines
- name: Test WebSocket connection (basic)
shell: |
incus exec {{ chat_container }} -- timeout 5 bash -c 'echo "test message" | nc localhost 8081' || true
register: websocket_test
failed_when: false
- name: Display WebSocket test result
debug:
var: websocket_test.stdout_lines
rescue:
- name: Chat test failed
debug:
msg: "Chat test failed, continuing with other tests"
- name: Test Stream HLS service
block:
- name: Check stream service status
command: |
incus exec {{ stream_container }} -- systemctl is-active veza-stream
register: stream_active
failed_when: false
- name: Display stream status
debug:
msg: "Stream service: {{ stream_active.stdout }}"
- name: Test stream health endpoint
command: |
incus exec {{ stream_container }} -- curl -s http://localhost:8082/stream/health
register: stream_health
failed_when: false
- name: Display stream health response
debug:
var: stream_health.stdout_lines
- name: Test HLS endpoint
command: |
incus exec {{ stream_container }} -- curl -s http://localhost:8082/stream/test.m3u8
register: hls_test
failed_when: false
- name: Display HLS test response
debug:
var: hls_test.stdout_lines
- name: Verify HLS response
assert:
that:
- hls_test.stdout is search('EXTM3U')
fail_msg: "HLS endpoint is not returning valid M3U8 content"
success_msg: "HLS endpoint is working correctly"
rescue:
- name: Stream test failed
debug:
msg: "Stream test failed, continuing with other tests"
- name: Test Web application
block:
- name: Check web service status
command: |
incus exec {{ web_container }} -- systemctl is-active veza-web
register: web_active
failed_when: false
- name: Display web status
debug:
msg: "Web service: {{ web_active.stdout }}"
- name: Test web application
command: |
incus exec {{ web_container }} -- curl -s http://localhost:3000/
register: web_test
failed_when: false
- name: Display web test response
debug:
msg: "Web response length: {{ web_test.stdout | length }} characters"
- name: Verify web response
assert:
that:
- web_test.stdout is search('Veza V5 Ultra')
fail_msg: "Web application is not returning expected content"
success_msg: "Web application is working correctly"
rescue:
- name: Web test failed
debug:
msg: "Web test failed, continuing with other tests"
- name: Test external access through HAProxy
block:
- name: Test HTTP redirect
uri:
url: "http://{{ domain }}"
method: GET
follow_redirects: none
status_code: 301
register: http_redirect
failed_when: false
- name: Display HTTP redirect result
debug:
msg: "HTTP redirect: {{ 'Working' if http_redirect.status == 301 else 'Failed' }}"
- name: Test HTTPS access (if certificate available)
uri:
url: "https://{{ domain }}"
method: GET
validate_certs: false
status_code: 200
register: https_test
failed_when: false
- name: Display HTTPS test result
debug:
msg: "HTTPS access: {{ 'Working' if https_test.status == 200 else 'Failed or certificate not available' }}"
- name: Test API through HAProxy
uri:
url: "https://{{ domain }}/api/health"
method: GET
validate_certs: false
status_code: 200
register: api_proxy_test
failed_when: false
- name: Display API proxy test result
debug:
msg: "API through HAProxy: {{ 'Working' if api_proxy_test.status == 200 else 'Failed' }}"
rescue:
- name: External access test failed
debug:
msg: "External access test failed (expected if DNS not configured)"
- name: Test network connectivity between containers
block:
- name: Test backend connectivity from web container
command: |
incus exec {{ web_container }} -- curl -s http://10.10.0.101:8080/api/health
register: backend_connectivity
failed_when: false
- name: Display backend connectivity
debug:
msg: "Backend connectivity from web: {{ 'Working' if backend_connectivity.rc == 0 else 'Failed' }}"
- name: Test chat connectivity from web container
command: |
incus exec {{ web_container }} -- curl -s http://10.10.0.102:8081/health
register: chat_connectivity
failed_when: false
- name: Display chat connectivity
debug:
msg: "Chat connectivity from web: {{ 'Working' if chat_connectivity.rc == 0 else 'Failed' }}"
- name: Test stream connectivity from web container
command: |
incus exec {{ web_container }} -- curl -s http://10.10.0.103:8082/stream/health
register: stream_connectivity
failed_when: false
- name: Display stream connectivity
debug:
msg: "Stream connectivity from web: {{ 'Working' if stream_connectivity.rc == 0 else 'Failed' }}"
rescue:
- name: Network connectivity test failed
debug:
msg: "Network connectivity test failed"
- name: Performance and resource checks
block:
- name: Check container resource usage
shell: |
incus exec {{ haproxy_container }} -- free -h
incus exec {{ backend_container }} -- free -h
incus exec {{ chat_container }} -- free -h
incus exec {{ stream_container }} -- free -h
incus exec {{ web_container }} -- free -h
register: resource_usage
failed_when: false
- name: Display resource usage
debug:
var: resource_usage.stdout_lines
- name: Check disk usage
shell: |
incus exec {{ haproxy_container }} -- df -h
incus exec {{ backend_container }} -- df -h
incus exec {{ chat_container }} -- df -h
incus exec {{ stream_container }} -- df -h
incus exec {{ web_container }} -- df -h
register: disk_usage
failed_when: false
- name: Display disk usage
debug:
var: disk_usage.stdout_lines
rescue:
- name: Performance check failed
debug:
msg: "Performance check failed"
post_tasks:
- name: Generate smoke test summary
debug:
msg: |
========================================
Veza V5 Ultra Smoke Test Summary
========================================
Tests completed:
- Container connectivity: {{ 'PASS' if containers_status is defined and containers_status.rc == 0 else 'FAIL' }}
- HAProxy service: {{ 'PASS' if haproxy_active is defined and haproxy_active.stdout == 'active' else 'FAIL' }}
- Backend API: {{ 'PASS' if backend_health is defined and backend_health.rc == 0 else 'FAIL' }}
- Chat WebSocket: {{ 'PASS' if chat_health is defined and chat_health.rc == 0 else 'FAIL' }}
- Stream HLS: {{ 'PASS' if stream_health is defined and stream_health.rc == 0 else 'FAIL' }}
- Web application: {{ 'PASS' if web_test is defined and web_test.rc == 0 else 'FAIL' }}
- External access: {{ 'PASS' if https_test is defined and https_test.status == 200 else 'FAIL (expected if DNS not configured)' }}
Next steps:
1. Configure DNS A record for {{ domain }} to point to this host
2. Re-run HAProxy playbook to get Let's Encrypt certificate
3. Re-run smoke tests to verify HTTPS access
4. Monitor application logs for any issues
========================================
- name: Show container logs (last 10 lines each)
shell: |
echo "=== HAProxy Logs ==="
incus exec {{ haproxy_container }} -- journalctl -u haproxy --no-pager -n 10 || true
echo "=== Backend Logs ==="
incus exec {{ backend_container }} -- journalctl -u veza-backend --no-pager -n 10 || true
echo "=== Chat Logs ==="
incus exec {{ chat_container }} -- journalctl -u veza-chat --no-pager -n 10 || true
echo "=== Stream Logs ==="
incus exec {{ stream_container }} -- journalctl -u veza-stream --no-pager -n 10 || true
echo "=== Web Logs ==="
incus exec {{ web_container }} -- journalctl -u veza-web --no-pager -n 10 || true
register: container_logs
failed_when: false
- name: Display container logs
debug:
var: container_logs.stdout_lines
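Between full runs of this playbook, the per-service status checks reduce to a small loop — a convenience sketch, with the container:service pairs taken from the tasks above:
for pair in veza-haproxy:haproxy veza-backend:veza-backend veza-chat:veza-chat \
            veza-stream:veza-stream veza-web:veza-web; do
  c=${pair%%:*}; s=${pair##*:}
  printf '%-14s %s\n' "$c" "$(incus exec "$c" -- systemctl is-active "$s" 2>/dev/null || echo unknown)"
done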

View file

@ -0,0 +1,276 @@
---
# Smoke tests for Veza V5 Ultra deployment
# Validates all services are running and accessible
- name: Run smoke tests for Veza deployment
hosts: edge
become: true
gather_facts: true
vars:
test_timeout: 30
retry_count: 5
retry_delay: 10
tasks:
- name: Wait for all containers to be ready
wait_for:
timeout: "{{ test_timeout }}"
delegate_to: localhost
- name: Check container status
command: incus list --format json
register: container_status
failed_when: false
- name: Display container status
debug:
var: container_status.stdout
when: container_status.stdout is defined
- name: Test HAProxy container is running
command: |
incus exec veza-haproxy -- systemctl is-active haproxy
register: haproxy_status
failed_when: false
- name: Test backend container is running
command: |
incus exec veza-backend -- systemctl is-active veza-backend
register: backend_status
failed_when: false
- name: Test chat container is running
command: |
incus exec veza-chat -- systemctl is-active veza-chat
register: chat_status
failed_when: false
- name: Test stream container is running
command: |
incus exec veza-stream -- systemctl is-active veza-stream
register: stream_status
failed_when: false
- name: Test web container is running
command: |
incus exec veza-web -- systemctl is-active nginx
register: web_status
failed_when: false
- name: Display service status
debug:
msg: |
HAProxy: {{ haproxy_status.stdout }}
Backend: {{ backend_status.stdout }}
Chat: {{ chat_status.stdout }}
Stream: {{ stream_status.stdout }}
Web: {{ web_status.stdout }}
- name: Test internal connectivity between containers
shell: |
incus exec veza-backend -- curl -f http://veza-web:{{ veza_web_port }}/ || echo "Web container not reachable"
register: internal_web_test
failed_when: false
- name: Test internal API connectivity
shell: |
incus exec veza-web -- curl -f http://veza-backend:{{ veza_backend_port }}/health || echo "Backend API not reachable"
register: internal_api_test
failed_when: false
- name: Test internal WebSocket connectivity
shell: |
incus exec veza-web -- curl -f http://veza-chat:{{ veza_chat_port }}/ || echo "Chat server not reachable"
register: internal_ws_test
failed_when: false
- name: Test internal stream connectivity
shell: |
incus exec veza-web -- curl -f http://veza-stream:{{ veza_stream_port }}/ || echo "Stream server not reachable"
register: internal_stream_test
failed_when: false
- name: Display internal connectivity test results
debug:
msg: |
Internal Web: {{ internal_web_test.stdout }}
Internal API: {{ internal_api_test.stdout }}
Internal WS: {{ internal_ws_test.stdout }}
Internal Stream: {{ internal_stream_test.stdout }}
- name: Test external HTTP access (port 80)
uri:
url: "http://{{ ansible_host }}:80/"
method: GET
status_code: [200, 301, 302]
timeout: "{{ test_timeout }}"
register: http_test
delegate_to: localhost
retries: "{{ retry_count }}"
delay: "{{ retry_delay }}"
failed_when: false
- name: Test external HTTPS access (port 443)
uri:
url: "https://{{ ansible_host }}:443/"
method: GET
status_code: [200, 301, 302]
timeout: "{{ test_timeout }}"
validate_certs: false
register: https_test
delegate_to: localhost
retries: "{{ retry_count }}"
delay: "{{ retry_delay }}"
failed_when: false
- name: Test API endpoint
uri:
url: "https://{{ ansible_host }}:443/api/health"
method: GET
status_code: [200, 404, 500] # 404/500 might be expected if health endpoint not implemented
timeout: "{{ test_timeout }}"
validate_certs: false
register: api_test
delegate_to: localhost
retries: "{{ retry_count }}"
delay: "{{ retry_delay }}"
failed_when: false
- name: Test WebSocket endpoint (basic connectivity)
uri:
url: "https://{{ ansible_host }}:443/ws"
method: GET
status_code: [101, 200, 400, 404] # 101 for successful WS upgrade
timeout: "{{ test_timeout }}"
validate_certs: false
register: ws_test
delegate_to: localhost
retries: "{{ retry_count }}"
delay: "{{ retry_delay }}"
failed_when: false
- name: Test stream endpoint
uri:
url: "https://{{ ansible_host }}:443/stream/"
method: GET
status_code: [200, 404, 500] # 404/500 might be expected if no content
timeout: "{{ test_timeout }}"
validate_certs: false
register: stream_test
delegate_to: localhost
retries: "{{ retry_count }}"
delay: "{{ retry_delay }}"
failed_when: false
- name: Display external test results
debug:
msg: |
HTTP (port 80): {{ http_test.status }} - {{ http_test.msg }}
HTTPS (port 443): {{ https_test.status }} - {{ https_test.msg }}
API (/api/health): {{ api_test.status }} - {{ api_test.msg }}
WebSocket (/ws): {{ ws_test.status }} - {{ ws_test.msg }}
Stream (/stream/): {{ stream_test.status }} - {{ stream_test.msg }}
- name: Test HAProxy configuration
command: |
incus exec veza-haproxy -- haproxy -c -f /etc/haproxy/haproxy.cfg
register: haproxy_config_test
failed_when: false
- name: Display HAProxy config test result
debug:
var: haproxy_config_test.stdout_lines
when: haproxy_config_test.stdout_lines is defined
- name: Check HAProxy logs for errors
command: |
incus exec veza-haproxy -- journalctl -u haproxy --no-pager -n 20
register: haproxy_logs
failed_when: false
- name: Display HAProxy logs
debug:
var: haproxy_logs.stdout_lines
when: haproxy_logs.stdout_lines is defined
- name: Check application logs
command: |
incus exec {{ item.name }} -- journalctl -u {{ item.service }} --no-pager -n 10
register: app_logs
failed_when: false
loop:
- { name: "veza-backend", service: "veza-backend" }
- { name: "veza-chat", service: "veza-chat" }
- { name: "veza-stream", service: "veza-stream" }
- { name: "veza-web", service: "nginx" }
- name: Display application logs
debug:
var: app_logs.results
- name: Test port accessibility
wait_for:
port: "{{ item }}"
host: "{{ ansible_host }}"
timeout: 10
register: port_test
delegate_to: localhost
failed_when: false
loop:
- 80
- 443
- name: Display port test results
debug:
var: port_test.results
- name: Final deployment summary
debug:
msg: |
========================================
Veza V5 Ultra Deployment Summary
========================================
Host: {{ ansible_host }}
Domain: {{ domain }}
Container Status:
- HAProxy: {{ haproxy_status.stdout }}
- Backend: {{ backend_status.stdout }}
- Chat: {{ chat_status.stdout }}
- Stream: {{ stream_status.stdout }}
- Web: {{ web_status.stdout }}
External Access:
- HTTP: {{ http_test.status }}
- HTTPS: {{ https_test.status }}
- API: {{ api_test.status }}
- WebSocket: {{ ws_test.status }}
- Stream: {{ stream_test.status }}
Next Steps:
1. Point DNS A record for {{ domain }} to {{ ansible_host }}
2. Re-run playbook 30-haproxy-in-container.yml to get Let's Encrypt cert
3. Test full functionality with real domain
========================================
handlers:
- name: restart haproxy  # graceful reload, existing connections are kept
command: |
incus exec veza-haproxy -- systemctl reload haproxy
- name: restart backend
command: |
incus exec veza-backend -- systemctl restart veza-backend
- name: restart chat
command: |
incus exec veza-chat -- systemctl restart veza-chat
- name: restart stream
command: |
incus exec veza-stream -- systemctl restart veza-stream
- name: restart web
command: |
incus exec veza-web -- systemctl restart nginx

View file

@ -0,0 +1,5 @@
---
# file: crontab.yml
- hosts: crontab
roles:
- crontab

View file

@ -0,0 +1,6 @@
---
# file: docker.yml
- hosts:
- docker
roles:
- docker

View file

@ -0,0 +1,6 @@
---
# file: elasticsearch.yml
- hosts: elasticsearch
roles:
- elasticsearch

View file

@ -0,0 +1,5 @@
---
# file: element-web.yml
- hosts: element-web
roles:
- element-web

View file

@ -0,0 +1,6 @@
---
# file: filebeat.yml
- hosts: all:!veza-stats
roles:
- { role: filebeat, when: ansible_os_family == "Debian" and ansible_service_mgr == "systemd" and (filebeat_install is not defined or filebeat_install)}

View file

@ -0,0 +1,5 @@
---
# file: gerrit.yml
- hosts: gerrit
roles:
- gerrit

View file

@ -0,0 +1,5 @@
---
# file: git_generic_deploy_files.yml
- hosts: git_generic_deploy_files
roles:
- git_generic_deploy_files

View file

@ -0,0 +1,6 @@
---
# file: haproxy.yml
- hosts: haproxy
roles:
- haproxy

View file

@ -0,0 +1,26 @@
# Ansible managed
# log executed commands on this server for admins (UID 10000 to 10999 inside containers)
-a always,exit -F arch=b64 -S execve -F auid>=10000 -F auid<=10999 -k exec_metal_admin
# log executed commands inside containers for admins (UID 10000 to 10999 inside containers)
-a always,exit -F arch=b64 -S execve -F auid>=1010000 -F auid<=1010999 -k exec_container_admin
# log executed commands inside containers for users (UID 12000 to 12999 inside containers)
-a always,exit -F arch=b64 -S execve -F auid>=1012000 -F auid<=1012999 -k exec_container_user
# Reduce the noise
-a exclude,always -F msgtype=CRED_ACQ
-a exclude,always -F msgtype=CRED_DISP
-a exclude,always -F msgtype=CRED_REFR
-a exclude,always -F msgtype=CWD
-a exclude,always -F msgtype=PATH
-a exclude,always -F msgtype=PROCTITLE
-a exclude,always -F msgtype=SERVICE_START
-a exclude,always -F msgtype=SERVICE_STOP
-a exclude,always -F msgtype=SOCKADDR
-a exclude,always -F msgtype=USER_ACCT
-a exclude,always -F msgtype=USER_AUTH
-a exclude,always -F msgtype=USER_END
-a exclude,always -F msgtype=USER_START
-a exclude,always -F auid=4294967295

View file

@ -0,0 +1,5 @@
---
# file: roles/auditd/handlers/main.yml
- name: "augenrules_load"
ansible.builtin.command:
cmd: /usr/sbin/augenrules --load

View file

@ -0,0 +1,7 @@
---
# file: roles/auditd/meta/main.yml
dependencies:
- role: zabbix_template_assignment
zabbix_template_assignment_systemd_service_list:
- auditd

View file

@ -0,0 +1,92 @@
# Auditd
This role installs auditd and activates it with 3 different logging tags, described below:
1. exec_metal_admin
1. exec_container_admin
1. exec_container_user
## 1. Logging Commands by Admins on the Host
```bash
-a always,exit -F arch=b64 -S execve -F auid>=10000 -F auid<=10999 -k exec_metal_admin
```
- `-a always,exit`: Always log on syscall exit.
- `-F arch=b64`: Specifies the 64-bit architecture (`b64`).
- `-S execve`: Monitors the `execve` syscall, capturing all program executions.
- `-F auid>=10000 -F auid<=10999`: Filters logs for admin accounts with `auid` (Audit User ID) in the specified range, typically representing admin users on the host.
- `-k exec_metal_admin`: Tags logs with the key `exec_metal_admin` for easier log filtering.
## 2. Logging Commands by Admins in Containers
```bash
-a always,exit -F arch=b64 -S execve -F auid>=1010000 -F auid<=1010999 -k exec_container_admin
```
- Similar to the first rule but applied to container environments.
- The `auid` range (`1010000` to `1010999`) is intended for admin users within containers using ID mapping.
## 3. Logging Commands by Non-Admin Users in Containers
```bash
-a always,exit -F arch=b64 -S execve -F auid>=1012000 -F auid<=1012999 -k exec_container_user
```
- Captures commands by container user accounts with `auid` between `1012000` and `1012999`.
- Uses the key `exec_container_user` to differentiate these logs from admin activities.
---
# Noise Reduction Rules
The following rules exclude specific message types to reduce unnecessary log entries:
```bash
-a exclude,always -F msgtype=CRED_ACQ
-a exclude,always -F msgtype=CRED_DISP
-a exclude,always -F msgtype=CRED_REFR
-a exclude,always -F msgtype=CWD
-a exclude,always -F msgtype=PATH
-a exclude,always -F msgtype=PROCTITLE
-a exclude,always -F msgtype=SERVICE_START
-a exclude,always -F msgtype=SERVICE_STOP
-a exclude,always -F msgtype=SOCKADDR
-a exclude,always -F msgtype=USER_ACCT
-a exclude,always -F msgtype=USER_AUTH
-a exclude,always -F msgtype=USER_END
-a exclude,always -F msgtype=USER_START
-a exclude,always -F auid=4294967295
```
- `-a exclude,always`: Excludes specified message types from logs.
- `msgtype=CRED_ACQ`, `CRED_DISP`, `CRED_REFR`: Suppresses logs related to credential acquisition, disposal, and refresh.
- `msgtype=CWD`: Suppresses 'current working directory' logs.
- `msgtype=PATH`: Prevents detailed file path logs.
- `msgtype=PROCTITLE`: Avoids logging full commands with arguments.
- `msgtype=SERVICE_START/STOP`: Reduces noise by ignoring service start/stop events.
- `msgtype=USER_START`, `USER_ACCT`, `USER_AUTH`, `USER_END`: Filters out general user login/authentication events.
- `msgtype=SOCKADDR`: Omits network-related socket address logs.
- `-F auid=4294967295`: Excludes logs from system processes with an unset audit user ID.
---
# Compliance and Validation
- Ensures all executed commands by admins and specific container users are logged.
- Provides clear user attribution through `auid` filtering, meeting ISO 27001 requirements.
- Noise reduction rules enhance the log signal-to-noise ratio, focusing on relevant events.
# Log Shipping
Filebeat is used to send the logs to Elasticsearch for easy access via Kibana.
# Auditd useful commands
Show current audit rules:
```
auditctl -l
```
Search logs by tags:
```
ausearch -k exec_metal_admin
```
Search by uid or uidnumber:
```
ausearch -ua adm-senke
```
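To apply the role, a minimal playbook following the conventions of the other playbooks in this repository might look like this (the `auditd` host group is hypothetical, and the role's meta dependency on `zabbix_template_assignment` must be resolvable):
```yaml
---
# file: auditd.yml
- hosts: auditd
  roles:
    - auditd
```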

View file

@ -0,0 +1,14 @@
---
# file: roles/auditd/tasks/main.yml
- name: "shadow from global_shadow variables"
ansible.builtin.apt:
name: auditd
tags: auditd
- name: "/etc/audit/rules.d/ansible.rules"
ansible.builtin.copy:
src: "ansible.rules"
dest: "/etc/audit/rules.d/ansible.rules"
notify: augenrules_load
tags: auditd

View file

@ -0,0 +1,87 @@
[Unit]
Description=Coraza WAF SPOA Daemon
Documentation=https://www.coraza.io
[Service]
ExecStart=/usr/local/bin/coraza-spoa -config=/etc/coraza/config.yaml
WorkingDirectory=/
Restart=always
Type=exec
User=coraza
Group=coraza
# Hardening
# Controls which capabilities to include in the ambient capability set for the executed process.
AmbientCapabilities=
# Takes a mount propagation setting: shared, slave or private.
MountFlags=private
# If true, kernel variables accessible through /proc/sys/, /sys/, /proc/sysrq-trigger, /proc/latency_stats, /proc/acpi, /proc/timer_stats, /proc/fs and /proc/irq will be made read-only and /proc/kallsyms as well as /proc/kcore will be inaccessible to all processes of the unit.
ProtectKernelTunables=yes
# If true, explicit module loading will be denied.
ProtectKernelModules=yes
# If true, access to the kernel log ring buffer will be denied.
ProtectKernelLogs=yes
# If true, the Linux Control Groups (cgroups(7)) hierarchies accessible through /sys/fs/cgroup/ will be made read-only to all processes of the unit.
ProtectControlGroups=yes
# when set to "noaccess" the ability to access most of other users' process metadata in /proc/ is taken away for processes of the service.
ProtectProc=noaccess
# If set, writes to the hardware clock or system clock will be denied.
ProtectClock=yes
# When set, sets up a new UTS namespace for the executed processes. In addition, changing hostname or domainname is prevented.
ProtectHostname=yes
# If set to "strict" the entire file system hierarchy is mounted read-only, except for the API file system subtrees /dev/, /proc/ and /sys/
ProtectSystem=strict
# If set, any attempts to set the set-user-ID (SUID) or set-group-ID (SGID) bits on files or directories will be denied
RestrictSUIDSGID=true
# If set, any attempts to enable realtime scheduling in a process of the unit are refused.
RestrictRealtime=true
# Controls the secure bits set for the executed process. See man capabilities.
SecureBits=no-setuid-fixup-locked noroot-locked
# directories frequently used by other applications
InaccessiblePaths=-/opt
InaccessiblePaths=-/srv
# block binaries that are not useful to the service
InaccessiblePaths=-/bin
InaccessiblePaths=-/sbin
# locks down the personality(2) system call so that the kernel execution domain may not be changed
LockPersonality=true
# set the logs directory path
LogsDirectory=coraza
# set the configuration directory path
ConfigurationDirectory=coraza
# ensure that memory mappings are not editable: creating or altering memory segments to become writable or executable is not allowed
MemoryDenyWriteExecute=yes
# ensures that the service process and all its children can never gain new privileges through execve()
NoNewPrivileges=true
# the directories /home/, /root, and /run/user are made inaccessible and empty for processes invoked by this unit
ProtectHome=true
# sets up a new /dev/ mount for the executed processes and only adds API pseudo devices such as /dev/null, /dev/zero or /dev/random
PrivateDevices=true
# sets up a new user namespace for the executed processes and configures a user and group mapping.
PrivateUsers=true
# a new file system namespace set up for executed processes, /tmp/ and /var/tmp/ inside are not shared with processes outside of the namespace, all temporary files removed after service stopped.
PrivateTmp=true
# all System V and POSIX IPC objects owned by the user and group the processes of this unit are run as are removed when the unit is stopped
RemoveIPC=true
# Restricts the set of socket address families accessible to the processes of this unit. here ipv4 and ipv6
RestrictAddressFamilies=AF_INET AF_INET6
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallFilter=-@setuid -@ipc -@mount
IPAddressDeny=any
IPAddressAllow=localhost
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,7 @@
---
# file: roles/coraza/handlers/main.yml
- name: restart coraza
ansible.builtin.systemd:
name: coraza-spoa
state: restarted

View file

@ -0,0 +1,14 @@
---
# file: roles/coraza/meta/main.yml
dependencies:
- role: git_generic_deploy_files
vars:
git_generic_deploy_files_list:
- repository_url: "https://github.com/corazawaf/coraza-spoa.git"
branch: "main"
deploy_directory: "/usr/local/src/coraza-spoa"
- repository_url: "https://github.com/coreruleset/coreruleset"
branch: "main"
deploy_directory: "/usr/local/src/coreruleset"
- role: go

View file

@ -0,0 +1,59 @@
# Coraza role
This role installs the Coraza WAF SPOA connector, an HTTP filtering layer that integrates the OWASP Core Rule Set (CRS) via HAProxy's SPOE mechanism.
It is intended for production environments where applications require firewalling, and it supports tuning of security behavior through multiple paranoia levels and customizable directives.
<!-- TOC -->
* [Coraza role](#coraza-role)
* [Variable reference](#variable-reference)
* [Optional variables](#optional-variables)
* [Configuration](#configuration)
* [Useful links](#useful-links)
<!-- TOC -->
## Variable reference
### Optional variables
| Variable | Description | Type of variable | Default value | Other value |
|------------------------------------|--------------------------------------------------------------------|------------------|----------------------------------------------------|----------------------------------------------------|
| `coraza_spoa_transaction_ttl_ms` | Transaction lifetime in milliseconds | `integer` | `500` | `300`, `900`, `3000` |
| `coraza_directives` | Block of Coraza/ModSecurity directives to inject | `multiline` | _Default OWASP CRS directives block_ | `SecRuleEngine DetectionOnly`, custom directives |
| `coraza_sec_rule_engine` | Enables or disables Coraza traffic processing | `string` | `DetectionOnly` | `On`, `DetectionOnly`, `Off` |
| `coraza_paranoia_level` | OWASP CRS paranoia level: strictness & false positive sensitivity | `integer` | `1` | `1`, `2`, `3`, `4` |
## Configuration
By default, this role applies a moderate Coraza WAF configuration, using the lowest paranoia level and loading all available OWASP CRS rules and plugins:
```
SecAction "id:1000001,phase:1,pass,t:none,nolog,setvar:tx.blocking_paranoia_level=1"
Include /etc/coraza/coraza.conf
Include /etc/coraza/crs-setup.conf
Include /etc/coraza/plugins/*.conf
Include /etc/coraza/rules/*.conf
```
This default setup is safe for most production environments, with minimal risk of blocking legitimate traffic. However, if your application requires stricter protections, you can adjust the behavior using the `coraza_paranoia_level` variable, which supports **4 levels of rule strictness**:
* **1** - **Baseline** - Minimal false positives, safe for most applications. There should be no tuning needed.
* **2** - **Enhanced** - Rules that are adequate when real customer data is involved. Expect false positives, might require tuning.
* **3** - **Strict** - Online banking level security with many false positives, frequent tuning needed.
* **4** - **Aggressive** - Rules that are super aggressive. There will be a lot of false positives, lots of tuning needed (essential).
If you choose a paranoia level higher than 1, be aware that false positives are more likely, potentially blocking legitimate traffic. In such cases, it is strongly advised to tune the WAF directives for your specific application by overriding the default rules with the `coraza_directives` variable.
This allows you to include only selected rule sets or inject custom SecRule logic that satisfies your needs.
You can check [what's in the rules](https://coreruleset.org/docs/3-about-rules/rules/) in OWASP CRS documentation.
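For example, a host_vars sketch that raises strictness while narrowing the loaded rules might look like this (the rule file names follow the OWASP CRS layout and are illustrative, not a recommended set):
```yaml
# host_vars/example-waf.yml -- illustrative tuning, not a drop-in config
coraza_sec_rule_engine: "On"   # switch from DetectionOnly once tuned
coraza_paranoia_level: 2
coraza_directives: |
  Include /etc/coraza/plugins/*.conf
  Include /etc/coraza/rules/REQUEST-901-INITIALIZATION.conf
  Include /etc/coraza/rules/REQUEST-942-APPLICATION-ATTACK-SQLI.conf
```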
## Useful links
* [Coraza SPOA repository](https://github.com/corazawaf/coraza-spoa)
* [Coraza SPOA documentation](https://coraza.io/connectors/coraza-spoa/)
* [Coraza documentation](https://coraza.io/docs/tutorials/introduction/)
* [Coraza/ModSecurity directives](https://coraza.io/docs/seclang/directives/)
* [OWASP CRS repository](https://github.com/coreruleset/coreruleset)
* [OWASP CRS documentation](https://owasp.org/www-project-modsecurity-core-rule-set/)
* [Working with paranoia levels](https://coreruleset.org/20211028/working-with-paranoia-levels/)

View file

@ -0,0 +1,76 @@
---
# file: roles/coraza/tasks/main.yml
- name: "ensure coraza group exists"
ansible.builtin.group:
name: coraza
tags: coraza
- name: "ensure coraza user exists"
ansible.builtin.user:
name: coraza
group: coraza
system: true
create_home: false
tags: coraza
- name: "build coraza-spoa binary"
ansible.builtin.command: /usr/local/go/bin/go run mage.go build
args:
chdir: /usr/local/src/coraza-spoa
tags: coraza
- name: "ensure main coraza directory exist"
ansible.builtin.file:
path: /etc/coraza
state: directory
tags: coraza
- name: "ensure main coraza configuration files are present"
ansible.builtin.template:
src: "{{ item }}.j2"
dest: "/etc/coraza/{{ item }}"
notify: restart coraza
loop:
- config.yaml
- coraza.conf
tags: coraza
- name: "ensure coraza binary is installed in /usr/local/bin"
ansible.builtin.copy:
src: /usr/local/src/coraza-spoa/build/coraza-spoa
dest: /usr/local/bin/coraza-spoa
remote_src: true
mode: "0755"  # quoted octal; a bare 755 would be read as decimal
tags: coraza
- name: "ensure crs configuration file exists"
ansible.builtin.copy:
src: /usr/local/src/coreruleset/crs-setup.conf.example
dest: /etc/coraza/crs-setup.conf
remote_src: true
notify: restart coraza
tags: coraza
- name: "ensure crs rules and plugins directories are present"
ansible.builtin.copy:
src: "/usr/local/src/coreruleset/{{ item }}"
dest: "/etc/coraza/{{ item }}"
remote_src: true
loop:
- rules
- plugins
tags: coraza
- name: "ensure coraza spoa service systemd file exists"
ansible.builtin.copy:
src: coraza-spoa.service
dest: /etc/systemd/system/coraza-spoa.service
tags: coraza
- name: "[always] coraza service started and enabled"
ansible.builtin.systemd_service:
name: coraza-spoa
state: started
enabled: true
tags: coraza

View file

@ -0,0 +1,37 @@
# {{ ansible_managed }}
# The SPOA server bind address
bind: 127.0.0.1:9000
# The log level configuration, one of: debug/info/warn/error/panic/fatal
log_level: warn
# The log file path
log_file: /var/log/coraza/coraza.log
# The log format, one of: console/json
log_format: json
applications:
- name: haproxy_waf
directives: |
SecAction "id:1000001,phase:1,pass,t:none,nolog,setvar:tx.blocking_paranoia_level={{ coraza_paranoia_level | default(1) }}"
Include /etc/coraza/coraza.conf
Include /etc/coraza/crs-setup.conf
{% if coraza_directives is defined %}
{{ coraza_directives | indent(6, true) }}
{% else %}
Include /etc/coraza/plugins/*.conf
Include /etc/coraza/rules/*.conf
{% endif %}
# HAProxy configured to send requests only, that means no cache required
response_check: false
# The transaction cache lifetime in milliseconds
transaction_ttl_ms: {{ coraza_spoa_transaction_ttl_ms | default(500) }}
# The log level configuration, one of: debug/info/warn/error/panic/fatal
log_level: warn
# The log file path
log_file: /var/log/coraza/coraza.log
# The log format, one of: console/json
log_format: json

View file

@ -0,0 +1,116 @@
# {{ ansible_managed }}
# -- Rule engine initialization ----------------------------------------------
# Enable Coraza, attaching it to every transaction. Use detection
# only to start with, because that minimises the chances of post-installation
# disruption.
#
SecRuleEngine {{ coraza_sec_rule_engine | default("DetectionOnly") }}
# -- Request body handling ---------------------------------------------------
# Allow Coraza to access request bodies. If you don't, Coraza
# won't be able to see any POST parameters, which opens a large security
# hole for attackers to exploit.
#
SecRequestBodyAccess On
# Enable XML request body parser.
# Initiate XML Processor in case of xml content-type
#
SecRule REQUEST_HEADERS:Content-Type "^(?:application(?:/soap\+|/)|text/)xml" \
"id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML"
# Enable JSON request body parser.
# Initiate JSON Processor in case of JSON content-type; change accordingly
# if your application does not use 'application/json'
#
SecRule REQUEST_HEADERS:Content-Type "^application/json" \
"id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON"
# Enable JSON request body parser for more subtypes.
# Adapt this rule if you want to engage the JSON Processor for "+json" subtypes
#
SecRule REQUEST_HEADERS:Content-Type "^application/[a-z0-9.-]+[+]json" \
"id:'200006',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON"
# Maximum request body size we will accept for buffering. If you support
# file uploads, this value has to be as large as the largest file
# you are willing to accept.
SecRequestBodyLimit 13107200
# Maximum request body size that Coraza will store in memory. If the body
# size exceeds this value, it will be saved to a temporary file on disk.
SecRequestBodyInMemoryLimit 131072
# Maximum request body size we will accept for buffering, with files excluded.
# You want to keep that value as low as practical.
# Note: SecRequestBodyNoFilesLimit is currently NOT supported by Coraza
# SecRequestBodyNoFilesLimit 131072
# What to do if the request body size is above our configured limit.
# Keep in mind that this setting will automatically be set to ProcessPartial
# when SecRuleEngine is set to DetectionOnly mode in order to minimize
# disruptions when initially deploying Coraza.
# Warning: Setting this directive to ProcessPartial introduces a potential bypass
# risk, as attackers could prepend junk data equal to or greater than the inspected body size.
#
SecRequestBodyLimitAction Reject
# Verify that we've correctly processed the request body.
# As a rule of thumb, when failing to process a request body
# you should reject the request (when deployed in blocking mode)
# or log a high-severity alert (when deployed in detection-only mode).
#
SecRule REQBODY_ERROR "!@eq 0" \
"id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2"
# By default be strict with what we accept in the multipart/form-data
# request body. If the rule below proves to be too strict for your
# environment consider changing it to detection-only.
# Do NOT remove it, as it will catch many evasion attempts.
#
SecRule MULTIPART_STRICT_ERROR "!@eq 0" \
"id:'200003',phase:2,t:none,log,deny,status:400, \
msg:'Multipart request body failed strict validation."
# -- Debug log configuration -------------------------------------------------
# Default debug log path
# Debug levels:
# 0: No logging (least verbose)
# 1: Error
# 2: Warn
# 3: Info
# 4-8: Debug
# 9: Trace (most verbose)
#
SecDebugLog /var/log/coraza/debug.log
SecDebugLogLevel 3
# -- Audit log configuration -------------------------------------------------
# Log the transactions that are marked by a rule, as well as those that
# trigger a server error (the regex below matches 400-419 and 500-519
# response status codes).
#
SecAuditEngine RelevantOnly
SecAuditLogRelevantStatus "^(?:(5|4)(0|1)[0-9])$"
# Define which parts of the transaction are going to be recorded in the audit log
SecAuditLogParts ABIJDEFHZ
# Use a single file for logging. This is much easier to look at, but
# assumes that you will use the audit log only occasionally.
#
SecAuditLogType Serial
SecAuditLogDir /var/log/coraza/audit
SecAuditLog /var/log/coraza/audit.log
# The format used to write the audit log.
# Can be one of JSON|JsonLegacy|Native|OCSF
SecAuditLogFormat JSON

View file

@ -0,0 +1,36 @@
# Manage crontab
This role is very simple: it uses the same parameters as the cron module (https://docs.ansible.com/ansible/latest/modules/cron_module.html).
<!-- TOC -->
* [Manage crontab](#manage-crontab)
* [Examples](#examples)
* [Silence `/etc/cron.d/` crons](#silence-etccrond-crons)
<!-- TOC -->
## Examples
Cron restart apache2 every 4 hours:
```yaml
cron_tasks:
- name: "Restart apache2 "
minute: "0"
hour: "*/4"
job: "systemctl restart apache2.service"
```
Environment variable:
```yaml
cron_tasks:
- name: MAILTO
env: yes
value: ""
```
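The role also passes `special_time` through to the cron module, e.g. to run a job at every reboot (the script path below is hypothetical):
```yaml
cron_tasks:
  - name: "Warm cache at boot"
    special_time: reboot
    job: "/usr/local/bin/warm-cache.sh"
```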
## Silence `/etc/cron.d/` crons
This is an edge case; crons shouldn't be managed this way, but you can silence mails from crons inside `/etc/cron.d/*` files by adding `MAILTO=""` for root, e.g. with:
```yaml
crontab_silence_files: [sentry, belgique_demo]
```
N.B.: only existing files are updated.

View file

@ -0,0 +1,55 @@
---
# file: roles/crontab/tasks/main.yml
- name: "Install cron package"
apt:
name: cron
tags: crontab
- name: "Configuring cron tasks"
ansible.builtin.cron:
cron_file: "{{ item.cron_file | default(omit) }}"
day: "{{ item.day | default(omit) }}"
env: "{{ item.env | default(omit) }}"
hour: "{{ item.hour | default(omit) }}"
job: "{{ item.job | default(omit) }}"
minute: "{{ item.minute | default(omit) }}"
month: "{{ item.month | default(omit) }}"
name: "{{ item.name }}"
special_time: "{{ item.special_time | default(omit) }}"
state: "{{ item.state | default(omit) }}"
user: "{{ item.user | default(omit) }}"
value: "{{ item.value | default(omit) }}"
weekday: "{{ item.weekday | default(omit) }}"
disabled: "{{ item.disabled | default(omit) }}"
loop: "{{ cron_tasks }}"
when: cron_tasks is defined
tags: crontab
- name: "Silence selected root cron.d files via MAILTO"
block:
- name: "Check if cron files exist"
ansible.builtin.stat:
path: "/etc/cron.d/{{ item }}"
loop: "{{ crontab_silence_files }}"
register: crontab_file_stats
- name: "Keep only existing cron files"
ansible.builtin.set_fact:
crontab_silence_files_existing: >-
{{
crontab_file_stats.results
| selectattr('stat.exists', 'defined')
| selectattr('stat.exists')
| map(attribute='item')
| list
}}
- name: "Silence existing root cron.d files"
ansible.builtin.cron:
name: MAILTO
env: true
value: ""
cron_file: "{{ item }}"
user: root
loop: "{{ crontab_silence_files_existing }}"
when: crontab_silence_files is defined and (crontab_silence_files | length) > 0
tags: crontab

View file

@ -0,0 +1,8 @@
---
# file: roles/docker/defaults/main.yml
docker_compose: true
docker_user: root
docker_rootless: false
docker_compose_version: "latest"
docker_compose_update_now: false

View file

@ -0,0 +1,103 @@
# Docker role
This role will install Docker on a target machine running Debian or Ubuntu.
<!-- TOC -->
* [Docker role](#docker-role)
* [Variable reference](#variable-reference)
* [Optional variables](#optional-variables)
* [Example](#example)
* [Select the Docker version](#select-the-docker-version)
* [Select the Docker-compose version](#select-the-docker-compose-version)
* [Information](#information)
* [Important about the network](#important-about-the-network)
* [Update of docker-compose](#update-of-docker-compose)
<!-- TOC -->
## Variable reference
### Optional variables
| Variable | Description | Default value |
|------------------------|--------------------------------------------------------------------------------------------|---------------|
| docker_compose | install docker-compose | `true` |
| docker_user | name of the user who is going to use docker | `root` |
| docker_rootless | run the Docker daemon as a non-root user (Rootless mode) | `false` |
| docker_pinned | see section [Select the Docker version](#select-the-docker-version) below | None |
| docker_compose_version | see section [Select the Docker-compose version](#select-the-docker-compose-version) below | None |
| docker_registry_login | see below | None |
`docker_registry_login` is used when you need to define a URL/username/password to access specific Docker registries.
The object is defined like this:
```
docker_registry_login:
- url: "docker.talas.dev"
username: "user"
password: "pass"
- url: "something"
username: "user"
password: "pass"
```
## Example
### Select the Docker version
By default, the latest version of Docker will be installed, but you can specify a version by setting this variable:
```
docker_pinned: "17.09.0~ce-0~debian"
# Or only pin the major version
docker_pinned: "27*"
```
To find out the list of available versions, use this command on the target server:
```
# apt-cache madison docker-ce
docker-ce | 17.09.0~ce-0~debian | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.06.2~ce-0~debian | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.06.1~ce-0~debian | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.06.0~ce-0~debian | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.03.2~ce-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.03.1~ce-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
docker-ce | 17.03.0~ce-0~debian-stretch | https://download.docker.com/linux/debian stretch/stable amd64 Packages
```
### Select the Docker-compose version
By default, this role will install the latest version of docker-compose.
You can also select a specific docker-compose version by setting this variable:
```
docker_compose_version: "1.17.1"
```
You can find the list of docker-compose releases here: https://github.com/docker/compose/releases/
## Information
### Important about the network
This role lets Docker create the docker0 bridge interface itself. This means that if Docker already sees routes for all the RFC 1918 networks (10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16), it will fail to pick a free subnet for the bridge.
This is typically the case for machines in the DMZ: all those routes are defined so that the default gateway can be the BGP router. A typical configuration for their DMZ interface looks like this:
```
# DMZ6
auto eth136
iface eth136 inet static
address 10.12.36.96
netmask 24
dns-nameservers 10.12.1.207 10.12.1.2
dns-search talas.com
# static route
up route add -net 10.0.0.0 netmask 255.0.0.0 gw 10.12.36.254 dev eth136
up route add -net 172.16.0.0 netmask 255.240.0.0 gw 10.12.36.254 dev eth136
up route add -net 192.168.0.0 netmask 255.255.0.0 gw 10.12.36.254 dev eth136
```
To allow Docker to create the docker0 interface, remove the last route line (the 192.168.0.0/16 one). We currently don't use any 192.168.0.0/16 network, so this is not an issue.
### Update of docker-compose
To perform an update, add this parameter: `--extra-vars "docker_compose_update_now=true"`; *true* is case-sensitive since it's evaluated as a string in this case.
This role will also update if this parameter is present: `--extra-vars "global_update_now=true"`, with the same case-sensitivity caveat.
The update will be skipped if you already have the latest version of the binary.
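Putting it together, a minimal group_vars sketch (the values below are illustrative, reusing the pinning examples above):
```yaml
# group_vars/docker.yml -- illustrative values only
docker_user: deploy              # hypothetical non-root user
docker_rootless: false
docker_pinned: "27*"             # pin the major version, as shown above
docker_compose_version: "1.17.1" # or "latest" (the default)
```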

View file

@ -0,0 +1,84 @@
---
# file: roles/docker/tasks/docker-rootless.yml
- name: "install dependencies"
apt:
name:
- uidmap
- docker-ce-rootless-extras
- slirp4netns
- name: "get uidnumber of user {{ docker_user }}"
ansible.builtin.command:
cmd: "id -u {{ docker_user }}"
changed_when: false
check_mode: false
register: rootless_uid
- name: "check if /run/docker.sock exists"
stat:
path: "/run/user/{{ rootless_uid.stdout }}/docker.sock"
register: rootless_conf
- name: "stop any running root instances of docker daemon"
systemd:
name: "{{ item }}"
state: stopped
enabled: false
loop:
- docker.service
- docker.socket
- name: "remove docker.sock file"
file:
path: /var/run/docker.sock
state: absent
- name: "set 65536 subordinate UIDs/GUIDs for the user"
lineinfile:
path: "/etc/{{ item }}"
insertafter: EOF
line: "{{ docker_user }}:100000:65536"
loop:
- subuid
- subgid
- name: "install rootless docker (ssh root@server 'machinectl -q shell {{ docker_user }}@ dockerd-rootless-setuptool.sh install)"
remote_user: root
become: true
become_method: community.general.machinectl
become_user: "{{ docker_user }}"
vars:
ansible_ssh_pipelining: false # https://github.com/ansible/ansible/issues/81254
ansible.builtin.command: /usr/bin/dockerd-rootless-setuptool.sh install
when: not rootless_conf.stat.exists
- name: "enable and start rootless docker"
remote_user: root
become: true
become_method: community.general.machinectl
become_user: "{{ docker_user }}"
vars:
ansible_ssh_pipelining: false # https://github.com/ansible/ansible/issues/81254
systemd:
name: docker.service
state: started
enabled: true
scope: user
ignore_errors: "{{ ansible_check_mode }}"
- name: "decouple rootless docker from user session"
remote_user: root
become: true
become_method: community.general.machinectl
become_user: "{{ docker_user }}"
vars:
ansible_ssh_pipelining: false # https://github.com/ansible/ansible/issues/81254
ansible.builtin.command: "loginctl enable-linger {{ docker_user }}"
when: not rootless_conf.stat.exists
- name: "DOCKER_HOST=unix:///run/user/{{ rootless_uid.stdout }}/docker.sock in /etc/environment"
lineinfile:
path: /etc/environment
insertafter: EOF
line: "DOCKER_HOST=unix:///run/user/{{ rootless_uid.stdout }}/docker.sock"

View file

@ -0,0 +1,215 @@
---
# file: roles/docker/tasks/main.yml
- name: "packages prerequisites"
apt:
name:
- ca-certificates
- curl
- software-properties-common
tags: docker
- name: "apt package for pip"
apt:
name:
- python3-pkg-resources
- python3-setuptools
tags: docker
- name: "[ubuntu and Debian 11-] module installation with pip needed for ansible control"
pip:
name:
- docker
- docker-compose
when: ansible_distribution == "Ubuntu" or ( ansible_distribution == "Debian" and ansible_distribution_major_version is version('12', '<'))
tags: docker
- name: "[Debian 12+] apt install python3-docker for ansible control"
apt:
name:
- python3-docker
when:
- ansible_distribution == "Debian"
- ansible_distribution_major_version is version('12', '>=')
tags: docker
- name: "apt install docker-compose v1 from debian package"
apt:
name:
- docker-compose
tags: docker
- name: "remove legacy key from apt-key"
apt_key:
id: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
state: absent
when: ansible_distribution_major_version is version('13', '<') or ansible_distribution != "Debian"
tags: docker
- name: "download modern signature key"
get_url:
url: "https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg"
dest: "/dev/shm/docker.acs"
changed_when: false
tags: docker
- name: "check if {{ get_env_var.stdout }}/docker.sock exists"
file:
path: "/etc/apt/keyrings"
state: directory
- name: "install modern signature key"
shell:
cmd: "cat /dev/shm/docker.acs | gpg --dearmor -o /etc/apt/keyrings/docker.gpg"
creates: "/etc/apt/keyrings/docker.gpg"
tags: docker
- name: "repository file"
copy:
content: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable\n"
dest: "/etc/apt/sources.list.d/docker.list"
register: repo
tags: docker
- name: "apt pin docker-ce* version"
ansible.builtin.copy:
content: |
Package: docker-ce*
Pin: version 5:{{ docker_pinned }}
# Note: priority of 1001 (greater than 1000) allows for downgrading.
# To make package downgrading impossible, use a value of 999
Pin-Priority: 1001
dest: "/etc/apt/preferences.d/docker"
when: docker_pinned is defined
tags: docker
- name: "apt make sure that docker-ce version is not pinned"
ansible.builtin.file:
path: "/etc/apt/preferences.d/docker"
state: absent
when: docker_pinned is undefined
tags: docker
- name: "refresh apt if repo was modified"
apt:
update_cache: true
when: repo.changed
tags: docker
- name: "apt install docker-ce (not pinned)"
apt:
name: "docker-ce"
when: docker_pinned is undefined
tags: docker
- name: "apt install docker-ce (pinned)"
apt:
name: "docker-ce"
state: latest
install_recommends: true
when: docker_pinned is defined
tags: docker
- name: "docker compose v2 package"
apt:
name: "docker-compose-plugin"
tags: docker
- name: "stat /usr/local/bin/docker-compose"
stat:
path: /usr/local/bin/docker-compose
register: docker_compose_binary
when:
- docker_compose
- docker_compose_version == "latest"
tags: docker
- name: "docker-compose: get the latest download link on github"
uri:
url: https://api.github.com/repos/docker/compose/releases/latest
return_content: true
check_mode: false
register: URL
delegate_to: localhost
become: false
run_once: true
when:
- docker_compose
- docker_compose_version == "latest"
- docker_compose_binary.stat.exists and ( docker_compose_update_now == "true" or global_update_now == "true" ) or not docker_compose_binary.stat.exists
tags: docker
# curl -s https://api.github.com/repos/docker/compose/releases/latest | jq -r '.assets[] | select(.name == "docker-compose-linux-x86_64") | .browser_download_url'
- name: "latest docker compose installation"
get_url:
url: "{{ URL.json | json_query(params) | first }}"
dest: "/usr/local/bin/docker-compose"
force: true
mode: 0755
vars:
params: "assets[?name=='docker-compose-linux-x86_64'].browser_download_url"
when:
- docker_compose
- docker_compose_version == "latest"
- ( docker_compose_update_now == "true" or global_update_now == "true" ) or not docker_compose_binary.stat.exists
tags: docker
- name: "docker compose version {{ docker_compose_version }} installation"
get_url:
url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-x86_64"
dest: "/usr/local/bin/docker-compose"
force: true
mode: 0755
when:
- docker_compose
- docker_compose_version != "latest"
tags: docker
- name: "install dependencies when docker_user is not root"
apt:
name:
- systemd-container
when: docker_user != "root"
tags: docker
- name: "make sure that {{ docker_user }} is a member of docker group"
ansible.builtin.user:
name: "{{ docker_user }}"
groups:
- docker
append: true
when: docker_user != "root"
tags: docker
- name: "setting up docker daemon as non-root"
import_tasks: docker-rootless.yml
when: docker_rootless
tags: docker
- name: "docker login user root to remote registry"
community.docker.docker_login:
registry_url: "{{ item.url }}"
username: "{{ item.username }}"
password: "{{ item.password }}"
loop: "{{ docker_registry_login }}"
when:
- docker_registry_login is defined
- docker_user == "root"
tags: docker
- name: "docker login user {{ docker_user }} to remote registry"
remote_user: root
become: true
become_method: community.general.machinectl
become_user: "{{ docker_user }}"
vars:
ansible_ssh_pipelining: false # https://github.com/ansible/ansible/issues/81254
community.docker.docker_login:
registry_url: "{{ item.url }}"
username: "{{ item.username }}"
password: "{{ item.password }}"
loop: "{{ docker_registry_login }}"
when:
- docker_registry_login is defined
- docker_user != "root"
tags: docker

View file

@ -0,0 +1,26 @@
---
# file: roles/filebeat/defaults/main.yml
filebeat_modules_detection:
- module: apache
path: /etc/apache2/apache2.conf
- module: auditd
path: /etc/audit/auditd.conf
- module: elasticsearch
path: /etc/elasticsearch/elasticsearch.yml
- module: haproxy
path: /etc/haproxy/haproxy.cfg
- module: kibana
path: /etc/kibana/kibana.yml
- module: logstash
path: /etc/logstash/logstash.yml
filebeat_logging_level: "warning"
filebeat_separate_system_logs: true
filebeat_enable_test_config: false
filebeat_ingest_firewall: false
filebeat_ingest_keycloak: false
filebeat_update: false

View file

@ -0,0 +1,22 @@
# Module: apache
# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-apache.html
# Ansible managed
- module: apache
# Access logs
access:
enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
var.paths:
- '/var/log/apache2/*access.log'
# Error logs
error:
enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
var.paths:
- '/var/log/apache2/*error.log'

View file

@ -0,0 +1,16 @@
# Module: haproxy
# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.6/filebeat-module-haproxy.html
# Ansible managed
- module: haproxy
# All logs
log:
enabled: true
var.paths: ["/var/log/haproxy.log*"]
var.input: "file"
# Set which input to use between syslog (default) or file.
#var.input:
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
#var.paths:

View file

@ -0,0 +1,13 @@
# Module: kibana
# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-kibana.html
# Ansible managed
- module: kibana
# All logs
log:
enabled: true
# Set custom paths for the log files. If left empty,
# Filebeat will choose the paths depending on your OS.
var.paths:
- '/var/log/kibana/kibana.log'

View file

@ -0,0 +1,12 @@
---
# file: roles/filebeat/handlers/main.yml
- name: systemctl daemon_reload
ansible.builtin.systemd:
daemon_reload: yes
changed_when: true
- name: restart filebeat
ansible.builtin.systemd:
name: filebeat
state: restarted

View file

@ -0,0 +1,62 @@
# Mandatory variables
Define the elastic repository version that will determine the version of filebeat:
```
elastic_major_version: "7.x"
```
Define the output for the logs; it can be either Elasticsearch or Logstash:
```
filebeat_output_elasticsearch_hosts:
- host1
- host2
```
or
```
filebeat_output_logstash_hosts:
- host1
- host2
```
If you use Elasticsearch, the connection will use HTTPS and the server's login/password to authenticate itself.
You can change the protocol to http with:
```
filebeat_output_elasticsearch_protocol: "http"
```
# Optional variables
You can disable this filebeat role by setting this variable:
```yaml
filebeat_install: false
```
By default, filebeat will send the system logs to the index `logs-infra-system` and the other logs to `logs-{{ talas_project }}-{{ talas_group }}`.
Sometimes, you want to send _all_ logs (even system logs) to the `logs-{{ talas_project }}-{{ talas_group }}` index.
If this is the case, you need to set this variable to false:
```
filebeat_separate_system_logs: false
```
You can define the log level of filebeat; the default is `warning`, and possible values are `error`, `warning`, `info` and `debug`:
```
filebeat_logging_level: "warning"
```
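Putting the pieces together, a group_vars sketch for hosts shipping straight to Elasticsearch might look like this (host names and the repository version are examples):
```yaml
# group_vars/filebeat.yml -- illustrative values
elastic_major_version: "8.x"
filebeat_output_elasticsearch_hosts:
  - es1.example.com:9200
  - es2.example.com:9200
filebeat_separate_system_logs: false  # ship system logs to the project index too
filebeat_logging_level: "info"
```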
# Modules
The `system` module is always enabled.
This role will automatically detect the installation of the following software and enable the correct modules:
- apache httpd
- elasticsearch
- haproxy
- kibana
- logstash
You can add more modules by creating this list:
```
filebeat_modules_list:
- system
```
You can see the list of modules with `filebeat modules list`.
# Update
You can perform an update of filebeat by adding: `--extra-vars '{ "filebeat_update" : true }'`

View file

@ -0,0 +1,23 @@
---
# file: roles/filebeat/tasks/key.yml
- name: "make sure /etc/apt/keyrings exists"
ansible.builtin.file:
path: "/etc/apt/keyrings"
state: directory
- name: "modern signature key"
ansible.builtin.get_url:
url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
dest: "/etc/apt/keyrings/elastic.asc"
- name: "repository file"
ansible.builtin.copy:
content: "deb [arch=amd64 signed-by=/etc/apt/keyrings/elastic.asc] https://artifacts.elastic.co/packages/{{ elastic_major_version }}/apt stable main\n"
dest: "/etc/apt/sources.list.d/elastic.list"
register: repo
- name: "apt update"
ansible.builtin.apt:
update_cache: true
when: repo.changed

View file

@ -0,0 +1,79 @@
---
# file: roles/filebeat/tasks/main.yml
- name: "fix IT-12220"
ansible.builtin.file:
state: absent
path: "{{ item }}"
loop:
- "/etc/apt/sources.list.d/artifacts_elastic_co_packages_7_x_apt.list"
- "/etc/apt/sources.list.d/artifacts_elastic_co_packages_6_x_apt.list"
- "/etc/apt/sources.list.d/filebeat.list"
- "/etc/apt/keyrings/filebeat.asc"
tags: filebeat
- name: "handle apt repository"
ansible.builtin.import_tasks: apt_repo.yml
tags: filebeat
- name: "Ensure any version of filebeat is installed"
ansible.builtin.apt:
name: filebeat
update_cache: true
when: not filebeat_update
tags: filebeat
- name: "Ensure the LATEST version of filebeat is installed"
ansible.builtin.apt:
name: filebeat
update_cache: true
state: latest
when: filebeat_update
tags: filebeat
- name: "check haproxy presence"
ansible.builtin.stat:
path: /etc/haproxy/haproxy.cfg
register: haproxy_cfg
- name: "expose boolean of haproxy presence"
ansible.builtin.set_fact:
haproxy_present: "{{ haproxy_cfg.stat.exists }}"
- name: "check coraza presence"
ansible.builtin.stat:
path: /etc/coraza/coraza.conf
register: coraza_conf
- name: "expose boolean of coraza presence"
ansible.builtin.set_fact:
coraza_present: "{{ coraza_conf.stat.exists }}"
- name: "/etc/filebeat/filebeat.yml"
ansible.builtin.template:
src: filebeat.yml.j2
dest: /etc/filebeat/filebeat.yml
backup: yes
notify: restart filebeat
tags: filebeat
- name: "import_tasks: modules.yml"
ansible.builtin.import_tasks: modules.yml
tags:
- filebeat
- filebeat_modules
- name: "/etc/systemd/system/filebeat.service"
ansible.builtin.template:
src: filebeat.service
dest: /etc/systemd/system/filebeat.service
notify:
- systemctl daemon_reload
- restart filebeat
tags: filebeat
- name: "make sure the filebeat service is enabled"
ansible.builtin.systemd_service:
name: filebeat
enabled: yes
tags: filebeat

View file

@ -0,0 +1,82 @@
---
# file: roles/filebeat/tasks/modules.yml
- name: "make sure the system module is enabled"
ansible.builtin.command:
cmd: "filebeat modules enable system"
creates: "/etc/filebeat/modules.d/system.yml"
notify: restart filebeat
tags:
- filebeat
- filebeat_modules
- name: "make sure the additional modules are enabled, if defined"
ansible.builtin.command:
cmd: "filebeat modules enable {{ item }}"
creates: "/etc/filebeat/modules.d/{{ item }}.yml"
loop: "{{ filebeat_modules_list }}"
when: filebeat_modules_list is defined
notify: restart filebeat
tags:
- filebeat
- filebeat_modules
- name: "change module name for elastic_major_version == '6.x'"
ansible.builtin.set_fact:
filebeat_modules_detection:
- module: apache2
path: /etc/apache2/apache2.conf
- module: elasticsearch
path: /etc/elasticsearch/elasticsearch.yml
- module: haproxy
path: /etc/haproxy/haproxy.cfg
- module: kibana
path: /etc/kibana/kibana.yml
- module: logstash
path: /etc/logstash/logstash.yml
when: elastic_major_version == "6.x"
tags:
- filebeat
- filebeat_modules
- name: "detect installed software to enable correct modules"
ansible.builtin.stat:
path: "{{ item['path'] }}"
loop: "{{ filebeat_modules_detection }}"
register: detection
tags:
- filebeat
- filebeat_modules
- name: "enable module for installed softwares"
ansible.builtin.command:
cmd: "filebeat modules enable {{ item.item.module }}"
creates: "/etc/filebeat/modules.d/{{ item.item.module }}.yml"
loop: "{{ detection.results }}"
when: item.stat.exists
notify: restart filebeat
tags:
- filebeat
- filebeat_modules
- name: "disable module for haproxy (this is temporary step to get rid of haproxy module, BB-673 related)"
ansible.builtin.command:
cmd: "filebeat modules disable haproxy"
creates: "/etc/filebeat/modules.d/haproxy.yml.disabled"
notify: restart filebeat
tags:
- filebeat
- filebeat_modules
- name: "module configuration: /etc/filebeat/modules.d/[module].yml"
ansible.builtin.copy:
src: "{{ item.item.module }}.yml"
dest: "/etc/filebeat/modules.d/{{ item.item.module }}.yml"
loop: "{{ detection.results }}"
when:
- item.stat.exists
- item.item.module == "apache" or item.item.module == "kibana" or item.item.module == "haproxy"
notify: restart filebeat
tags:
- filebeat
- filebeat_modules

View file

@ -0,0 +1,15 @@
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/products/beats/filebeat
Wants=network-online.target
After=network-online.target
[Service]
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
Environment="BEAT_PATH_OPTS=-path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,177 @@
# {{ ansible_managed }}
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
enabled: false
paths:
- /var/log/*.log
{% if groups["veza_app_gen_2"] is defined and ansible_hostname in groups["veza_app_gen_2"] and filebeat_enable_test_config %}
- type: filestream
id: talas-tomcat-accesslog
enabled: true
tags:
- talas_tomcat_accesslog
paths:
- /applications/tomcat/logs/*.txt
- type: filestream
id: talas-veza-app
enabled: true
tags:
- talas_veza_app
paths:
- /applications/logs/cos-veza/*/LOG/log.log
multiline.pattern: '^\[[0-9]{4}-[0-9]{2}-[0-9]{2}'
multiline.negate: true
multiline.match: after
{% endif %}
{% if filebeat_ingest_firewall %}
- type: filestream
id: talas-firewall
enabled: true
tags:
- talas_firewall
paths:
- /var/log/ulog/*.log
processors:
- add_locale: ~
{% endif %}
{% if filebeat_ingest_keycloak %}
- type: filestream
id: talas-keycloak
enabled: true
tags:
- talas_keycloak
paths:
- /var/log/keycloak/keycloak.log
{% endif %}
{% if haproxy_present %}
- type: filestream
id: talas-haproxy-http
enabled: true
tags:
- talas_haproxy_http
paths:
- /var/log/haproxy/http.log
- type: filestream
id: talas-haproxy-tcp
enabled: true
tags:
- talas_haproxy_tcp
paths:
- /var/log/haproxy/tcp.log
{% if coraza_present %}
- type: filestream
id: talas-haproxy-spoe
enabled: true
tags:
- talas_haproxy_spoe
paths:
- /var/log/haproxy/spoe.log
{% endif %}
{% endif %}
#============================= Filebeat modules ===============================
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
#==================== Elasticsearch template setting ==========================
setup.template.settings:
index.number_of_shards: 1
#================================ General =====================================
name: {{ ansible_hostname }}
{% if filebeat_setup_kibana_host is defined %}
#============================== Kibana =====================================
setup.kibana:
host: "{{ filebeat_setup_kibana_host }}"
{% endif %}
#================================ Outputs =====================================
output.elasticsearch:
hosts: [ "{{ filebeat_output_elasticsearch_hosts | join('", "') }}" ]
protocol: "{{ filebeat_output_elasticsearch_protocol | default('https') }}"
username: "{{ ansible_hostname }}"
password: "{{ ldappass }}"
pipelines:
- pipeline: "talas_tomcat_accesslog"
when:
contains:
tags: "talas_tomcat_accesslog"
- pipeline: "talas_veza_app"
when:
contains:
tags: "talas_veza_app"
- pipeline: "talas_samba_veza"
when:
contains:
tags: "talas_samba_veza"
- pipeline: "talas_samba_auditlog"
when:
contains:
tags: "talas_samba_auditlog"
- pipeline: "talas_firewall"
when:
contains:
tags: "talas_firewall"
- pipeline: "talas_keycloak_json"
when:
contains:
tags: "talas_keycloak"
{% if haproxy_present %}
- pipeline: "talas_haproxy_http"
when:
contains:
tags: "talas_haproxy_http"
- pipeline: "talas_haproxy_tcp"
when:
contains:
tags: "talas_haproxy_tcp"
{% if coraza_present %}
- pipeline: "talas_haproxy_spoe"
when:
contains:
tags: "talas_haproxy_spoe"
{% endif %}
{% endif %}
indices:
{% if filebeat_separate_system_logs %}
- index: "logs-{{ talas_project | default('infra') }}-{{ talas_group | default('default') }}"
when:
or:
- equals:
event.module: "apache"
- equals:
event.module: "haproxy"
- contains:
tags: "talas_tomcat_accesslog"
- contains:
tags: "talas_samba_veza"
- contains:
tags: "talas_samba_auditlog"
- contains:
tags: "talas_firewall"
- contains:
tags: "talas_keycloak"
{% if haproxy_present %}
- contains:
tags: "talas_haproxy_http"
- contains:
tags: "talas_haproxy_tcp"
{% if coraza_present %}
- contains:
tags: "talas_haproxy_spoe"
{% endif %}
{% endif %}
- index: "logs-infra-system"
{% else %}
- index: "logs-{{ talas_project | default('infra') }}-{{ talas_group | default('default') }}"
{% endif %}
#================================ Processors =====================================
processors:
- add_host_metadata: ~
#================================ Logging =====================================
logging.level: {{ filebeat_logging_level }}
logging.to_files: true
logging.files:
path: /var/log/filebeat
name: filebeat
keepfiles: 7
permissions: 0600

View file

@ -0,0 +1,47 @@
# Deploy files from a git repository
This role will locally clone any number of git repositories with any number of branches, then create an archive for each repository/branch combination.
Those archives will then be extracted to the remote server in the correct directory.
To make this work, you need to define an object with all the necessary variables, like this:
```
git_generic_deploy_files_list:
- repository_url: 'https://scmlab.talas.com/VirtualTryOn/talasTryOn_API_Booth.git'
branch: 'opticworld'
deploy_directory: '/var/www/opticworld.vto.talas.io/'
- repository_url: 'https://scmlab.talas.com/VirtualTryOn/talasTryOn_API_Booth.git'
branch: 'master'
deploy_directory: '/dev/shm/test/'
```
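A playbook wiring the role then follows the same pattern as the other playbooks in this repository (the deploy target below is hypothetical):
```yaml
---
# file: git_generic_deploy_files.yml
- hosts: git_generic_deploy_files
  roles:
    - git_generic_deploy_files
  vars:
    git_generic_deploy_files_list:
      - repository_url: "https://scmlab.talas.com/VirtualTryOn/talasTryOn_API_Booth.git"
        branch: "master"
        deploy_directory: "/var/www/example/"
```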
# Mandatory variables
| Variable | Description | Example |
|-----------------------------------|-------------|---------|
| repository_url | repo url | `https://scmlab.talas.com/VirtualTryOn/talasTryOn_API_Booth.git`
| branch | branch to clone | `master`
| deploy_directory | path to unpack archive | `/var/www/freescout`
# Optional variables
| Variable for git_generic_deploy_files_list | Description | Default value |
|-----------------------------------|-------------|---------------|
| deploy_directory_owner | Owner of deploy_directory path | none, same as user executing |
| deploy_directory_group | group owning of deploy_directory path | none, same as user executing |
| deploy_directory_mode | perms of deploy_directory path | |
| owner | owner of files inside the repo, same for all files | same as user executing |
| group | group owning files inside the repo, same for all files | same as user executing |
| mode | perms for files inside the repo | preserve those from repo |
The git_generic_deploy_copy variable is very simple and uses the same parameters as the copy module (https://docs.ansible.com/ansible/latest/modules/copy_module.html).
Example:
```
git_generic_deploy_copy:
- dest: "/var/www/prerequisites.talas.net/veza_prerequisites/configuration.php"
content: |
<?php
define('ACJS_URL', 'https://c1.talas.biz/43ACJS17007');
define('ACJS_USER', '{{ local_ACJS_USER }}');
define('ACJS_PASSWORD', '{{ local_ACJS_PASSWORD }}');
define('ACJS_SITE', 'm');
```

View file

@ -0,0 +1,69 @@
---
# file: roles/git_generic_deploy_files/tasks/main.yml
- name: "packages"
apt:
name: "unzip"
tags: git_generic_deploy_files
- name: "set fact for ansible_running_user"
set_fact:
ansible_running_user: "{{ lookup('env', 'USER') }}"
delegate_to: localhost
check_mode: no
tags: git_generic_deploy_files
- name: "[localhost] create the temporary directory /tmp/{{ ansible_running_user }}/ansible/git_generic_deploy_files/ansible_git_generic_deploy_files_archives/"
become: no
file:
path: /tmp/{{ ansible_running_user }}/ansible/git_generic_deploy_files/ansible_git_generic_deploy_files_archives/
state: directory
delegate_to: localhost
check_mode: no
tags: git_generic_deploy_files
- name: "[localhost] git clone and create archives"
become: no
git:
dest: "/tmp/{{ ansible_running_user }}/ansible/git_generic_deploy_files/ansible_git_generic_deploy_files_git/{{ item.repository_url | md5 }}_{{ item.branch | replace('/', '_') }}/"
repo: "{{ item.repository_url }}"
version: "{{ item.branch }}"
archive: "/tmp/{{ ansible_running_user }}/ansible/git_generic_deploy_files/ansible_git_generic_deploy_files_archives/{{ item.repository_url | md5 }}_{{ item.branch | replace('/', '_') }}.zip"
force: "{{ item.force | default(False) }}"
with_items: "{{ git_generic_deploy_files_list }}"
delegate_to: localhost
check_mode: no
tags: git_generic_deploy_files
- name: "www directories"
file:
path: "{{ item.deploy_directory }}"
owner: "{{ item.deploy_directory_owner | default(omit) }}"
group: "{{ item.deploy_directory_group | default(omit) }}"
mode: "{{ item.deploy_directory_mode | default(omit) }}"
state: directory
with_items: "{{ git_generic_deploy_files_list }}"
tags: git_generic_deploy_files
- name: "unarchive to destination"
unarchive:
src: "/tmp/{{ ansible_running_user }}/ansible/git_generic_deploy_files/ansible_git_generic_deploy_files_archives/{{ item.repository_url | md5 }}_{{ item.branch | replace('/', '_') }}.zip"
dest: "{{ item.deploy_directory }}"
owner: "{{ item.owner | default(omit) }}"
group: "{{ item.group | default(omit) }}"
mode: "{{ item.mode | default(omit) }}"
with_items: "{{ git_generic_deploy_files_list }}"
tags: git_generic_deploy_files
- name: "copy file(s)"
copy:
backup: "{{ item.backup | default(omit) }}"
content: "{{ item.content | default(omit) }}"
dest: "{{ item.dest | default(omit) }}"
owner: "{{ item.owner | default(omit) }}"
group: "{{ item.group | default(omit) }}"
mode: "{{ item.mode | default(omit) }}"
src: "{{ item.src | default(omit) }}"
with_items: '{{ git_generic_deploy_copy }}'
when: git_generic_deploy_copy is defined
tags: git_generic_deploy_files

View file

@ -0,0 +1,5 @@
# Go role
This is a basic role to install Go. By default this role installs the latest stable release of Go; you can change the version by setting `go_version`.
To use the Go binary you need to specify the full path: `/usr/local/go/bin/go`
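A minimal playbook sketch pinning the version (the host group and version number are examples):
```yaml
---
# file: go.yml
- hosts: go
  roles:
    - go
  vars:
    go_version: "1.22.4"  # omit to install the latest stable release
```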

View file

@ -0,0 +1,38 @@
---
# file: roles/go/tasks/main.yml
- name: "get go json release page"
ansible.builtin.uri:
url: https://go.dev/dl/?mode=json
return_content: true
register: go_json_response
when: go_version is not defined
tags: go
- name: "parse latest stable go version"
ansible.builtin.set_fact:
go_latest_version: "{{ go_json_response.json | community.general.json_query(query) | first | regex_replace('^go', '') }}"
vars:
query: "[?stable].version"
when: go_version is not defined
tags: go
- name: "download go {{ go_version | default(go_latest_version) }} archive"
ansible.builtin.get_url:
url: https://go.dev/dl/go{{ go_version | default(go_latest_version) }}.linux-amd64.tar.gz
dest: /dev/shm/go{{ go_version | default(go_latest_version) }}.linux-amd64.tar.gz
mode: '0644'
tags: go
- name: "remove existing go installation"
ansible.builtin.file:
path: /usr/local/go
state: absent
tags: go
- name: "extract go {{ go_version | default(go_latest_version) }} to /usr/local"
ansible.builtin.unarchive:
src: /dev/shm/go{{ go_version | default(go_latest_version) }}.linux-amd64.tar.gz
dest: /usr/local
remote_src: true
tags: go

View file

@ -0,0 +1,98 @@
---
# file: roles/haproxy/defaults/main.yml
haproxy_maxconn: "20000"
haproxy_default_frontend: True
haproxy_letsencrypt: false
# generated via https://ssl-config.mozilla.org/
haproxy_tls_modern:
ciphersuites:
- TLS_AES_128_GCM_SHA256
- TLS_AES_256_GCM_SHA384
- TLS_CHACHA20_POLY1305_SHA256
options:
- "ssl-min-ver TLSv1.3"
- "no-tls-tickets"
haproxy_tls_intermediate:
ciphers:
- ECDHE-ECDSA-AES128-GCM-SHA256
- ECDHE-RSA-AES128-GCM-SHA256
- ECDHE-ECDSA-AES256-GCM-SHA384
- ECDHE-RSA-AES256-GCM-SHA384
- ECDHE-ECDSA-CHACHA20-POLY1305
- ECDHE-RSA-CHACHA20-POLY1305
- DHE-RSA-AES128-GCM-SHA256
- DHE-RSA-AES256-GCM-SHA384
- DHE-RSA-CHACHA20-POLY1305
ciphersuites:
- TLS_AES_128_GCM_SHA256
- TLS_AES_256_GCM_SHA384
- TLS_CHACHA20_POLY1305_SHA256
options:
- "ssl-min-ver TLSv1.2"
- "no-tls-tickets"
haproxy_tls_old:
ciphers:
- ECDHE-ECDSA-AES128-GCM-SHA256
- ECDHE-RSA-AES128-GCM-SHA256
- ECDHE-ECDSA-AES256-GCM-SHA384
- ECDHE-RSA-AES256-GCM-SHA384
- ECDHE-ECDSA-CHACHA20-POLY1305
- ECDHE-RSA-CHACHA20-POLY1305
- DHE-RSA-AES128-GCM-SHA256
- DHE-RSA-AES256-GCM-SHA384
- DHE-RSA-CHACHA20-POLY1305
- ECDHE-ECDSA-AES128-SHA256
- ECDHE-RSA-AES128-SHA256
- ECDHE-ECDSA-AES128-SHA
- ECDHE-RSA-AES128-SHA
- ECDHE-ECDSA-AES256-SHA384
- ECDHE-RSA-AES256-SHA384
- ECDHE-ECDSA-AES256-SHA
- ECDHE-RSA-AES256-SHA
- DHE-RSA-AES128-SHA256
- DHE-RSA-AES256-SHA256
- AES128-GCM-SHA256
- AES256-GCM-SHA384
- AES128-SHA256
- AES256-SHA256
- AES128-SHA
- AES256-SHA
- DES-CBC3-SHA
ciphersuites:
- TLS_AES_128_GCM_SHA256
- TLS_AES_256_GCM_SHA384
- TLS_CHACHA20_POLY1305_SHA256
options:
- "ssl-min-ver TLSv1.0"
- "no-tls-tickets"
haproxy_compression_type:
- text/html
- text/plain
- text/xml
- text/css
- text/csv
- text/rtf
- text/richtext
- text/javascript
- application/x-javascript
- application/javascript
- application/ecmascript
- application/rss+xml
- application/xml
- application/json
- application/wasm
haproxy_check_interval: "2s"
haproxy_iis: false
haproxy_robotstxt: false

View file

@ -0,0 +1,9 @@
HTTP/1.0 200 OK
Cache-Control: no-cache
Connection: close
Content-Type: text/html
<html><body><h1>200 OK</h1>
OK
</body></html>

View file

@ -0,0 +1,9 @@
HTTP/1.0 404 Not Found
Cache-Control: no-cache
Connection: close
Content-Type: text/html
<html><body><h1>404 Not Found</h1>
The requested URL was not found on this server.
</body></html>

View file

@ -0,0 +1,32 @@
# https://github.com/haproxy/haproxy/blob/master/doc/SPOE.txt
# /usr/local/etc/haproxy/coraza.cfg
[coraza]
spoe-agent coraza-agent
# Process HTTP requests only (the responses are not evaluated)
messages coraza-req
# Comment the previous line and add coraza-res, to process responses also.
#messages coraza-req coraza-res
groups coraza-req coraza-res
option var-prefix coraza
option set-on-error error
timeout hello 2s
timeout idle 2m
timeout processing 500ms
use-backend coraza-spoa
log global
spoe-message coraza-req
# Arguments are required to be in this order
args app=var(txn.coraza.app) src-ip=src src-port=src_port dst-ip=dst dst-port=dst_port method=method path=path query=query version=req.ver headers=req.hdrs body=req.body
spoe-message coraza-res
# Arguments are required to be in this order
args app=var(txn.coraza.app) id=var(txn.coraza.id) version=res.ver status=status headers=res.hdrs body=res.body
event on-http-response
spoe-group coraza-req
messages coraza-req
spoe-group coraza-res
messages coraza-res

View file

@ -0,0 +1,14 @@
#!/bin/bash
# {{ ansible_managed }}
if [[ "$1" == "deploy_challenge" ]]; then
/bin/systemctl start http-letsencrypt.service
elif [[ "$1" == "clean_challenge" ]]; then
/bin/systemctl stop http-letsencrypt.service
elif [[ "$1" == "deploy_cert" ]]; then
    # dehydrated "deploy_cert" hook arguments: $2=domain, $3=keyfile, $4=certfile, $5=fullchainfile
    domain=$2
    key=$3
    fullchain=$5
    cat "$fullchain" "$key" > "/usr/local/etc/tls/haproxy/${domain}.pem"
echo "reloading haproxy"
/bin/systemctl reload haproxy.service
fi

View file

@ -0,0 +1,11 @@
# zabbix monitoring for haproxy
# every userparameter here supposes that you have a stats socket at "/run/haproxy/monitoring.sock"
# General info that doesn't need discovery, uses a cache file that is automatically refreshed if it is older than 1 minute
UserParameter=haproxy.info[*],/etc/zabbix/scripts/haproxy_info.sh $1
# discovery for FRONTEND, BACKEND and SERVERS, no cache
UserParameter=haproxy.discovery[*],/etc/zabbix/scripts/haproxy_discovery.sh $1
# return a specific stat for a specific pxname and svname, uses a cache file that is automatically refreshed if it is older than 1 minute
UserParameter=haproxy.stats[*],/etc/zabbix/scripts/haproxy_stat.py --pxname $1 --svname $2 --stat $3

View file

@ -0,0 +1,36 @@
#!/bin/bash
# Ansible managed
# modified from https://raw.githubusercontent.com/anapsix/zabbix-haproxy/master/haproxy_discovery.sh
# Get list of Frontends and Backends from HAPROXY
# Example: ./haproxy_discovery.sh FRONTEND|BACKEND|SERVERS
# the argument should be either FRONTEND, BACKEND or SERVERS, will default to FRONTEND if not set
HAPROXY_SOCK="/run/haproxy/monitoring.sock"
query_stats() {
echo "show stat" | socat ${HAPROXY_SOCK} stdio 2>/dev/null
}
get_stats() {
echo "$(query_stats)" | grep -v "^#"
}
case $1 in
B*) END="BACKEND" ;;
F*) END="FRONTEND" ;;
S*)
for backend in $(get_stats | grep BACKEND | cut -d, -f1 | uniq); do
for server in $(get_stats | grep "^${backend}," | grep -v BACKEND | cut -d, -f2); do
serverlist="$serverlist, "'{ "{#BACKEND_NAME}": "'$backend'","{#SERVER_NAME}": "'$server'" }'
done
done
echo -e '{ "data": [ '${serverlist#,}'] }'
exit 0
;;
*) END="FRONTEND" ;;
esac
for frontend in $(get_stats | grep "$END" | cut -d, -f1 | uniq); do
felist="$felist,"'{ "{#'${END}'_NAME}": "'$frontend'" }'
done
echo -e '{ "data": [ '${felist#,}']}'

View file

@ -0,0 +1,35 @@
#!/bin/bash
# Ansible managed
set -euo pipefail
TMPDIR=/dev/shm
SOCKET=/run/haproxy/monitoring.sock
TMPFILE=$TMPDIR/haproxy_info.tmp
CACHEFILE=$TMPDIR/haproxy_info.txt
CACHE_EXPIRATION_TIME_SECONDS=60
METRIC=$1
refresh_cache_file () {
echo "show info" | socat /run/haproxy/monitoring.sock stdio > $TMPFILE
# rsync is atomic
rsync -t $TMPFILE $CACHEFILE
}
# if either the tmpfile or the cachefile is not here, do the refresh
if ! [ -f $TMPFILE ] || ! [ -f $CACHEFILE ]; then
refresh_cache_file
fi
# if the cache file is too old, do the refresh
CACHEFILE_TIMESTAMP=$(stat -c %Y $CACHEFILE)
if [ ${CACHEFILE_TIMESTAMP} -lt $(date -d "${CACHE_EXPIRATION_TIME_SECONDS} seconds ago" "+%s") ]; then
refresh_cache_file
fi
# we need a special case for "Unstoppable Jobs" because that's the only metric with a space in its name
if [ $METRIC == "UnstoppableJobs" ]; then
egrep "^Unstoppable Jobs: " $CACHEFILE | sed "s/^Unstoppable Jobs: //"
else
egrep "^$METRIC: " $CACHEFILE | sed "s/^$METRIC: //"
fi

View file

@ -0,0 +1,93 @@
#!/usr/bin/python3
import argparse
import datetime
import os
import subprocess
###########################################################################
# BEGIN FUNCTIONS
###########################################################################
def parse_haproxy_stats(line):
    haproxy_dict = {}
    line_as_list = line.rstrip(",\n").split(",")
    for index, item in enumerate(line_as_list):
        field_name = field_name_list[index]
        haproxy_dict[field_name] = item
    return haproxy_dict
def refresh_cache_file():
CMD = 'echo "show stat" | socat /run/haproxy/monitoring.sock stdio > ' + stat_file + ".tmp" # noqa: N806
RESULT = subprocess.check_output(CMD, shell=True) # noqa: N806, F841
os.rename(stat_file + ".tmp", stat_file) # noqa: PTH104
###########################################################################
# END FUNCTIONS - BEGIN PARSER
###########################################################################
# begin parser
parser = argparse.ArgumentParser(
description="return a specific stat for a specific pxname and svname, it uses a cache file that is automatically refreshed if it is older than 1 minute"
)
parser.add_argument(
"--pxname",
help="this is the name of the backend, frontend or server exactly as it appears in the configuration",
type=str,
required=True,
)
parser.add_argument(
"--svname",
help="the type of the service, either FRONTEND, BACKEND or the name of the server",
type=str,
required=True,
)
parser.add_argument("--stat", help="the stat wanted", type=str, required=True)
args = parser.parse_args()
###########################################################################
# END PARSER - BEGIN PROGRAM
###########################################################################
stat_file = "/dev/shm/haproxy_stat.txt"
localtime = datetime.datetime.now()
maxage = datetime.timedelta(seconds=60)
if os.path.isfile(stat_file): # noqa: PTH113
last_modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(stat_file)) # noqa: PTH204
if localtime - last_modified_date > maxage:
# the cache file is too old, let's refresh it
refresh_cache_file()
else:
    # the cache file doesn't exist, so we need to create it
    refresh_cache_file()
# transform the stat file into a list of dicts, line by line
with open(stat_file) as stat:  # noqa: PTH123
    stat_by_line = stat.readlines()
# the first line contains the names of the fields
field_name_list = stat_by_line[0].rstrip(",\n").lstrip("# ").split(",")
# the header line is no longer needed, remove it from the list
del stat_by_line[0]
# the last line is an empty line, remove it too
del stat_by_line[-1]
# define the final object that we will query for the stats
STATS_AS_LIST = []
# populate the final object
for line in stat_by_line:
STATS_AS_LIST.append(parse_haproxy_stats(line))
# Search for the final object with pxname and svname
CORRECT_LINE = next(
(query for query in STATS_AS_LIST if query["pxname"] == args.pxname and query["svname"] == args.svname), None
)
# Print the stat if a matching element is found, otherwise print "non-existent"
print(CORRECT_LINE[args.stat] if CORRECT_LINE else "non-existent")

View file

@ -0,0 +1,9 @@
# Ansible managed
[Unit]
Description=very simple http server for letsencrypt challenge
[Service]
User=www-data
Group=www-data
ExecStart=/usr/bin/python3 -m http.server --bind 127.0.0.1 --directory /var/www/letsencrypt/ 8888

View file

@ -0,0 +1,4 @@
# Ansible managed
[Service]
BindReadOnlyPaths=/var/lib/haproxy/dev/log

View file

@ -0,0 +1,2 @@
User-agent: *
Disallow: /

View file

@ -0,0 +1,22 @@
---
# file: roles/haproxy/handlers/main.yml
- name: systemctl daemon_reload
ansible.builtin.systemd:
daemon_reload: yes
changed_when: true
- name: reload haproxy
systemd:
name: haproxy
state: reloaded
- name: restart haproxy
systemd:
name: haproxy
state: restarted
- name: restart zabbix_agent
service:
name: zabbix-agent
state: restarted

View file

@ -0,0 +1,6 @@
---
# file: roles/haproxy/meta/main.yml
dependencies:
- { role: openssl, when: haproxy_tls_profile is defined and haproxy_tls_profile == "old" }
- { role: coraza, when: haproxy_coraza is defined and haproxy_coraza }

View file

@ -0,0 +1,259 @@
This role will install haproxy from the official repository `http://haproxy.debian.net`.
<!-- TOC -->
* [Important](#important)
* [Mandatory variables](#mandatory-variables)
* [Optional variables](#optional-variables)
* [Frontends](#frontends)
* [letsencrypt automatic certificate generation](#letsencrypt-automatic-certificate-generation)
* [Coraza WAF installation](#coraza-waf-installation)
* [IIS specific headers for https](#iis-specific-headers-for-https)
* [Issues with compression](#issues-with-compression)
* [haproxy and journald](#haproxy-and-journald)
* [haproxy documentation](#haproxy-documentation)
<!-- TOC -->
# Important
This role considers that haproxy will always serve https.
This role currently doesn't handle the management of the https certificates and private keys. HAProxy looks for files in /usr/local/etc/tls/haproxy: each file here must contain the private key, the certificate and the full chain (yes, everything in one file!).
HAProxy will automatically answer https requests with SNI using the correct certificate.
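As an illustration, one way to assemble such a combined file from an existing key and full chain (a minimal sketch, not part of the role; the domain and source paths are hypothetical):
```
- name: "assemble combined PEM for haproxy (illustrative)"
  ansible.builtin.shell:
    cmd: "cat /etc/ssl/example.talas.com.fullchain.pem /etc/ssl/private/example.talas.com.key > /usr/local/etc/tls/haproxy/example.talas.com.pem"
    creates: "/usr/local/etc/tls/haproxy/example.talas.com.pem"
```
This mirrors what the Let's Encrypt hook script of this role does when deploying a certificate.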
# Mandatory variables
This role uses objects to define configuration parameters.
The haproxy version is mandatory, but should already be defined in `group_vars/all/software_versions`, so except in very specific cases (like testing a new version), you don't need to override it:
```
haproxy_version: "2.8"
```
For the backends, you can define several of them this way:
```
haproxy_backend:
- name: "identity-test"
balance: "roundrobin" # this is the default and can be ommitted
server:
- name: "id-test-1" # if undefined, takes the value of the "fqdn"
fqdn: "identity-test-node-1.talas.com" # if undefined, takes the value of the "name""
port: "8080"
- name: "id-test-2"
fqdn: "identity-test-node-2.talas.com"
port: "8080"
proto: "h2"
check: "check inter 2s fastinter 2s downinter 2s" # default is "check"
options: "string containing the options for this server, this is optional"
```
Unfortunately, this role currently cannot find out which certificates are active and thus which ones should be seen by zabbix, so you must list the https websites in this list:
```
haproxy_https_monitoring:
- identity.talas.com
```
# TLS profiles
_Changelog of the TLS parameter:_
- 2025-01: The "old" profile cannot be used anymore because of audits from our customers. It is kept for historical reasons only.
- 2025-03: migration of the last service using the "old" profile to the "intermediate" profile
- TO DO: actually delete the "old" profile so that it cannot be used anymore
The TLS configuration is generated with https://ssl-config.mozilla.org/#server=haproxy&version=2.8.
The default profile is "intermediate" (which supports TLS 1.2+) but you can switch it to modern (which supports TLS 1.3+) via this variable:
```
haproxy_tls_profile: "modern"
```
# Optional variables
You can change the default backend of the frontend:
```
haproxy_frontend:
default_backend: "error404"
```
This role has a default maximum number of connections set to 20000 (the default in vanilla haproxy is 500). You can adjust this with this variable:
```
haproxy_maxconn: 20000
```
You can also adjust the timeout values of haproxy, which are explained here:
- https://serverfault.com/questions/504308/by-what-criteria-do-you-tune-timeouts-in-ha-proxy-config / https://thehftguy.com/2016/05/22/configuring-timeouts-in-haproxy/
The default are:
```yml
haproxy_timeout_connect: "5s"
haproxy_timeout_client: "50s"
haproxy_timeout_server: "50s"
```
From [haproxy documentation](https://www.haproxy.org/download/2.8/doc/configuration.txt):
> In TCP mode (and to a lesser extent, in HTTP mode), it is highly recommended that the client timeout remains equal to the server timeout in order to avoid complex situations to debug.
You can handle robots.txt for all frontends via this variable:
```
haproxy_robotstxt: True
```
When set to true, the url /robots.txt will return:
```
User-agent: *
Disallow: /
```
It is useful when backends should not be indexed.
You can also use the robots.txt backend in only some cases; for this, just reference the `is_robots_txt` acl. Example:
```
acl something hdr(host) something.example.org
use_backend robotstxt if is_robots_txt something
```
The default acl `is_robots_txt` is defined in the standard frontend.
You can define several user lists, to have one authentication page (basic_auth):
```yaml
haproxy_userlist:
mailcatcher:
- bolle_mailcatcher
- user2
```
In this example:
- `mailcatcher` is the userlist name which you can specify in your haproxy configuration
- `bolle_mailcatcher` and `user2` are the users
Passwords are automatically generated by the role and added to HashiCorp Vault. If you wish, you can define them in advance, respecting this naming scheme:
```yaml
haproxy_basicauth_%USERNAME%_password # replace %USERNAME% with the username you've defined
```
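For example, for the user `bolle_mailcatcher` defined above, the variable would be (the value is purely illustrative):
```yaml
haproxy_basicauth_bolle_mailcatcher_password: "a-50-chars-random-string"
```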
Information: the password is added to the haproxy configuration in clear text to avoid the issue described here:
http://docs.haproxy.org/2.9/configuration.html#3.4-user
```text
Attention: Be aware that using encrypted passwords might cause significantly increased CPU usage, depending on the number of requests, and the algorithm used.
For any of the hashed variants, the password for each request must be processed through the chosen algorithm, before it can be compared to the value specified in the config file.
Most current algorithms are deliberately designed to be expensive to compute to achieve resistance against brute force attacks. They do not simply salt/hash the clear text password once, but thousands of times.
This can quickly become a major factor in HAProxy's overall CPU consumption!
```
Example of haproxy configuration:
```yaml
haproxy_frontend_raw_config: |
acl mailcatcher.bollebrands.com hdr(host) -i mailcatcher.bollebrands.com
http-request auth if mailcatcher.bollebrands.com !{ http_auth(mailcatcher) } !acme-challenge
use_backend mailcatcher.bollebrands.com if mailcatcher.bollebrands.com { http_auth_group(mailcatcher) }
```
# Frontends
By default, this role creates a frontend named "https" which has the following default configuration:
```
frontend https
filter compression
compression algo gzip
compression type text/html text/plain text/xml text/css text/csv text/rtf text/richtext application/x-javascript application/javascript application/ecmascript application/rss+xml application/xml application/json application/wasm
mode http
bind :443,:::443 v6only ssl crt /usr/local/etc/tls/haproxy alpn h2,http/1.1
bind :80,:::80 v6only
http-request set-header X-Forwarded-Proto https if { ssl_fc }
redirect scheme https code 301 if !{ ssl_fc }
option forwardfor
# block access to any git paths
acl git path,url_dec -m sub /.git
use_backend error404 if git
        # block access to paths beginning with "/manager" except from 10.0.0.0/8
acl internal_network src 10.0.0.0/8
acl manager path,url_dec -m beg /manager
use_backend error404 if manager !internal_network
        # redirect multiple trailing slashes to one slash
acl has_multiple_slash path_reg /{2,}
http-request set-path %[path,regsub(/+,/,g)] if has_multiple_slash
```
You can override the "bind" lines with this list:
```
haproxy_frontend:
bind_list:
- "127.0.0.1:443 ssl crt /usr/local/etc/tls/haproxy alpn h2,http/1.1"
- "127.0.0.1:80"
```
You can add a raw configuration to the default frontend with this variable:
```
haproxy_frontend_raw_config: |
acl admin path,url_dec -m beg /auth/admin
use_backend error404 if admin !internal_network
```
You can deactivate the default frontend with this variable:
```
haproxy_default_frontend: false
```
You can also define any number of custom frontends with this object:
```
haproxy_frontend_list:
- name: "something"
mode: "http/tcp"
bind_list:
- "*:389"
- "1.1.1.1:80"
config: |
free field to define the config of the frontend
```
This allows full control over custom frontends for haproxy.
# letsencrypt automatic certificate generation
/!\ Let's Encrypt automatic certificate generation can only be used on a single node cluster (no keepalived).
For this to work correctly, you need to have all domains in the `haproxy_https_monitoring` variable. Each domain has its own certificate; alternative names are not supported.
To activate it, set this variable:
```
haproxy_letsencrypt: true
```
During certificate generation and renewal, an http server is created to handle the challenge on port 8888. The server is created via a simple python command line and is only active during Let's Encrypt operations.
# Coraza WAF installation
Enable coraza WAF like this:
```
haproxy_coraza: true
```
If the `haproxy_waf_sample_percent` variable is defined, Coraza will be enabled in the default frontend.
However, if `waf_sample_percent` is defined within the `haproxy_frontend_list`, Coraza will be enabled in each frontend where `waf_sample_percent` is explicitly set.
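For example, to send only a sample of the requests of the default frontend to the WAF (the percentage below is illustrative):
```
haproxy_coraza: true
haproxy_waf_sample_percent: 10
```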
# IIS specific headers for https
The header `Front-End-Https On` is the equivalent to `X-Forwarded-Proto https` for IIS, to activate it, set this variable to `true`:
```
haproxy_iis: true
```
# Issues with compression
Some mime types are problematic if compressed, so compression was disabled for them; those are:
```
application/hal+json
application/prs.hal-forms+json
```
See the following tickets for more information:
- https://tracker.talas.com/browse/OP-4916
- https://tracker.talas.com/browse/OP-6532
- https://tracker.talas.com/browse/IT-9018
# haproxy and journald
In the systemd file for haproxy, the following line was added:
```
BindReadOnlyPaths=/dev/log:/var/lib/haproxy/dev/log
```
This line gives haproxy the capability to send its logs to journald. While this looks like a good idea, it is not.
With this line, the logs are duplicated between `/var/log/haproxy.log` and journald. On production, this means an increase by a factor of 40 (!!!) in the amount of writes to the disk.
With this line: 400KB/s, without: 10KB/s.
This is crazy... and remember that these are duplicate logs that we don't use, since filebeat will read `/var/log/haproxy.log` and ignore journald. This also shows the poor optimisation of journald vs simple log files, but that is another story.
Anyway, this role removes this line from the service file for all those reasons.
# haproxy documentation
Official documentation can be found at https://www.haproxy.org/download/2.8/doc/configuration.txt (change the version number for the latest if needed).
An important part that we often look for is the one that details the "Session state at disconnection", which is essential to debug connectivity issues. Search for "8.5. Session state at disconnection" in the doc to find it immediately.

View file

@ -0,0 +1,46 @@
---
# file: roles/haproxy/tasks/install_debian.yml
- name: "remove legacy key from apt-key"
apt_key:
id: "AEF2348766F371C689A7360095A42FE8353525F9"
state: absent
- name: "make sure /etc/apt/keyrings exists"
file:
path: "/etc/apt/keyrings"
state: directory
- name: "download modern signature key"
get_url:
url: "https://haproxy.debian.net/bernat.debian.org.gpg"
dest: "/dev/shm/bernat.debian.org.gpg"
changed_when: false
- name: "install modern signature key"
shell:
cmd: "cat /dev/shm/bernat.debian.org.gpg | gpg --dearmor -o /etc/apt/keyrings/haproxy.debian.net.gpg"
creates: "/etc/apt/keyrings/haproxy.debian.net.gpg"
- name: "repository file"
copy:
content: "deb [arch=amd64 signed-by=/etc/apt/keyrings/haproxy.debian.net.gpg] http://haproxy.debian.net {{ ansible_distribution_release }}-backports-{{ haproxy_version }} main\n"
dest: "/etc/apt/sources.list.d/haproxy_debian_net.list"
register: repository
- name: "refresh apt if repo was modified"
apt:
update_cache: yes
when: repository.changed
- name: "set fact to install latest version of software when the repository changed"
set_fact:
apt_state: "latest"
when: repository.changed
- name: "install haproxy"
apt:
name:
- haproxy
state: "{{ apt_state | default('present') }}"
default_release: "{{ ansible_distribution_release }}-backports-{{ haproxy_version }}"

View file

@ -0,0 +1,11 @@
---
# file: roles/haproxy/tasks/install_ubuntu.yml
- name: "repository"
apt_repository:
repo: "ppa:vbernat/haproxy-{{ haproxy_version }}"
- name: "install haproxy"
apt:
name: "haproxy"
update_cache: yes

View file

@ -0,0 +1,72 @@
---
# file: roles/haproxy/tasks/letsencrypt.yml
- name: "[letsencrypt] reload haproxy immediately when the configuration has changed, else letsencrypt challenge may fail"
systemd:
name: haproxy
state: reloaded
when: haproxy_config.changed
- name: "[letsencrypt] install git curl hexdump"
apt:
name:
- git
- curl
- bsdmainutils
update_cache: yes
- name: "[letsencrypt] directory /usr/local/etc/letsencrypt"
file:
path: "{{ item }}"
state: directory
loop:
- "/usr/local/etc/letsencrypt"
- "/var/www/letsencrypt"
- name: "[letsencrypt] git repo dehydrated"
git:
repo: https://github.com/dehydrated-io/dehydrated
dest: /usr/local/etc/letsencrypt/dehydrated
clone: yes
- name: "[letsencrypt] domains.txt"
template:
src: letsencrypt_domains.txt
dest: /usr/local/etc/letsencrypt/dehydrated/domains.txt
backup: yes
when: haproxy_https_monitoring is defined
- name: "[letsencrypt] le.config"
template:
src: letsencrypt_le.config
dest: /usr/local/etc/letsencrypt/dehydrated/le.config
backup: yes
- name: "[letsencrypt] dehydrated_haproxy_hook.sh"
copy:
src: "dehydrated_haproxy_hook.sh"
dest: "/usr/local/etc/letsencrypt/dehydrated_haproxy_hook.sh"
mode: 0700
backup: yes
- name: "[letsencrypt] http-letsencrypt.service"
copy:
src: "http-letsencrypt.service"
dest: "/etc/systemd/system/http-letsencrypt.service"
- name: "[letsencrypt] make sure the letsencrypt terms are accepted"
command: /usr/local/etc/letsencrypt/dehydrated/dehydrated --register --accept-terms --config /usr/local/etc/letsencrypt/dehydrated/le.config
register: accept_terms
changed_when: "accept_terms.stdout != '# INFO: Using main config file /usr/local/etc/letsencrypt/dehydrated/le.config\n+ Account already registered!'"
- name: "[letsencrypt] generate certificate(s) if needed"
command: "/usr/local/etc/letsencrypt/dehydrated/dehydrated --cron --out /usr/local/etc/tls --challenge http-01 --config /usr/local/etc/letsencrypt/dehydrated/le.config --hook /usr/local/etc/letsencrypt/dehydrated_haproxy_hook.sh"
register: generate_certificates
changed_when: "'Generating private key' in generate_certificates.stdout"
- name: "[letsencrypt] dehydrated crontab for automatic renew"
cron:
name: dehydrated
minute: "{{ 59 | random(seed=inventory_hostname) }}"
hour: "{{ 23 | random(seed=inventory_hostname) }}"
job: "/usr/local/etc/letsencrypt/dehydrated/dehydrated --cron --keep-going --out /usr/local/etc/tls --challenge http-01 --config /usr/local/etc/letsencrypt/dehydrated/le.config --hook /usr/local/etc/letsencrypt/dehydrated_haproxy_hook.sh"

View file

@ -0,0 +1,165 @@
---
# file: roles/haproxy/tasks/main.yml
- name: "display haproxy_version (verbosity 1 or more)"
debug:
var: haproxy_version
verbosity: 1
tags: haproxy
- name: "secrets.yml"
include_tasks: secrets.yml
loop: "{{ haproxy_userlist | dict2items | map(attribute='value') | flatten }}"
loop_control:
loop_var: user
when: haproxy_userlist is defined
tags: haproxy
- name: "debian install haproxy"
import_tasks: install_debian.yml
when: ansible_distribution == "Debian"
tags:
- haproxy
- apt_sources_list
- name: "ubuntu install haproxy"
import_tasks: install_ubuntu.yml
when: ansible_distribution == "Ubuntu"
tags: haproxy
- name: "folder /etc/systemd/system/haproxy.service.d"
file:
path: "/etc/systemd/system/haproxy.service.d"
state: directory
tags: haproxy
- name: "handle /etc/systemd/system/haproxy.service.d/override.conf to prevent double logging"
copy:
src: "override.conf"
dest: "/etc/systemd/system/haproxy.service.d/override.conf"
notify:
- systemctl daemon_reload
- restart haproxy
tags: haproxy
- name: "manage /etc/haproxy/errors/404.http and /etc/haproxy/errors/200.http"
copy:
src: "{{ item }}.http"
dest: "/etc/haproxy/errors/{{ item }}.http"
loop:
- 404
- 200
tags: haproxy
- name: "folder /usr/local/etc/tls/haproxy"
file:
path: /usr/local/etc/tls/haproxy
state: directory
mode: 0755
tags: haproxy
- name: "we need at least one certificate for haproxy to start: /usr/local/etc/tls/haproxy/selfsigned.pem"
copy:
src: selfsigned.pem
dest: /usr/local/etc/tls/haproxy/selfsigned.pem
tags: haproxy
- block:
- name: "folder /etc/haproxy/static"
file:
path: /etc/haproxy/static
state: directory
mode: 0755
- name: "manage /etc/haproxy/static/robots.txt"
copy:
src: "robots.txt"
dest: "/etc/haproxy/static/robots.txt"
tags: haproxy
- name: "undefined TLS security profile: set it to 'intermediate'"
set_fact:
haproxy_tls_profile: "intermediate"
when: haproxy_tls_profile is undefined
tags: haproxy
- name: "invalid TLS security profile"
fail:
msg: 'invalid haproxy_tls_profile "{{ haproxy_tls_profile }}", possible values are "modern" or "intermediate"'
when:
- haproxy_tls_profile != "modern"
- haproxy_tls_profile != "intermediate"
- haproxy_tls_profile != "old"
tags: haproxy
- name: "generate dhparams file (when the TLS profile is not modern)"
command: "openssl dhparam -out /usr/local/etc/tls/dh2048.pem 2048"
args:
creates: /usr/local/etc/tls/dh2048.pem
when: haproxy_tls_profile != "modern"
tags: haproxy
- name: "Modern TLS configuration"
set_fact:
tls_ciphersuites: "{{ haproxy_tls_modern['ciphersuites'] }}"
tls_options: "{{ haproxy_tls_modern['options'] }}"
when: haproxy_tls_profile == "modern"
tags: haproxy
- name: "Intermediate TLS configuration"
set_fact:
tls_ciphers: "{{ haproxy_tls_intermediate['ciphers'] }}"
tls_ciphersuites: "{{ haproxy_tls_intermediate['ciphersuites'] }}"
tls_options: "{{ haproxy_tls_intermediate['options'] }}"
when: haproxy_tls_profile == "intermediate"
tags: haproxy
- name: "Old TLS configuration"
set_fact:
tls_ciphers: "{{ haproxy_tls_old['ciphers'] }}"
tls_ciphersuites: "{{ haproxy_tls_old['ciphersuites'] }}"
tls_options: "{{ haproxy_tls_old['options'] }}"
when: haproxy_tls_profile == "old"
tags: haproxy
- name: "coraza spoa configuration"
ansible.builtin.copy:
src: coraza.cfg
dest: /etc/haproxy/coraza.cfg
when:
- haproxy_coraza is defined
- haproxy_coraza
tags:
- haproxy
- coraza
- name: "/etc/haproxy/haproxy.cfg"
template:
src: "haproxy.cfg"
dest: "/etc/haproxy/haproxy.cfg"
backup: yes
validate: "haproxy -c -f %s"
notify: reload haproxy
register: haproxy_config
tags: haproxy
- name: "lets encrypt"
import_tasks: letsencrypt.yml
when: haproxy_letsencrypt
tags:
- haproxy
- letsencrypt
- name: "check if the folder /etc/zabbix/zabbix_agentd.conf.d exists"
stat:
path: "/etc/zabbix/zabbix_agentd.conf.d"
register: zabbix_folder
tags:
- haproxy
- zabbix
- name: "import_tasks: zabbix.yml"
import_tasks: zabbix.yml
when: zabbix_folder.stat.exists
tags:
- haproxy
- zabbix

View file

@ -0,0 +1,20 @@
---
# file: roles/haproxy/tasks/secrets.yml
- name: "handle secret {{ user }}"
block:
- name: "get {{ user }} from hashicorp vault"
set_fact:
"{{ user }}": "{{ lookup('hashi_vault', 'secret=talas-kv/data/' + host_vars_location + '/' + ansible_hostname)['haproxy_basicauth_' + user + '_password'] }}"
rescue:
- name: "generate a random password for {{ user }}"
set_fact:
password: "{{ lookup('password','/dev/null chars=ascii_letters,digits length=50') }}"
- name: "patching hashicorp vault with generated {{ user }}"
delegate_to: localhost
become: no
command: "vault kv patch talas-kv/{{ host_vars_location }}/{{ ansible_hostname }} haproxy_basicauth_{{ user }}_password={{ password }}"
- name: "assign password value to {{ user }}"
set_fact:
"haproxy_basicauth_{{ user }}_password": "{{ password }}"
tags: haproxy

View file

@ -0,0 +1,31 @@
---
# file: roles/haproxy/tasks/zabbix.yml
- name: "apt install socat"
apt:
name:
- socat
- rsync
- name: "[zabbix] cache scripts"
copy:
src: "{{ item }}"
dest: "/etc/zabbix/scripts/{{ item }}"
mode: 0755
loop:
- haproxy_discovery.sh
- haproxy_info.sh
- haproxy_stat.py
- name: "[zabbix] userparameters"
copy:
src: haproxy.conf
dest: /etc/zabbix/zabbix_agentd.conf.d/haproxy.conf
mode: 0644
notify: restart zabbix_agent
- name: "[zabbix] https discovery file /usr/local/etc/tls/zabbix.discovery"
template:
src: zabbix.discovery
dest: /usr/local/etc/tls/zabbix.discovery
when: haproxy_https_monitoring is defined

View file

@ -0,0 +1,222 @@
# {{ ansible_managed }}
{% if haproxy_userlist is defined %}
{% for userlist, users in haproxy_userlist.items() %}
userlist {{ userlist }}
{% for user in users %}
user {{ user }} insecure-password {{ lookup('hashi_vault', 'secret=talas-kv/data/' + host_vars_location + '/' + ansible_hostname)['haproxy_basicauth_' + user + '_password'] | mandatory }}
{% endfor %}
{% endfor %}
{% endif %}
# BEGIN GLOBAL AND DEFAULTS
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
stats socket /run/haproxy/monitoring.sock mode 666 level user
stats timeout 30s
user haproxy
group haproxy
daemon
maxconn {{ haproxy_maxconn }}
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
# Current TLS profile: {{ haproxy_tls_profile }}
ssl-default-bind-curves X25519:prime256v1:secp384r1
{% if tls_ciphers is defined %}
ssl-default-bind-ciphers {{ tls_ciphers|join(':') }}
ssl-default-server-ciphers {{ tls_ciphers|join(':') }}
{% endif %}
ssl-default-bind-ciphersuites {{ tls_ciphersuites|join(':') }}
ssl-default-server-ciphersuites {{ tls_ciphersuites|join(':') }}
ssl-default-bind-options {{ tls_options|join(' ') }}
ssl-default-server-options {{ tls_options|join(' ') }}
{% if haproxy_tls_profile != "modern" %}
ssl-dh-param-file /usr/local/etc/tls/dh2048.pem
{% endif %}
defaults
log global
mode http
option httplog
option dontlognull
timeout connect {{ haproxy_timeout_connect | default('5s') }}
timeout client {{ haproxy_timeout_client | default('50s') }}
timeout server {{ haproxy_timeout_server | default('50s') }}
timeout http-request {{ haproxy_timeout_http_request | default('5s') }}
timeout client-fin {{ haproxy_timeout_client_fin | default('30s') }}
timeout tunnel {{ haproxy_timeout_tunnel | default('1h') }}
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
# END GLOBAL AND DEFAULTS => BEGIN FRONTENDS
{% if haproxy_default_frontend %}
frontend https
filter compression
compression algo gzip
compression type {{ haproxy_compression_type|join(' ') }}
mode http
{% if haproxy_frontend['bind_list'] is defined %}
{% for bind in haproxy_frontend['bind_list'] %}
bind {{ bind }}
{% endfor %}
{% else %}
bind :443,:::443 v6only ssl crt /usr/local/etc/tls/haproxy alpn h2,http/1.1
bind :80,:::80 v6only
{% endif %}
http-request set-header X-Forwarded-Proto https if { ssl_fc }
{% if haproxy_iis %}
http-request set-header Front-End-Https On if { ssl_fc }
{% endif %}
redirect scheme https code 301 if !{ ssl_fc }
option forwardfor
default_backend {{ haproxy_frontend['default_backend'] | default('error404') }}
        # add a header to know on which haproxy we are
http-response set-header x-proxy-id {{ ansible_hostname }}
# HSTS for 1 year
http-response set-header Strict-Transport-Security "max-age=31536000; preload"
# block access to any git paths
acl git path,url_dec -m sub /.git
use_backend error404 if git
        # block access to paths beginning with "/manager" except from 10.0.0.0/8
acl internal_network src 10.0.0.0/8
acl manager path,url_dec -m beg /manager
use_backend error404 if manager !internal_network
        # redirect multiple trailing slashes to one slash
acl has_multiple_slash path_reg /{2,}
http-request set-path %[path,regsub(/+,/,g)] if has_multiple_slash
acl is_robots_txt path /robots.txt
{% if haproxy_robotstxt %}
use_backend robotstxt if is_robots_txt
{% endif %}
{% if haproxy_letsencrypt %}
acl acme-challenge path_beg -i /.well-known/acme-challenge
use_backend letsencrypt if acme-challenge
{% endif %}
{% if haproxy_coraza is defined and haproxy_coraza and haproxy_waf_sample_percent is defined %}
acl openvas src 185.14.128.171 2a03:a240:0:1dea::a2
{% if haproxy_waf_sample_percent | int < 100 %}
acl waf_trigger rand(100) lt {{ haproxy_waf_sample_percent }}
{% endif %}
http-request set-var(txn.coraza.app) str(haproxy_waf)
filter spoe engine coraza config /etc/haproxy/coraza.cfg
{% if haproxy_waf_sample_percent | int < 100 %}
http-request send-spoe-group coraza coraza-req if waf_trigger !openvas
{% else %}
        http-request send-spoe-group coraza coraza-req if !openvas
{% endif %}
http-request redirect code 302 location %[var(txn.coraza.data)] if { var(txn.coraza.action) -m str redirect }
http-response redirect code 302 location %[var(txn.coraza.data)] if { var(txn.coraza.action) -m str redirect }
http-request deny deny_status 403 hdr waf-block "request" if { var(txn.coraza.action) -m str deny }
http-response deny deny_status 403 hdr waf-block "response" if { var(txn.coraza.action) -m str deny }
http-request silent-drop if { var(txn.coraza.action) -m str drop }
http-response silent-drop if { var(txn.coraza.action) -m str drop }
http-request deny deny_status 500 if { var(txn.coraza.error) -m int gt 0 }
http-response deny deny_status 500 if { var(txn.coraza.error) -m int gt 0 }
{% endif %}
{% if haproxy_frontend_raw_config is defined %}
{{ haproxy_frontend_raw_config|indent(8, True) }}
{% endif %}
{% endif %}
{% if haproxy_frontend_list is defined %}
{% for frontend in haproxy_frontend_list %}
frontend {{ frontend['name'] }}
mode {{ frontend['mode'] | default('http') }}
{% for bind in frontend['bind_list'] %}
bind {{ bind }}
{% endfor %}
{% if frontend['mode'] is defined and frontend['mode'] == 'tcp' %}
option tcplog
{% endif %}
{% if frontend['use_backend'] is defined %}
use_backend {{ frontend['use_backend'] }}
{% endif %}
{% if haproxy_coraza is defined and haproxy_coraza and frontend['waf_sample_percent'] %}
acl openvas src 185.14.128.171 2a03:a240:0:1dea::a2
{% if frontend['waf_sample_percent'] | int < 100 %}
acl waf_trigger rand(100) lt {{ frontend['waf_sample_percent'] }}
{% endif %}
http-request set-var(txn.coraza.app) str(haproxy_waf)
filter spoe engine coraza config /etc/haproxy/coraza.cfg
{% if frontend['waf_sample_percent'] | int < 100 %}
http-request send-spoe-group coraza coraza-req if waf_trigger !openvas
{% else %}
        http-request send-spoe-group coraza coraza-req if !openvas
{% endif %}
http-request redirect code 302 location %[var(txn.coraza.data)] if { var(txn.coraza.action) -m str redirect }
http-response redirect code 302 location %[var(txn.coraza.data)] if { var(txn.coraza.action) -m str redirect }
http-request deny deny_status 403 hdr waf-block "request" if { var(txn.coraza.action) -m str deny }
http-response deny deny_status 403 hdr waf-block "response" if { var(txn.coraza.action) -m str deny }
http-request silent-drop if { var(txn.coraza.action) -m str drop }
http-response silent-drop if { var(txn.coraza.action) -m str drop }
http-request deny deny_status 500 if { var(txn.coraza.error) -m int gt 0 }
http-response deny deny_status 500 if { var(txn.coraza.error) -m int gt 0 }
{% endif %}
{% if frontend['config'] is defined %}
{{ frontend['config']|indent(8, True) }}
{% endif %}
{% endfor %}
{% endif %}
# END FRONTENDS => BEGIN BACKENDS
backend error404
mode http
errorfile 503 /etc/haproxy/errors/404.http
{% if haproxy_letsencrypt %}
backend letsencrypt
http-request set-path %[path,regsub(/.well-known/acme-challenge/,/)]
server localhost 127.0.0.1:8888
{% endif %}
{% if haproxy_coraza is defined and haproxy_coraza %}
backend coraza-spoa
mode tcp
server coraza_spoa 127.0.0.1:9000
{% endif %}
backend robotstxt
mode http
http-request return status 200 content-type "text/plain" file "/etc/haproxy/static/robots.txt" hdr "cache-control" "no-cache"
{% if haproxy_backend is defined %}
{% for backend in haproxy_backend %}
backend {{ backend['name'] }}
{% if backend['mode'] is defined %}
mode {{ backend['mode'] }}
{% endif %}
{% if backend['raw_config'] is defined %}
{{ backend['raw_config']|indent(8, True) }}{% endif %}
{% if backend['balance'] is defined %}
balance {{ backend['balance'] }}
{% endif %}
{% if backend['source'] is defined %}
source {{ backend['source'] }}
{% endif %}
{% if backend['server'] is defined %}
{% for server in backend['server'] %}
server {{ server['name'] | default(server['fqdn']) }} {{ server['fqdn'] | default(server['name']) }}:{{ server['port'] | default('80') }}{% if server['proto'] is defined %} proto {{ server['proto'] }}{% endif %} {{ server['check'] | default('check') }}{% if server['options'] is defined %} {{ server['options'] }}{% endif %}
{% endfor %}
{% endif %}
{% if backend['server'] is defined and (backend['mode'] is undefined or backend['mode'] == 'http') %}
filter compression
compression algo gzip
compression type {{ haproxy_compression_type|join(' ') }}
{% endif %}
{% endfor %}
{% endif %}
# END BACKENDS

View file

@ -0,0 +1,3 @@
{% for domain in haproxy_https_monitoring %}
{{ domain }}
{% endfor %}

View file

@ -0,0 +1,4 @@
# {{ ansible_managed }}
WELLKNOWN=/var/www/letsencrypt
KEYSIZE="2048"
HOOK_CHAIN=yes

View file

@ -0,0 +1,3 @@
{% for https in haproxy_https_monitoring %}
{{ https }}
{% endfor %}

View file

@ -0,0 +1,28 @@
# incus-client
## Variable reference
### Mandatory variables
| Variable | Description | Example value |
| -------- | ----------- | ------------- |
| incus_repository | name of the zabbly incus repo to use | lts-6.0 |
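For example, in your host or group variables (reusing the example value from the table above):
```
incus_repository: "lts-6.0"
```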
## Token documentation
* On the server you need to connect to:
Create an unrestricted access (all projects):
```
incus config trust add [client_name]
```
Create a token restricted to a project:
```
incus config trust add [client_name] --projects [project_name]
```
Copy the token, you will need it below.
* On the client:
```
incus remote add srv-384 srv-384.talas.com
```
Then paste the token there.

View file

@ -0,0 +1,34 @@
---
# file: roles/incus-client/tasks/main.yml
- name: "https://pkgs.zabbly.com/key.asc"
ansible.builtin.get_url:
url: "https://pkgs.zabbly.com/key.asc"
dest: "/etc/apt/keyrings/zabbly.asc"
tags: incus
- name: "/etc/apt/sources.list.d/zabbly.sources"
ansible.builtin.copy:
content: |
Enabled: yes
Types: deb
URIs: https://pkgs.zabbly.com/incus/{{ incus_repository }}
Suites: {{ ansible_distribution_release }}
Components: main
Architectures: amd64
Signed-By: /etc/apt/keyrings/zabbly.asc
dest: "/etc/apt/sources.list.d/zabbly.sources"
register: repository_incus
tags: incus
- name: "apt update"
ansible.builtin.apt:
update_cache: yes
when: repository_incus.changed
tags: incus
- name: "install incus-client"
ansible.builtin.apt:
name:
- incus-client
tags: incus

View file

@ -0,0 +1,140 @@
---
# file: roles/incus/defaults/main.yml
incus_version_epoch: 1
incus_update_now: false
incus_zfs_backend: true
incus_zfs_root_dataset: "nvme/incus"
incus_standard_profiles:
- name: cpu-2-cores
config: '{ "limits.cpu" : "2" }'
- name: cpu-4-cores
config: '{ "limits.cpu" : "4" }'
- name: cpu-8-cores
config: '{ "limits.cpu" : "8" }'
- name: cpu-10-cores
config: '{ "limits.cpu" : "10" }'
- name: cpu-16-cores
config: '{ "limits.cpu" : "16" }'
- name: cpu-20-cores
config: '{ "limits.cpu" : "20" }'
- name: cpu-32-cores
config: '{ "limits.cpu" : "32" }'
- name: cpu-48-cores
config: '{ "limits.cpu" : "48" }'
- name: cpu-64-cores
config: '{ "limits.cpu" : "64" }'
- name: cpu-128-cores
config: '{ "limits.cpu" : "128" }'
- name: mem-2GiB
config: '{ "limits.memory" : "2GiB" }'
- name: mem-4GiB
config: '{ "limits.memory" : "4GiB" }'
- name: mem-6GiB
config: '{ "limits.memory" : "6GiB" }'
- name: mem-10GiB
config: '{ "limits.memory" : "10GiB" }'
- name: mem-20GiB
config: '{ "limits.memory" : "20GiB" }'
- name: mem-30GiB
config: '{ "limits.memory" : "30GiB" }'
- name: mem-40GiB
config: '{ "limits.memory" : "40GiB" }'
- name: mem-50GiB
config: '{ "limits.memory" : "50GiB" }'
- name: mem-60GiB
config: '{ "limits.memory" : "60GiB" }'
- name: mem-100GiB
config: '{ "limits.memory" : "100GiB" }'
- name: mem-200GiB
config: '{ "limits.memory" : "200GiB" }'
- name: mem-250GiB
config: '{ "limits.memory" : "250GiB" }'
- name: mem-500GiB
config: '{ "limits.memory" : "500GiB" }'
incus_yaml_unconfigured: |-
config: {}
networks: []
storage_pools: []
storage_volumes: []
profiles:
- config: {}
description: Default Incus profile
devices: {}
name: default
project: ""
projects:
- config:
features.images: "true"
features.networks: "true"
features.networks.zones: "true"
features.profiles: "true"
features.storage.buckets: "true"
features.storage.volumes: "true"
description: Default Incus project
name: default
certificates: []
incus_standalone_init_yaml: |-
config: {}
networks: []
storage_pools:
- config:
source: {{ incus_zfs_root_dataset }}
description: ""
name: default
driver: zfs
profiles:
- config: {}
description: ""
devices:
root:
path: /
pool: default
type: disk
name: default
cluster: null
incus_cluster_ovn_conf:
northbound_connection: "{{ incus_ovn_northbound }}"
ca_cert: "{{ incus_client_cert_issuing_ca_chain }}"
client_cert: "{{ incus_client_cert_ca }}"
client_key: "{{ incus_client_cert_private_key }}"
incus_cluster_main_init_yaml: |
config:
cluster.https_address: {{ incus_ip }}:8443
core.https_address: {{ incus_ip }}:8443
storage_pools:
- name: default
driver: zfs
config:
source: {{ incus_zfs_root_dataset }}
cluster:
server_name: {{ ansible_hostname }}
enabled: true
incus_cluster_init_yaml: |
config:
cluster.https_address: {{ incus_ip }}:8443
core.https_address: {{ incus_ip }}:8443
cluster:
server_name: {{ ansible_hostname }}
enabled: true
cluster_address: {{ incus_cluster_main_ip }}
cluster_token: {{ incus_cluster_add.stdout }}
server_address: {{ incus_ip }}
member_config:
{{ incus_cluster_init_member_config | indent(width=4) }}
incus_cluster_init_member_config: |
- entity: storage-pool
name: default
key: driver
value: zfs
- entity: storage-pool
name: default
key: source
value: {{ incus_zfs_root_dataset }}

View file

@ -0,0 +1,411 @@
#!/usr/local/venvs/init_container/bin/python
import argparse
import ipaddress
import logging
import os
import socket
import subprocess
import sys
import time
import pylxd
###############################################################################
# begin logger
###############################################################################
MYNAME = sys.argv[0]
LOGDIR = "~/" + os.path.basename(MYNAME).replace(".py", "_LOG") # noqa: PTH119
EXPANDED_LOGDIR = os.path.expanduser(LOGDIR)  # noqa: PTH111
if os.path.isdir(EXPANDED_LOGDIR) is False:  # noqa: PTH112
    os.mkdir(EXPANDED_LOGDIR)  # noqa: PTH102
LOGFILE = EXPANDED_LOGDIR + "/" + time.strftime("%Y-%m-%d") + ".log"
# from https://docs.python.org/2/howto/logging-cookbook.html
logger = logging.getLogger("jdl")
logger.setLevel(logging.DEBUG)
# create file handler
fh = logging.FileHandler(LOGFILE)
fh.setLevel(logging.INFO)
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
logger.info("Program Starting")
###############################################################################
# end logger - start parser
###############################################################################
domain_list = [
"talas.com",
"int.talas.veza",
"talas.dev",
"hds.talas.cloud",
"int.talas.cloud",
"test.talas.com",
"hds.reflex-holding.com",
"talas.sh",
]
parser = argparse.ArgumentParser(description="script to create a container on Debian or Ubuntu")
parser.add_argument("--name", help="name of the container", type=str, required=True)
parser.add_argument(
"--network",
help="network profile name for this container, this script only handle containers with 1 network profile, the name of the profile is usualy 'net-vlanXXX'",
type=str,
required=True,
)
parser.add_argument(
"--distrib",
help="distribution to use, current default is 'debian/bookworm'. Example: 'ubuntu/focal' 'debian/bookworm', you can see a list of available image with 'lxc image list images:'",
type=str,
default="debian/bookworm",
)
parser.add_argument(
"--cpu",
help="number of CPU threads for the container, will create the relevant profile if it doesn't exist (default=4)",
type=int,
default=4,
)
parser.add_argument(
"--memory",
help="memory limit in GiB, will create the relevant profile if it doesn't exist (default=4GiB)",
type=int,
default=4,
)
parser.add_argument("--quota", help="zfs quota in GiB for the root drive (default=10)", type=int, default=10)
parser.add_argument(
"--pool", help='name of the incus storage pool to use, the default value is "default" ', type=str, default="default"
)
parser.add_argument(
"--ip",
help=f"private IPv4 address, default to the DNS resolution of 'container name' with one of the following domains: {domain_list}. You can specify a mask, if not, a /24 will be used",
type=ipaddress.IPv4Interface,
)
parser.add_argument(
"--public",
help="Allow the use of public IP address with the --ip parameter, you will need to set a public DNS server with the --dns option for this to work as intended",
action="store_true",
)
parser.add_argument(
"--gateway",
help="IP address of the gateway, if unspecified, will use the last IP of the network",
type=ipaddress.ip_address,
)
parser.add_argument(
"--proxy", help="address of the proxy server, if any, syntax is 'hostname:port' ", type=str, default="no"
)
parser.add_argument(
"--dns",
help="DNS servers to use, the default is to copy the /etc/resolv.conf of the current host",
type=ipaddress.ip_address,
nargs="+",
)
parser.add_argument(
"--networkmethod",
help="which method should be used to configure the network, supported options are: 'networkd' or 'interfaces', default 'networkd', 'interfaces' should not be used anymore since Debian 11 is now working with networkd",
type=str,
choices=["networkd", "interfaces"],
default="networkd",
)
parser.add_argument("--debug", help="enable debug output", action="store_true")
args = parser.parse_args()
if args.debug:
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
if args.ip is None:
resolved = False
for domain in domain_list:
try:
ip = socket.gethostbyname(f"{args.name}.{domain}") + "/24"
args.ip = ipaddress.IPv4Interface(ip)
logger.info(f'resolved "{args.name}.{domain}" to "{ip}"')
resolved = True
break
except: # noqa: E722
logger.debug(f'could not resolve "{args.name}.{domain}"')
if not resolved:
logger.info(f'could not resolve "{args.name}" with any of the following domains: {domain_list}, exiting.')
sys.exit(1)
###############################################################################
# end parser - start functions
###############################################################################
def ping(IP): # noqa: N803
try:
CMD = "ping -w 1 " + IP # noqa: N806
subprocess.check_output(CMD, shell=True)
logger.error("Specified IP is already up, exiting")
return True # noqa: TRY300
except: # noqa: E722
logger.debug("Specified IP is not responding, continuing")
return False
def check_container_exists(name):
try:
client.instances.get(name)
logger.error("container " + name + " already exists, exiting")
return True # noqa: TRY300
except: # noqa: E722
logger.debug("the container doesn't exist, proceeding")
return False
def wait(duration_in_sec):
print("waiting a bit ", end="", flush=True)
for n in range(1, duration_in_sec): # noqa: B007
time.sleep(1)
print(".", end="", flush=True)
print("")
###############################################################################
# end functions - start program
###############################################################################
# check if a network mask was specified
if args.ip.netmask == ipaddress.IPv4Address("255.255.255.255"):
logger.debug("no netmask specified, assuming netmask /24")
ip = ipaddress.IPv4Interface(str(args.ip.ip) + "/24")
else:
ip = args.ip
# verify that the IP address is private
if not ip.is_private and not args.public:
logger.error(
"IP address "
+ ip.exploded
+ " is not a private IPv4 address, use the --public option if you really want to use a public IP"
)
sys.exit(1)
# is this IP already up?
logger.debug(f"trying to ping {ip.ip} from this host")
if ping(str(ip.ip)):
sys.exit(1)
# connect ourselves to the incus daemon
try:
client = pylxd.Client(endpoint="/var/lib/incus/unix.socket")
except: # noqa: E722
logger.error("could not connect to the local incus daemon") # noqa: TRY400
sys.exit(1)
# check if the container already exists
logger.debug(f"check if a container with the name {args.name} already exists...")
if check_container_exists(args.name):
sys.exit(1)
# check if the storage pool exists
logger.debug(f'checking if the storage pool "{args.pool}" exists')
if client.storage_pools.exists(args.pool):
logger.debug(f'the storage pool "{args.pool}" exists')
else:
    logger.error(f'the storage pool "{args.pool}" does not exist!')
sys.exit(1)
# check if the network profile already exists
logger.debug("check if the network profile exists (it cannot be created automatically to avoid mistakes)")
try:
client.profiles.get(args.network)
logger.debug(f"network profile {args.network} already exists")
except: # noqa: E722
logger.error( # noqa: TRY400
f"network profile {args.network} does not exists, are you sure you that you did specify a correct profile name?"
)
sys.exit(1)
profile_cpu = "cpu-" + str(args.cpu) + "-cores"
profile_memory = f"mem-{args.memory}GiB"
try:
client.profiles.get(profile_cpu)
logger.debug("profile " + profile_cpu + " already exists")
except: # noqa: E722
logger.info("creating profile " + profile_cpu)
profile_cpu_config = {"limits.cpu": str(args.cpu)}
client.profiles.create(profile_cpu, config=profile_cpu_config)
try:
client.profiles.get(profile_memory)
logger.debug("profile " + profile_memory + " already exists")
except: # noqa: E722
logger.info("creating profile " + profile_memory)
profile_memory_config = {"limits.memory": f"{args.memory}GiB"}
client.profiles.create(profile_memory, config=profile_memory_config)
profile_list = [args.network, profile_cpu, profile_memory]
device_list = {"root": {"path": "/", "pool": args.pool, "type": "disk", "size": f"{args.quota}GiB"}}
logger.debug(f"profile_list = profile_list") # noqa: F541
logger.debug('IT-8735: adding security.nesting: "true" on every containers')
config = {
"name": args.name,
"source": {
"type": "image",
"alias": args.distrib,
"mode": "pull",
"server": "https://images.linuxcontainers.org",
"protocol": "simplestreams",
},
"profiles": profile_list,
"devices": device_list,
"config": {"security.nesting": "true"},
}
logger.info("creating container " + args.name)
container = client.instances.create(config, wait=True)
logger.info("container created!")
# IT-8895 - define LXC systemd override file location
lxc_override_file = "/etc/systemd/system/service.d/lxc.conf"
logger.info(f"removing {lxc_override_file} if it exists - see IT-8895")
try:
container.files.delete(lxc_override_file)
logger.info(f"deleted {lxc_override_file}")
except Exception as e: # noqa: F841
logger.info(f"{lxc_override_file} did not exist")
# find the interface name for the network profile:
profile = client.profiles.get(args.network)
interface_name = list(profile.devices.keys())[0] # noqa: RUF015
if args.gateway is None:
gateway = str(list(ip.network.hosts())[-1])
logger.debug("defining the gateway with the last IP of the network: " + gateway)
else:
gateway = args.gateway.compressed
logger.debug("using manually defined gateway " + gateway)
# create the network config
if args.networkmethod == "networkd":
network_config_path = "/etc/systemd/network/10-autogenerated.network"
network_config_content = (
"[Match]\nName="
+ interface_name
+ "\n[Network]\nLinkLocalAddressing=no\nAddress="
+ str(ip)
+ "\nGateway="
+ gateway
+ "\n"
)
elif args.networkmethod == "interfaces":
network_config_path = "/etc/network/interfaces"
network_config_content = (
"# loopback\nauto lo\niface lo inet loopback\n\nauto "
+ interface_name
+ "\niface "
+ interface_name
+ " inet static\n\taddress "
+ str(ip.ip)
+ "\n\tnetmask "
+ str(ip.netmask)
+ "\n\tgateway "
+ gateway
+ "\n"
)
logger.debug("network config:\n" + network_config_content)
# send the network config
container.files.put(network_config_path, network_config_content)
# path and content of the apt configuration to not install the recommended packages by default
no_recommends_path = "/etc/apt/apt.conf.d/no_recommends.conf"
no_recommends_content = 'APT::Install-Recommends "false";\n'
container.files.put(no_recommends_path, no_recommends_content)
# if a proxy was defined, add the apt configuration for it
if args.proxy != "no":
apt_proxy_configuration_path = "/etc/apt/apt.conf.d/proxy.conf"
apt_proxy_configuration_content = (
'Acquire {\n HTTP::proxy "http://' + args.proxy + '";\n HTTPS::proxy "http://' + args.proxy + '";\n}\n'
)
container.files.put(apt_proxy_configuration_path, apt_proxy_configuration_content)
logger.info("starting the container...")
container.start()
wait(5)
logger.info("container started")
# execute all the needed chmod
container.execute(["chmod", "644", network_config_path])
container.execute(["chmod", "644", no_recommends_path])
if args.proxy != "no":
container.execute(["chmod", "644", apt_proxy_configuration_path])
# purge netplan.io
logger.info("apt purge -y netplan.io")
RESULT = container.execute(["apt-get", "purge", "-y", "netplan.io"])
logger.debug(str(RESULT))
wait(1)
# stop and disable systemd-resolved
logger.info("systemctl stop systemd-resolved.service")
RESULT = container.execute(["systemctl", "stop", "systemd-resolved.service"])
logger.debug(str(RESULT))
wait(2)
logger.info("systemctl disable systemd-resolved.service")
RESULT = container.execute(["systemctl", "disable", "systemd-resolved.service"])
logger.debug(str(RESULT))
wait(2)
# handle resolv.conf
resolvconf_path = "/etc/resolv.conf"
if args.dns is None:
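# no DNS servers were given: copy the host's own resolv.conf into the container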
resolvconf_content = open(resolvconf_path, "r").read() # noqa: SIM115, PTH123, UP015
else:
resolvconf_content = "# initial configuration\n"
for dns in args.dns:
resolvconf_content = resolvconf_content + "nameserver " + str(dns) + "\n"
logger.info(f"unlink {resolvconf_path}")
container.execute(["unlink", resolvconf_path])
logger.debug("resolvconf_content:\n" + resolvconf_content)
logger.info(f"sending {resolvconf_path}")
container.files.put(resolvconf_path, resolvconf_content)
container.execute(["chmod", "644", resolvconf_path])
logger.info("waiting 3 seconds to allow the system to take into account the DNS change...")
wait(3)
if args.networkmethod == "networkd":
# start and enable systemd-networkd
logger.info("systemctl enable systemd-networkd")
RESULT = container.execute(["systemctl", "enable", "systemd-networkd"])
logger.debug(str(RESULT))
logger.info("removing default config file /etc/systemd/network/eth0.network")
container.execute(["unlink", "/etc/systemd/network/eth0.network"])
wait(1)
logger.info("systemctl restart systemd-networkd")
RESULT = container.execute(["systemctl", "restart", "systemd-networkd"])
logger.debug(str(RESULT))
wait(2)
# handle authorized_keys
logger.info("mkdir /root/.ssh")
container.execute(["mkdir", "/root/.ssh"])
authorized_keys_path = "/root/.ssh/authorized_keys"
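# read the host's authorized_keys and push it unchanged into the container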
authorized_keys_content = open(authorized_keys_path, "r").read() # noqa: SIM115, PTH123, UP015
logger.info("sending " + authorized_keys_path)
container.files.put(authorized_keys_path, authorized_keys_content)
# install openssh
logger.info("apt update")
RESULT = container.execute(["apt", "update"])
logger.debug(str(RESULT))
wait(3)
logger.info("apt install -y openssh-server python3")
RESULT = container.execute(["apt", "install", "-y", "openssh-server", "python3"])
logger.debug(str(RESULT))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4


@ -0,0 +1,16 @@
# {{ ansible_managed }}
# incus monitoring using API and command line
UserParameter=incus.container.discovery,sudo /usr/bin/incus list --format csv -c n,s 2>/dev/null|grep RUNNING| cut -d',' -f1 | /bin/sed -e '$ ! s/\(.*\)/{"{{ '{' }}#CONTAINER}":"\1"},/' | /bin/sed -e '$ s/\(.*\)/{"{{ '{' }}#CONTAINER}":"\1"}]}/' | /bin/sed -e '1 i { \"data\":['
# those are generic and allow the query of arbitrary values, those 2 userparameters are legacy
UserParameter=incus.container.config[*],sudo /usr/bin/incus list $1 --format json 2>/dev/null| jq '.[0].expanded_config."$2"'
UserParameter=incus.container.counter[*],sudo curl --silent --unix-socket /var/lib/incus/unix.socket a/1.0/instances/$1/state | jq '.metadata.$2'
# BEGIN cgroup v2 metrics
UserParameter=incus.container.cpu_usage[*],echo "$(cat /sys/fs/cgroup/lxc.payload.$1/cpu.stat |grep usage_usec| cut -d' ' -f2)000"
UserParameter=incus.container.memory[*],cat /sys/fs/cgroup/lxc.payload.$1/memory.current
UserParameter=incus.container.config.memory_limit[*],cat /sys/fs/cgroup/lxc.payload.$1/memory.max
UserParameter=incus.container.processes[*],cat /sys/fs/cgroup/lxc.payload.$1/pids.current
# END cgroup v2 metrics
# network interface traffic merged
UserParameter=incus.container.network.all_interfaces[*],echo $(($(sudo curl --silent --unix-socket /var/lib/incus/unix.socket a/1.0/instances/$1/state | jq '.metadata.network |del (.lo)'| jq '.[].counters.$2'| tr '\n' '+'| sed "s/\+$/\n/")))
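# usage sketch (assumption: queried from the Zabbix server; host and container names are placeholders):
#   zabbix_get -s <agent_host> -k 'incus.container.memory[mycontainer]'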


@ -0,0 +1,10 @@
---
# file: roles/incus/meta/main.yml
dependencies:
- role: ovn
when: ovn_cluster_name is defined
- role: zabbix_template_assignment
zabbix_template_assignment_list:
- zabbix_name: talas incus
user_parameter: incus


@ -0,0 +1,96 @@
# incus role
<!-- TOC -->
* [incus role](#incus-role)
* [Variable reference](#variable-reference)
* [Mandatory variables](#mandatory-variables)
* [Optional variables](#optional-variables)
* [incus_cluster tips](#incus_cluster-tips)
* [incus_subuid_list and incus_subgid_list: mount directory from host to container with the host uid/gid](#incus_subuid_list-and-incus_subgid_list-mount-directory-from-host-to-container-with-the-host-uidgid)
* [Misc](#misc)
* [Incus logs](#incus-logs)
* [Detection of unconfigured incus](#detection-of-unconfigured-incus)
<!-- TOC -->
## Variable reference
### Mandatory variables
| Variable | Description | Example value |
| -------- | ----------- | ------------- |
| incus_repository | name of the zabbly incus repo to use | lts-6.0 |
### Optional variables
| Variable | Description | Default value | Example value |
|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|
| incus_version | incus specific version to pin; only useful in a cluster configuration, since [a minor upgrade can block the cluster](https://linuxcontainers.org/incus/docs/main/howto/cluster_manage/#upgrade-cluster-members). Use `--extra-vars "incus_update_now=true"` to upgrade cluster members. | | |
| incus_trust_list | list of server names authorized to have their certificate added to the incus trust config | None | {{ backup_server_list + [ 'srv-379' ] }} |
| incus_zfs_backend | create a zfs dataset for incus | true | false |
| incus_zfs_root_dataset | the zfs dataset to create for incus | nvme/incus | tank/incus |
| incus_standard_profiles | list of all default profiles | Look in the `defaults/main.yml` file for the standard configuration | see `defaults/main.yml` |
| incus_subuid_list | grant incus access to specific uids | None | ['5001'] |
| incus_subgid_list | grant incus access to specific gids | None | ['5000'] |
| incus_cluster_name | Set this variable if you want the host to be part of an incus cluster. Must be defined in a group_vars and have the same name as that group_vars | None | th3_core |
| incus_cluster_main_name | ansible_hostname of the incus cluster main server | None | srv-203 |
| incus_cluster_init_member_config | member_config list of the admin init preseed file. The list of the keys needed is cluster specific but the value is not necessarily the same on each member.<br /> It can be obtained with the command `incus query /1.0/cluster` | None | incus_cluster_init_member_config: \|<br /> entity: storage-pool<br /> name: default<br /> key: driver<br /> value: zfs |
| incus_cluster_scheduler | Per member optional cluster configuration to restrict automatic placement of instances https://linuxcontainers.org/incus/docs/main/explanation/clustering/#clustering-instance-placement | None | group |
| incus_ip | the ip on which incus listens, without CIDR | None | 10.24.10.10 |
| incus_bgp_asn | Private AS number used by the incus bgp daemon. If not set incus bgp daemon will not be activated | None | `65024` |
#### incus_cluster tips
The easiest way to set `incus_cluster_name`, `incus_cluster_main_name` and `incus_ip`, if you intend to use OVN on this cluster, is to set them respectively to the corresponding ovn vars:
```
incus_cluster_name: {{ ovn_cluster_name }}
incus_cluster_main_name: {{ ovn_cluster_main_name }}
incus_ip: {{ ovn_ip }}
```
#### incus_subuid_list and incus_subgid_list: mount directory from host to container with the host uid/gid
Sometimes, you need to give a container access to a directory without remapping its uid/gid.
To do that, you must modify the system to grant incus access to specific uid/gid and then modify the container to handle this specific case.
To grant incus access to specific uid/gid, set `incus_subuid_list` and `incus_subgid_list` on the host.
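A minimal host-vars sketch, reusing the example values from the variable table above:
```
incus_subuid_list: ['5001']
incus_subgid_list: ['5000']
```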
Then for the incus container, you must add the following setting:
```
echo -en "uid 5001 5001\ngid 5000 5000" | incus config set container_name raw.idmap -
```
A container restart is necessary to apply the change; it cannot be done live. For example:
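```
incus restart container_name
```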
Then add the directory to the container storage, for instance:
```
devices:
01OPTI01370:
path: /srv/vaults/01OPTI01370
source: /srv/vaults/01OPTI01370
type: disk
```
You will then be able to access the directory inside the container with the same uid/gid as the host.
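You can check the ownership from the host with `incus exec` (a sketch reusing the device example above):
```
incus exec container_name -- ls -ln /srv/vaults/01OPTI01370
```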
## Misc
### Incus logs
Log incus daemon:
```
/var/log/incus/incusd.log
```
Log container incus:
```
/var/log/incus/CONTAINER_NAME
```
### Detection of unconfigured incus
The `incus_yaml_unconfigured` variable is defined in `defaults/main.yml`; it contains the unconfigured state of an incus daemon just after installation. You should not have to change this: the variable exists to handle future incus versions that may have a different default configuration.
The `incus_standalone_init_yaml` variable is also defined in `defaults/main.yml`; it contains the initial configuration that must override the default above. You only need to change this if your default storage pool doesn't use zfs.
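To see the live state the role compares against `incus_yaml_unconfigured`, dump the current daemon configuration (the same command the role itself runs):
```
incus admin init --dump
```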


@ -0,0 +1,158 @@
---
# file: roles/incus/tasks/cluster.yml
- name: "set facts incus_cluster_main_ip"
ansible.builtin.set_fact:
incus_cluster_main_ip: "{{ hostvars[ovn_cluster_main_name]['incus_ip'] }}"
- name: "Set ovn_connection_type"
ansible.builtin.set_fact:
ovn_connection_type: "{{ ovn_ssl | ternary('ssl', 'tcp') }}"
- name: "set facts ovn_central bounds lists"
ansible.builtin.set_fact:
incus_ovn_northbound_list: "{{ (incus_ovn_northbound_list | default([])) + [ovn_connection_type + ':' + hostvars[item]['ovn_ip'] + ':6641'] }}"
loop: "{{ ovn_central_servers }}"
- name: "set facts ovn_central bounds"
ansible.builtin.set_fact:
incus_ovn_northbound: "{{ incus_ovn_northbound_list | join(',') }}"
- name: "set facts ovn_ic bounds lists"
ansible.builtin.set_fact:
incus_ovn_ic_northbound_list: "{{ (incus_ovn_ic_northbound_list | default([])) + [ovn_connection_type + ':' + hostvars[item]['ovn_ip'] + ':6645'] }}"
incus_ovn_ic_southbound_list: "{{ (incus_ovn_ic_southbound_list | default([])) + [ovn_connection_type + ':' + hostvars[item]['ovn_ip'] + ':6646'] }}"
loop: "{{ ovn_ic_db_servers }}"
when:
- ovn_ic_db_servers is defined
- ansible_hostname == incus_cluster_main_name
- name: "set facts ovn_ic bounds"
ansible.builtin.set_fact:
incus_ovn_ic_northbound: "{{ incus_ovn_ic_northbound_list | join(',') }}"
incus_ovn_ic_southbound: "{{ incus_ovn_ic_southbound_list | join(',') }}"
when:
- ovn_ic_db_servers is defined
- ansible_hostname == incus_cluster_main_name
- name: "Get {{ incus_cluster_name }} secrets from hashicorp vault"
ansible.builtin.set_fact:
incus_hv_secrets: "{{ lookup('community.hashi_vault.vault_kv2_get', 'group_vars/' + incus_cluster_name, engine_mount_point='talas-kv') }}"
when: ovn_ssl
- name: "Extract cert and private key from hashicorp vault"
ansible.builtin.set_fact:
incus_client_cert_serial_number: "{{ incus_hv_secrets.secret.ovn_cert_incus_client_serial_number }}"
incus_client_cert_private_key: "{{ incus_hv_secrets.secret.ovn_cert_incus_client_private_key }}"
when: ovn_ssl
- name: "Get client cert and issuing certificates from hashicorp vault pki"
ansible.builtin.set_fact:
incus_client_cert_issuing_ca_chain: "{{ lookup('community.hashi_vault.vault_read', 'pki/issuer/OVN') | community.general.json_query('data.ca_chain') | join() | trim }}"
incus_client_cert_ca: "{{ lookup('community.hashi_vault.vault_read', 'pki/cert/' + incus_client_cert_serial_number) | community.general.json_query('data.certificate') }}"
when: ovn_ssl
- name: "init preseed if first cluster main install"
ansible.builtin.shell:
cmd: /usr/bin/incus admin init --preseed
stdin: "{{ incus_cluster_main_init_yaml }}"
stdin_add_newline: true
when:
- incus_admin_init_dump.stdout == incus_yaml_unconfigured
- ansible_hostname == incus_cluster_main_name
# https://tracker.talas.com/browse/INFRA-179
#- name: "get and set OVN configuration WITH TLS"
# ansible.builtin.include_tasks: get_and_set.yml
# loop:
# - { section: config, key: network.ovn.northbound_connection, value: "{{ incus_cluster_ovn_conf.northbound_connection }}" }
# - { section: config, key: network.ovn.client_cert, value: "{{ incus_cluster_ovn_conf.client_cert }}" }
# - { section: config, key: network.ovn.client_key, value: "{{ incus_cluster_ovn_conf.client_key }}" }
# - { section: config, key: network.ovn.ca_cert, value: "{{ incus_cluster_ovn_conf.ca_cert }}" }
# when:
# - ovn_ssl
# - ansible_hostname == incus_cluster_main_name
- name: "check if ovn_ic_integration exists"
ansible.builtin.command: "/usr/bin/incus network integration show ovn_ic_integration"
register: get_ovn_ic_integration
changed_when: false
check_mode: false
failed_when: false
when:
- ovn_ssl
- ovn_ic_db_servers is defined
- ansible_hostname == incus_cluster_main_name
- name: "incus network integration create ovn_ic_integration"
ansible.builtin.command: "/usr/bin/incus network integration create ovn_ic_integration ovn"
when:
- ovn_ssl
- ovn_ic_db_servers is defined
- ansible_hostname == incus_cluster_main_name
- get_ovn_ic_integration.rc != 0
- name: "get and set OVN IC integration configuration WITH TLS"
ansible.builtin.include_tasks: get_and_set.yml
loop:
- { section: "network integration", object: ovn_ic_integration, key: ovn.northbound_connection, value: "{{ incus_ovn_ic_northbound }}" }
- { section: "network integration", object: ovn_ic_integration, key: ovn.southbound_connection, value: "{{ incus_ovn_ic_southbound }}" }
- { section: "network integration", object: ovn_ic_integration, key: ovn.ca_cert, value: "{{ incus_cluster_ovn_conf.ca_cert }}" }
- { section: "network integration", object: ovn_ic_integration, key: ovn.client_cert, value: "{{ incus_cluster_ovn_conf.client_cert }}" }
- { section: "network integration", object: ovn_ic_integration, key: ovn.client_key, value: "{{ incus_cluster_ovn_conf.client_key }}" }
when:
- ovn_ssl
- ovn_ic_db_servers is defined
- ansible_hostname == incus_cluster_main_name
- name: "get IC integration transit pattern"
ansible.builtin.shell: incus network integration get ovn_ic_integration ovn.transit.pattern
register: get_transit_pattern
changed_when: false
check_mode: false
when:
- ovn_ic_db_servers is defined
- ansible_hostname == incus_cluster_main_name
- name: "/usr/bin/incus network integration set ovn_ic_integration ovn.transit.pattern"
ansible.builtin.shell: "/usr/bin/incus network integration set ovn_ic_integration ovn.transit.pattern {% raw %}'ts-incus-{{ integrationName }}-{{ peerName }}'{% endraw %}"
when:
- ovn_ic_db_servers is defined
- ansible_hostname == incus_cluster_main_name
- get_transit_pattern.stdout != {% raw %}'ts-incus-{{ integrationName }}-{{ peerName }}'{% endraw %}
- name: "get and set OVN configuration WITHOUT TLS"
ansible.builtin.include_tasks: get_and_set.yml
loop:
- { section: config, key: "network.ovn.northbound_connection", value: "{{ incus_cluster_ovn_conf.northbound_connection }}" }
when:
- not ovn_ssl
- ansible_hostname == incus_cluster_main_name
- name: "Create join tokens"
throttle: 1
delegate_to: "{{ incus_cluster_main_name }}"
vars:
ansible_python_interpreter: "{{ hostvars[incus_cluster_main_name].ansible_python_interpreter | default('/usr/bin/python3') }}"
ansible.builtin.shell:
cmd: "incus --force-local --quiet cluster add {{ inventory_hostname }}"
register: incus_cluster_add
when:
- incus_admin_init_dump.stdout == incus_yaml_unconfigured
- ansible_hostname != incus_cluster_main_name
- name: "init preseed if first cluster secondary install"
ansible.builtin.shell:
cmd: /usr/bin/incus admin init --preseed
stdin: "{{ incus_cluster_init_yaml }}"
stdin_add_newline: true
throttle: 1
when:
- incus_admin_init_dump.stdout == incus_yaml_unconfigured
- ansible_hostname != incus_cluster_main_name
- name: "get and set scheduler.instance"
ansible.builtin.include_tasks: get_and_set_cluster.yml
loop:
- { key: 'scheduler.instance', value: "{{ incus_cluster_scheduler }}" }
when: incus_cluster_scheduler is defined


@ -0,0 +1,13 @@
---
# file: roles/incus/tasks/get_and_set.yml
- name: "incus {{ item.section }} get {{ item.object | default('') }} {{ item.key }}"
ansible.builtin.shell: "/usr/bin/incus {{ item.section }} get {{ item.object | default('') }} {{ item.key }}"
register: get_configuration
changed_when: false
check_mode: false
failed_when: 'get_configuration.rc != 0 and "not found" not in get_configuration.stderr'
- name: "/usr/bin/incus {{ item.section }} set {{ item.object | default('') }} {{ item.key }}"
ansible.builtin.shell: "/usr/bin/incus {{ item.section }} set {{ item.object | default('') }} {{ item.key }} -- \"{{ item.value }}\""
when: get_configuration.stdout != item.value


@ -0,0 +1,13 @@
---
# file: roles/incus/tasks/get_and_set_cluster.yml
- name: "incus cluster get {{ ansible_hostname }} {{ item.key }}"
shell: "/usr/bin/incus cluster get {{ ansible_hostname }} {{ item.key }}"
register: get_configuration
changed_when: false
check_mode: false
failed_when: get_configuration.rc != 0 and "does not exist on cluster member" not in get_configuration.stderr
- name: "incus cluster set {{ ansible_hostname }} set {{ item.key }}"
shell: "/usr/bin/incus cluster set {{ ansible_hostname }} {{ item.key }}={{ item.value }}"
when: get_configuration.stdout != item.value


@ -0,0 +1,174 @@
---
# file: roles/incus/tasks/main.yml
- name: "/etc/apt/sources.list.d/zabbly.sources"
ansible.builtin.deb822_repository:
name: zabbly
types: deb
uris: "https://pkgs.zabbly.com/incus/{{ incus_repository }}"
suites: "{{ ansible_distribution_release }}"
components: main
architectures: amd64
signed_by: https://pkgs.zabbly.com/key.asc
register: incus_repository_out
tags: incus
- name: "apt pin incus version"
ansible.builtin.copy:
content: |
Package: incus*
Pin: version {{ incus_version_epoch }}:{{ incus_version }}*
Pin-Priority: 999
dest: "/etc/apt/preferences.d/incus"
register: incus_pin_out
when: incus_version is defined
tags: incus
- name: "apt update"
ansible.builtin.apt:
update_cache: true
when: incus_repository_out.changed or incus_pin_out.changed
tags: incus
- name: "install needed packages: bridge-utils and ifenslave for the network, jq/curl for monitoring and apparmor for security"
ansible.builtin.apt:
name:
- bridge-utils
- ifenslave
- apparmor
- curl
- jq
tags: incus
- name: "install incus (and upgrade if incus_version is defined and incus_update_now)"
ansible.builtin.apt:
name:
- incus
state: "{{ 'latest' if incus_version is defined and incus_update_now else 'present' }}"
tags: incus
- name: "/etc/sysctl.conf tunable that should alway be set"
ansible.posix.sysctl:
name: "{{ item['name'] }}"
value: "{{ item['value'] }}"
loop:
- { 'name': 'fs.aio-max-nr', 'value': '524288' }
- { 'name': 'fs.inotify.max_queued_events', 'value': '1048576' }
- { 'name': 'fs.inotify.max_user_instances', 'value': '1048576' }
- { 'name': 'fs.inotify.max_user_watches', 'value': '1048576' }
- { 'name': 'kernel.dmesg_restrict', 'value': '1' }
- { 'name': 'kernel.keys.maxbytes', 'value': '2000000' }
- { 'name': 'kernel.keys.maxkeys', 'value': '2000' }
- { 'name': 'vm.max_map_count', 'value': '262144' }
- { 'name': 'net.core.bpf_jit_limit', 'value': '1000000000' }
- { 'name': 'net.ipv4.neigh.default.gc_thresh3', 'value': '8192' }
- { 'name': 'net.ipv6.neigh.default.gc_thresh3', 'value': '8192' }
tags:
- incus
- sysctl
- name: "incus dataset"
community.general.zfs:
name: "{{ incus_zfs_root_dataset }}"
state: present
extra_zfs_properties:
mountpoint: legacy
when: incus_zfs_backend
tags: incus
- name: "set default incus_ip"
ansible.builtin.set_fact:
incus_ip: "[::]"
when: incus_ip is not defined
tags: incus
- name: "dump current admin init"
ansible.builtin.command: "/usr/bin/incus admin init --dump"
register: incus_admin_init_dump
changed_when: false
check_mode: false
tags: incus
- name: "display current incus configuration"
ansible.builtin.debug:
var: incus_admin_init_dump.stdout
verbosity: 1
tags: incus
- name: "set configuration if first standalone install"
ansible.builtin.shell: 'echo "{{ incus_standalone_init_yaml }}" | /usr/bin/incus admin init --preseed'
when:
- incus_admin_init_dump.stdout == incus_yaml_unconfigured
- incus_cluster_name is not defined
tags: incus
- name: "install cluster"
ansible.builtin.import_tasks: cluster.yml
when: incus_cluster_name is defined
tags: incus
- name: "incus profiles"
ansible.builtin.import_tasks: profiles.yml
when: incus_cluster_name is not defined
tags:
- incus
- incus_profiles
- name: "administration scripts"
ansible.builtin.import_tasks: scripts.yml
tags:
- incus
- incus_scripts
- name: "handle /etc/subuid"
ansible.builtin.lineinfile:
path: "/etc/subuid"
line: "root:{{ item }}:1"
loop: "{{ incus_subuid_list }}"
when: incus_subuid_list is defined
tags: incus
- name: "handle /etc/subgid"
ansible.builtin.lineinfile:
path: "/etc/subgid"
line: "root:{{ item }}:1"
loop: "{{ incus_subgid_list }}"
when: incus_subgid_list is defined
tags: incus
- name: "get and set https_address"
ansible.builtin.include_tasks:
file: get_and_set.yml
apply:
tags:
- incus
loop:
- { section: config, key: "core.https_address", value: "{{ incus_ip }}:8443" }
tags: incus
- name: "get and set core.bgp_*"
ansible.builtin.include_tasks:
file: get_and_set.yml
apply:
tags:
- incus
loop:
- { section: config, key: "core.bgp_address", value: "{{ incus_ip }}:179" }
- { section: config, key: "core.bgp_asn", value: "{{ incus_bgp_asn }}" }
- { section: config, key: "core.bgp_routerid", value: "{{ incus_ip }}" }
when: incus_bgp_asn is defined
tags: incus
- name: "incus trust configuration via openssl certificates"
ansible.builtin.import_tasks: trust.yml
when:
- incus_trust_list is defined
- incus_cluster_name is not defined or (incus_cluster_name is defined and ansible_hostname == incus_cluster_main_name)
tags:
- incus
- incus_trust
- name: "cosinfo"
ansible.builtin.import_tasks: cosinfo.yml
tags:
- incus
- cosinfo

Some files were not shown because too many files have changed in this diff.