small fixes : cors + login loop
1604
MODULE.bazel.lock
Normal file
652
Makefile
|
|
@ -3,644 +3,38 @@
|
|||
# ==============================================================================
# Stack: Docker + Incus (LXD) Support
# System: Linux / Bash
#
# Configuration: edit make/config.mk (ports, services, paths).
# Add new targets in make/*.mk or below.
# ==============================================================================

# --- Auto-Configuration ---
# Optional local overrides; `-include` stays silent if .env is absent.
-include .env

# Shell setup
SHELL := /bin/bash
# NOTE: .ONESHELL runs each recipe as ONE bash script. Without `set -e`
# (no .SHELLFLAGS is set here), a failing middle line does NOT abort the
# rest of the recipe — only the last line's exit status is reported.
# Recipes that must stop on error use an explicit `exit 1`.
.ONESHELL:
# Remove a half-written target file when its recipe fails, so a corrupt
# artifact can never look "up to date" on the next run.
.DELETE_ON_ERROR:
.DEFAULT_GOAL := help
|
||||
|
||||
# --- Variables ---
PROJECT_NAME := veza
COMPOSE_FILE := docker-compose.yml
COMPOSE_PROD := docker-compose.prod.yml
# --- Configuration (single source of truth) ---
include make/config.mk
include make/ui.mk

# Services
SERVICES := backend-api chat-server stream-server web haproxy
INFRA_SERVICES := postgres redis rabbitmq

# Ports (overridable from the environment / .env / command line via ?=)
PORT_GO ?= 8080
PORT_CHAT ?= 3000
PORT_STREAM ?= 3001
PORT_WEB ?= 5173
PORT_HAPROXY ?= 80

# Database & Infra
DB_USER ?= veza
DB_PASS ?= password
DB_NAME ?= veza
DB_HOST ?= localhost
DB_PORT ?= 5432

# Connection Strings
# NOTE(review): these use recursive `=` on purpose-or-by-accident — they
# re-expand on every use, so a later override of DB_* (e.g. by an included
# make/*.mk) is picked up. Confirm before converting to `:=`.
DATABASE_URL = postgres://$(DB_USER):$(DB_PASS)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?sslmode=disable
REDIS_URL = redis://localhost:6379
AMQP_URL = amqp://$(DB_USER):$(DB_PASS)@localhost:5672

# Directories
DIR_GO := veza-backend-api
DIR_CHAT := veza-chat-server
DIR_STREAM := veza-stream-server
DIR_WEB := apps/web

# Deployment
DEPLOY_TARGET ?= docker
INCUS_PROFILE := veza-profile
INCUS_NETWORK := veza-network

# --- Aesthetics & UI ---
# ANSI escape sequences; rendered by `echo -e` / printf, not by plain echo.
BOLD := \033[1m
RED := \033[0;31m
GREEN := \033[0;32m
YELLOW := \033[0;33m
BLUE := \033[0;34m
PURPLE := \033[0;35m
CYAN := \033[0;36m
NC := \033[0m

# `echo -e` so the ANSI variables above are interpreted (SHELL is bash).
ECHO_CMD = echo -e
# --- All feature modules ---
include make/tools.mk
include make/infra.mk
include make/dev.mk
include make/build.mk
include make/test.mk
include make/services.mk
include make/high.mk
include make/incus.mk
include make/help.mk
||||
|
||||
# ==============================================================================
# HELP & DASHBOARD
# PER-SERVICE CONVENIENCE (dev-*, test-*, lint-*, build-*)
# ==============================================================================
.PHONY: help
# Scans every parsed makefile ($(MAKEFILE_LIST)) for targets annotated
# "## [HIGH]" / "## [MID]" / "## [LOW]" and prints them grouped by level.
# Keep those annotations intact on new targets or they vanish from help.
help: ## Show this dashboard
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${PURPLE}⚡ VEZA MONOREPO CLI ⚡${NC}"
	@$(ECHO_CMD) "================================================================="
	@$(ECHO_CMD) "${BOLD}INFRASTRUCTURE:${NC}"
	@printf " ${CYAN}%-15s${NC} %s\n" "Postgres" "${DATABASE_URL}"
	@printf " ${CYAN}%-15s${NC} %s\n" "Redis" "${REDIS_URL}"
	@printf " ${CYAN}%-15s${NC} %s\n" "RabbitMQ" "UI: http://localhost:15672 (veza/password)"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${GREEN}HIGH LEVEL COMMANDS:${NC}"
	@grep -E '^[a-zA-Z0-9_-]+:.*?## \[HIGH\] .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " ${YELLOW}%-25s${NC} %s\n", $$1, $$2}'
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${BLUE}INTERMEDIATE COMMANDS:${NC}"
	@grep -E '^[a-zA-Z0-9_-]+:.*?## \[MID\] .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " ${CYAN}%-25s${NC} %s\n", $$1, $$2}'
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${PURPLE}LOW LEVEL / DEBUG:${NC}"
	@grep -E '^[a-zA-Z0-9_-]+:.*?## \[LOW\] .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " ${PURPLE}%-25s${NC} %s\n", $$1, $$2}'
	@$(ECHO_CMD) ""
|
||||
|
||||
# Usage: make dev-web, make test-backend-api, make lint-web, etc.
# Add new services in make/config.mk (SERVICES, SERVICE_DIR_*, PORT_*).
# ==============================================================================
# HIGH LEVEL COMMANDS
# ==============================================================================
.PHONY: setup stop-all restart-all clean deploy-docker deploy-incus status-full

# One-shot bootstrap: verify required tools, install dev tooling, fetch deps.
setup: check-tools install-tools install-deps ## [HIGH] Full project initialization
	@$(ECHO_CMD) "${BOLD}${GREEN}✅ Setup Complete! Ready to rock with 'make dev'.${NC}"

# NOTE(review): web-minimal / stop-minimal are not declared .PHONY — confirm
# files with these names can never appear in the repo root.
# Delegates to scripts/start_minimal.sh (backend + frontend + DB only).
web-minimal: ## [HIGH] Start Veza Web Minimal Journey (Backend + Frontend + DB)
	@./scripts/start_minimal.sh

stop-minimal: ## [HIGH] Stop Minimal Stack
	@./scripts/stop_minimal.sh
|
||||
# Tear down both compose stacks (errors ignored — a stack may not be up),
# then kill any locally started dev processes.
stop-all: ## [HIGH] Stop all services (Docker + Local)
	@$(ECHO_CMD) "${RED}🛑 Stopping all services...${NC}"
	@docker compose -f $(COMPOSE_FILE) down 2>/dev/null || true
	@docker compose -f $(COMPOSE_PROD) down 2>/dev/null || true
	@$(MAKE) -s stop-local-services
	@$(ECHO_CMD) "${GREEN}✅ All services stopped.${NC}"

# Full cycle: stop everything, bring infra back, relaunch the dev stack.
restart-all: stop-all ## [HIGH] Restart all services
	@$(ECHO_CMD) "${BLUE}🔄 Restarting all services...${NC}"
	@$(MAKE) -s infra-up
	@$(MAKE) -s dev
	@$(ECHO_CMD) "${GREEN}✅ All services restarted.${NC}"

# Light clean: caches, debug builds and stray *.log files only.
# The find prunes node_modules so it never descends into dependency trees.
clean: ## [HIGH] Clean build artifacts and caches
	@$(ECHO_CMD) "${YELLOW}🧹 Cleaning build artifacts...${NC}"
	@rm -rf $(DIR_WEB)/node_modules/.cache
	@rm -rf $(DIR_CHAT)/target/debug $(DIR_STREAM)/target/debug
	@find . -type d -name "node_modules" -prune -o -type f -name "*.log" -delete
	@$(ECHO_CMD) "${GREEN}✅ Clean complete.${NC}"
|
||||
|
||||
# Destructive clean: wipes node_modules, Rust target dirs, and Docker volumes.
# Fixes vs. previous version:
#  * `read -p` does not interpret \033 escapes, so the ANSI codes printed as
#    raw text; the prompt is now emitted via printf, which does interpret them.
#  * Under .ONESHELL (no `set -e`) a declined confirmation did NOT abort the
#    remaining lines — the rm/down commands ran anyway. `|| exit 1` now aborts
#    the whole recipe when the user declines.
#  * $$ans is quoted so an answer containing whitespace cannot break the test.
clean-deep: ## [HIGH] ⚠️ Nuclear Clean (Confirm required)
	@printf "${RED}Are you sure? This will delete ALL builds, volumes, and caches! [y/N]${NC} " && read ans && [ "$${ans:-N}" = "y" ] || exit 1
	@$(ECHO_CMD) "${RED}☢️ DESTROYING ARTIFACTS...${NC}"
	@rm -rf $(DIR_WEB)/node_modules
	@rm -rf $(DIR_CHAT)/target $(DIR_STREAM)/target
	@docker compose -f $(COMPOSE_FILE) down -v 2>/dev/null || true
	@docker compose -f $(COMPOSE_PROD) down -v 2>/dev/null || true
	@$(ECHO_CMD) "${GREEN}System Cleaned.${NC}"
|
||||
|
||||
# Build all images then bring up the production compose stack behind HAProxy.
deploy-docker: build-all ## [HIGH] Deploy all services with Docker + HAProxy
	@$(ECHO_CMD) "${BOLD}${BLUE}🐳 Deploying with Docker...${NC}"
	@docker compose -f $(COMPOSE_PROD) up -d --build
	@$(MAKE) -s wait-for-services
	@$(ECHO_CMD) "${GREEN}✅ Deployment complete! Access via http://localhost:$(PORT_HAPROXY)${NC}"

# Native (Docker-free) deployment into Incus containers on the 10.10.10.0/24
# bridge created by incus-setup-network.
deploy-incus: build-all-native ## [HIGH] Deploy all services with Incus containers (native, no Docker)
	@$(ECHO_CMD) "${BOLD}${BLUE}📦 Deploying with Incus (native)...${NC}"
	@$(MAKE) -s incus-setup-network
	@$(MAKE) -s incus-deploy-infra
	@$(MAKE) -s incus-deploy-all-native
	@$(MAKE) -s incus-start-all
	@$(ECHO_CMD) "${GREEN}✅ Incus deployment complete!${NC}"
	@$(ECHO_CMD) "${BLUE}Access services at:${NC}"
	@$(ECHO_CMD) " Backend API: http://10.10.10.2:8080"
	@$(ECHO_CMD) " Chat Server: http://10.10.10.3:8081"
	@$(ECHO_CMD) " Stream Server: http://10.10.10.4:3002"
	@$(ECHO_CMD) " Web Frontend: http://10.10.10.5:80"
	@$(ECHO_CMD) " HAProxy: http://10.10.10.6:80"

# Read-only overview of Docker containers, local listeners and Incus
# containers; every probe degrades to an informative message on failure.
status-full: ## [HIGH] Show complete system status
	@$(ECHO_CMD) "${BOLD}${CYAN}📊 SYSTEM STATUS${NC}"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Docker Containers:${NC}"
	@docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "NAME|veza" || echo " No containers running"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Local Processes:${NC}"
	@lsof -i :$(PORT_GO) -i :$(PORT_CHAT) -i :$(PORT_STREAM) -i :$(PORT_WEB) 2>/dev/null | grep LISTEN || echo " No local processes"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Incus Containers:${NC}"
	@incus list veza- 2>/dev/null | grep -E "NAME|veza" || echo " No Incus containers"
	@$(ECHO_CMD) ""
||||
|
||||
# ==============================================================================
# INTERMEDIATE COMMANDS
# ==============================================================================
.PHONY: start-service stop-service restart-service logs-service build-service

# Start one service: try the prod compose stack first, fall back to a local
# process if compose fails. Requires SERVICE=<name> on the command line.
start-service: ## [MID] Start a specific service (usage: make start-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${BLUE}🚀 Starting $(SERVICE)...${NC}"
	@docker compose -f $(COMPOSE_PROD) up -d $(SERVICE) || \
		$(MAKE) -s start-local-service SERVICE=$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) started.${NC}"

# Stop one service: compose first, then the local-process fallback.
stop-service: ## [MID] Stop a specific service (usage: make stop-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${YELLOW}🛑 Stopping $(SERVICE)...${NC}"
	@docker compose -f $(COMPOSE_PROD) stop $(SERVICE) 2>/dev/null || \
		$(MAKE) -s stop-local-service SERVICE=$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) stopped.${NC}"

# SERVICE validation happens in the stop-service prerequisite.
restart-service: stop-service ## [MID] Restart a specific service (usage: make restart-service SERVICE=backend-api)
	@$(ECHO_CMD) "${BLUE}🔄 Restarting $(SERVICE)...${NC}"
	@$(MAKE) -s start-service SERVICE=$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) restarted.${NC}"

# Follows compose logs; prints a hint instead if the service is not in Docker.
logs-service: ## [MID] Show logs for a service (usage: make logs-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@docker compose -f $(COMPOSE_PROD) logs -f $(SERVICE) || \
		$(ECHO_CMD) "${YELLOW}Service not running in Docker, check local logs${NC}"

# Dispatches to the per-service build-<name> target defined below.
build-service: ## [MID] Build a specific service (usage: make build-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${BLUE}🔨 Building $(SERVICE)...${NC}"
	@$(MAKE) -s build-$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) built.${NC}"
|
||||
|
||||
# Build the four application images in sequence.
# NOTE(review): with .ONESHELL and no `set -e`, a failing sub-make does NOT
# abort the remaining builds — confirm that is intended.
build-all: ## [MID] Build all services (Docker images)
	@$(ECHO_CMD) "${BLUE}🔨 Building all services...${NC}"
	@$(MAKE) -s build-backend-api
	@$(MAKE) -s build-chat-server
	@$(MAKE) -s build-stream-server
	@$(MAKE) -s build-web
	@$(ECHO_CMD) "${GREEN}✅ All services built.${NC}"
|
||||
|
||||
# Build every service natively (no Docker) via the Incus helper script.
# Fix: `$(shell pwd)` forked a shell on every expansion of the recipe;
# $(CURDIR) is GNU Make's built-in absolute working directory and stays
# correct under `make -C` as well.
build-all-native: ## [MID] Build all services natively (for Incus)
	@$(ECHO_CMD) "${BLUE}🔨 Building all services natively...${NC}"
	@$(CURDIR)/config/incus/build-native.sh all
	@$(ECHO_CMD) "${GREEN}✅ All services built natively.${NC}"
|
||||
|
||||
# ==============================================================================
# LOW LEVEL / DEBUG COMMANDS
# ==============================================================================
.PHONY: check-tools install-tools install-deps check-ports

# Fail fast if any core build tool is absent from PATH.
check-tools: ## [LOW] Check required tools
	@$(ECHO_CMD) "${BLUE}Checking core requirements...${NC}"
	@for tool in docker go cargo npm; do \
		command -v $$tool >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ $$tool is missing!${NC}"; exit 1; }; \
	done
	@$(ECHO_CMD) "${GREEN}✅ All tools present.${NC}"

# Same check for the native Incus path (incus replaces docker).
check-tools-incus: ## [LOW] Check required tools for Incus deployment
	@$(ECHO_CMD) "${BLUE}Checking Incus deployment requirements...${NC}"
	@command -v incus >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ incus is missing! Install with: sudo snap install incus${NC}"; exit 1; }
	@command -v go >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ go is missing!${NC}"; exit 1; }
	@command -v cargo >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ cargo is missing!${NC}"; exit 1; }
	@command -v npm >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ npm is missing!${NC}"; exit 1; }
	@$(ECHO_CMD) "${GREEN}✅ All Incus tools present.${NC}"

# Install hot-reload tooling only when not already on PATH (idempotent).
install-tools: ## [LOW] Install Power User tools (Hot Reload, Linters)
	@$(ECHO_CMD) "${BLUE}🛠️ Installing Dev Tools...${NC}"
	@command -v air >/dev/null 2>&1 || go install github.com/air-verse/air@latest
	@command -v cargo-watch >/dev/null 2>&1 || cargo install cargo-watch
	@command -v sqlx >/dev/null 2>&1 || cargo install sqlx-cli --no-default-features --features native-tls,postgres
	@$(ECHO_CMD) "${GREEN}✅ Tools installed.${NC}"

# Fetch dependencies for each sub-project; subshells keep `cd` contained.
install-deps: ## [LOW] Install code dependencies
	@$(ECHO_CMD) "${BLUE}📦 Installing dependencies...${NC}"
	@$(ECHO_CMD) " -> [Go] Downloading modules..."
	@(cd $(DIR_GO) && go mod download)
	@$(ECHO_CMD) " -> [Rust Chat] Fetching crates..."
	@(cd $(DIR_CHAT) && cargo fetch)
	@$(ECHO_CMD) " -> [Rust Stream] Fetching crates..."
	@(cd $(DIR_STREAM) && cargo fetch)
	@$(ECHO_CMD) " -> [Web] Installing npm packages..."
	@(cd $(DIR_WEB) && npm install --silent)

# Informational only: reports busy/free per port, never fails the build.
check-ports: ## [LOW] Check if ports are available
	@$(ECHO_CMD) "${BLUE}🔍 Checking ports...${NC}"
	@for port in $(PORT_GO) $(PORT_CHAT) $(PORT_STREAM) $(PORT_WEB); do \
		if lsof -i :$$port -t >/dev/null 2>&1; then \
			$(ECHO_CMD) "${YELLOW}⚠️ Port $$port is busy${NC}"; \
		else \
			$(ECHO_CMD) "${GREEN}✅ Port $$port is free${NC}"; \
		fi; \
	done
|
||||
|
||||
# ==============================================================================
# INFRASTRUCTURE
# ==============================================================================
.PHONY: infra-up infra-down wait-for-infra db-shell redis-shell db-migrate

# Bring up the dev compose stack and block until Postgres/Redis answer.
infra-up: ## [MID] Start Docker Infra (with health checks)
	@$(ECHO_CMD) "${BLUE}🐳 Starting Infrastructure...${NC}"
	@docker compose -f $(COMPOSE_FILE) up -d
	@$(MAKE) -s wait-for-infra

infra-down: ## [MID] Stop Docker Infra
	@$(ECHO_CMD) "${BLUE}🛑 Stopping Infrastructure...${NC}"
	@docker compose -f $(COMPOSE_FILE) down
|
||||
|
||||
# Poll Postgres and Redis inside the compose stack until they answer.
# Fix: the original `until` loops spun forever if a container never became
# healthy; each probe now gives up after 120 attempts (~2 minutes) and fails
# the recipe with a clear message (exit 1 aborts the whole .ONESHELL script).
wait-for-infra: ## [LOW] Wait for infrastructure to be ready
	@printf "${BLUE}⏳ Waiting for services...${NC}"
	@for i in $$(seq 1 120); do \
		docker compose -f $(COMPOSE_FILE) exec -T postgres pg_isready -U $(DB_USER) > /dev/null 2>&1 && break; \
		if [ $$i -eq 120 ]; then $(ECHO_CMD) " ${RED}Timeout: postgres not ready${NC}"; exit 1; fi; \
		printf "."; sleep 1; \
	done
	@for i in $$(seq 1 120); do \
		docker compose -f $(COMPOSE_FILE) exec -T redis redis-cli ping > /dev/null 2>&1 && break; \
		if [ $$i -eq 120 ]; then $(ECHO_CMD) " ${RED}Timeout: redis not ready${NC}"; exit 1; fi; \
		printf "."; sleep 1; \
	done
	@$(ECHO_CMD) " ${GREEN}OK${NC}"
|
||||
|
||||
# Poll each application container until `exec` succeeds inside it.
# Fix: the original `until` loop per service could hang forever on a crashed
# container; each service now times out after 120 attempts and fails the
# recipe (exit 1 aborts the whole .ONESHELL script).
wait-for-services: ## [LOW] Wait for all application services
	@printf "${BLUE}⏳ Waiting for services...${NC}"
	@for service in backend-api chat-server stream-server web; do \
		for i in $$(seq 1 120); do \
			docker compose -f $(COMPOSE_PROD) exec -T $$service echo "ready" > /dev/null 2>&1 && break; \
			if [ $$i -eq 120 ]; then $(ECHO_CMD) " ${RED}Timeout: $$service not ready${NC}"; exit 1; fi; \
			printf "."; sleep 1; \
		done; \
	done
	@$(ECHO_CMD) " ${GREEN}OK${NC}"
|
||||
|
||||
# Interactive psql session inside the compose Postgres container.
db-shell: ## [MID] Connect to Postgres shell
	@docker compose -f $(COMPOSE_FILE) exec postgres psql -U $(DB_USER) -d $(DB_NAME)

redis-shell: ## [MID] Connect to Redis shell
	@docker compose -f $(COMPOSE_FILE) exec redis redis-cli

# Run all three migration suites; each is best-effort (a failure only
# prints a warning so the remaining suites still run).
db-migrate: infra-up ## [MID] Run all database migrations
	@$(ECHO_CMD) "${BLUE}🔄 Running Migrations...${NC}"
	@$(ECHO_CMD) " -> [Go] Migrating..."
	@(cd $(DIR_GO) && go run cmd/migrate_tool/main.go up || $(ECHO_CMD) "${YELLOW}Warning: Go migration failed${NC}")
	@$(ECHO_CMD) " -> [Chat] Migrating..."
	@(cd $(DIR_CHAT) && sqlx migrate run || $(ECHO_CMD) "${YELLOW}Warning: Chat migration failed${NC}")
	@$(ECHO_CMD) " -> [Stream] Migrating..."
	@(cd $(DIR_STREAM) && sqlx migrate run || $(ECHO_CMD) "${YELLOW}Warning: Stream migration failed${NC}")
	@$(ECHO_CMD) "${GREEN}✅ Migrations done.${NC}"
|
||||
|
||||
# ==============================================================================
# DEVELOPMENT
# ==============================================================================
.PHONY: dev dev-backend stop-local-services start-local-service stop-local-service

# Launch every service as a background job of ONE subshell; the `trap` kills
# the whole process group on Ctrl+C and `wait` keeps the recipe in the
# foreground. Hot-reload tools (air / cargo-watch) are used when installed.
dev: check-ports infra-up ## [HIGH] Start Everything (Detects Hot Reload tools)
	@$(ECHO_CMD) "${BOLD}${PURPLE}🚀 STARTING HYBRID DEV ENVIRONMENT${NC}"
	@$(ECHO_CMD) " Go: http://localhost:${PORT_GO}"
	@$(ECHO_CMD) " Chat: http://localhost:${PORT_CHAT}"
	@$(ECHO_CMD) " Web: http://localhost:${PORT_WEB}"
	@$(ECHO_CMD) "${YELLOW}Hit Ctrl+C to stop all.${NC}"
	@(trap 'kill 0' SIGINT; \
	if command -v air >/dev/null; then \
		$(ECHO_CMD) "${GREEN}[Go] Hot Reload Active (Air)${NC}" && cd $(DIR_GO) && air & \
	else \
		$(ECHO_CMD) "${YELLOW}[Go] Standard Run${NC}" && cd $(DIR_GO) && go run cmd/modern-server/main.go & \
	fi; \
	if command -v cargo-watch >/dev/null; then \
		$(ECHO_CMD) "${GREEN}[Chat] Hot Reload Active${NC}" && cd $(DIR_CHAT) && cargo watch -x run -q & \
		$(ECHO_CMD) "${GREEN}[Stream] Hot Reload Active${NC}" && cd $(DIR_STREAM) && cargo watch -x run -q & \
	else \
		$(ECHO_CMD) "${YELLOW}[Chat] Standard Run${NC}" && cd $(DIR_CHAT) && cargo run -q & \
		$(ECHO_CMD) "${YELLOW}[Stream] Standard Run${NC}" && cd $(DIR_STREAM) && cargo run -q & \
	fi; \
	$(ECHO_CMD) "${GREEN}[Web] Starting Vite...${NC}" && cd $(DIR_WEB) && npm run dev & \
	wait)

# Same job-control pattern as `dev`, without the web frontend.
dev-backend: check-ports infra-up ## [MID] Start Backends Only (Hot Reload supported)
	@$(ECHO_CMD) "${BOLD}${PURPLE}🚀 STARTING BACKEND ONLY${NC}"
	@(trap 'kill 0' SIGINT; \
	if command -v air >/dev/null; then cd $(DIR_GO) && air & else cd $(DIR_GO) && go run cmd/modern-server/main.go & fi; \
	if command -v cargo-watch >/dev/null; then cd $(DIR_CHAT) && cargo watch -x run -q & else cd $(DIR_CHAT) && cargo run -q & fi; \
	if command -v cargo-watch >/dev/null; then cd $(DIR_STREAM) && cargo watch -x run -q & else cd $(DIR_STREAM) && cargo run -q & fi; \
	wait)
|
||||
|
||||
# Kill all locally started dev processes (hot-reloaders, vite, go run).
# Fix: pkill -f matches an EXTENDED regex, where alternation is a bare `|`;
# the original "air\|cargo watch\|..." used BRE/grep-style escaped bars, so
# the pattern only matched the literal string "air|cargo watch|..." and
# never killed anything. NOTE: the bare "air" branch is broad — it matches
# any command line containing "air".
stop-local-services: ## [LOW] Stop all local processes
	@pkill -f "air|cargo watch|npm run dev|go run.*modern-server" 2>/dev/null || true
|
||||
|
||||
# Start one service as a local background process, choosing the hot-reload
# tool when available. Unknown SERVICE values fail the recipe.
start-local-service: ## [LOW] Start a service locally
	@case "$(SERVICE)" in \
		backend-api) \
			if command -v air >/dev/null; then cd $(DIR_GO) && air & else cd $(DIR_GO) && go run cmd/modern-server/main.go & fi ;; \
		chat-server) \
			if command -v cargo-watch >/dev/null; then cd $(DIR_CHAT) && cargo watch -x run -q & else cd $(DIR_CHAT) && cargo run -q & fi ;; \
		stream-server) \
			if command -v cargo-watch >/dev/null; then cd $(DIR_STREAM) && cargo watch -x run -q & else cd $(DIR_STREAM) && cargo run -q & fi ;; \
		web) \
			cd $(DIR_WEB) && npm run dev & ;; \
		*) \
			$(ECHO_CMD) "${RED}Unknown service: $(SERVICE)${NC}"; exit 1 ;; \
	esac
|
||||
|
||||
# Stop one locally running service by pattern-matching its command line.
# Fix: pkill -f takes an extended regex, so alternation must be a bare `|`;
# the original `\|` (BRE/grep syntax) made the patterns match a literal bar
# and therefore match nothing.
stop-local-service: ## [LOW] Stop a local service
	@case "$(SERVICE)" in \
		backend-api) pkill -f "air|go run.*modern-server" ;; \
		chat-server|stream-server) pkill -f "cargo.*$(SERVICE)" ;; \
		web) pkill -f "npm run dev|vite" ;; \
		*) $(ECHO_CMD) "${RED}Unknown service: $(SERVICE)${NC}" ;; \
	esac
|
||||
|
||||
# ==============================================================================
# BUILD COMMANDS
# ==============================================================================
.PHONY: build-backend-api build-chat-server build-stream-server build-web

# Build the Go backend image, preferring Dockerfile.production with a
# fallback to the plain Dockerfile.
# Fix: the original `A || B && C` parses as `(A || B) && C`, so the fallback
# build C always ran — overwriting the :latest tag with the non-production
# image even when the production build succeeded. The fallback is now
# grouped so it runs only when the production build fails.
build-backend-api: ## [LOW] Build Go backend
	@$(ECHO_CMD) "${BLUE}🔨 Building backend-api...${NC}"
	@docker build -t $(PROJECT_NAME)-backend-api:latest -f $(DIR_GO)/Dockerfile.production $(DIR_GO) || \
	{ $(ECHO_CMD) "${YELLOW}Using local Dockerfile...${NC}"; \
	docker build -t $(PROJECT_NAME)-backend-api:latest -f $(DIR_GO)/Dockerfile $(DIR_GO); }
|
||||
|
||||
# Build the chat server image; falls back to the plain Dockerfile when the
# production one fails (e.g. is absent).
build-chat-server: ## [LOW] Build Rust chat server
	@$(ECHO_CMD) "${BLUE}🔨 Building chat-server...${NC}"
	@docker build -t $(PROJECT_NAME)-chat-server:latest -f $(DIR_CHAT)/Dockerfile.production $(DIR_CHAT) || \
		docker build -t $(PROJECT_NAME)-chat-server:latest -f $(DIR_CHAT)/Dockerfile $(DIR_CHAT)

build-stream-server: ## [LOW] Build Rust stream server
	@$(ECHO_CMD) "${BLUE}🔨 Building stream-server...${NC}"
	@docker build -t $(PROJECT_NAME)-stream-server:latest -f $(DIR_STREAM)/Dockerfile.production $(DIR_STREAM) || \
		docker build -t $(PROJECT_NAME)-stream-server:latest -f $(DIR_STREAM)/Dockerfile $(DIR_STREAM)

build-web: ## [LOW] Build web frontend
	@$(ECHO_CMD) "${BLUE}🔨 Building web...${NC}"
	@docker build -t $(PROJECT_NAME)-web:latest -f $(DIR_WEB)/Dockerfile.production $(DIR_WEB) || \
		docker build -t $(PROJECT_NAME)-web:latest -f $(DIR_WEB)/Dockerfile $(DIR_WEB)
|
||||
|
||||
# ==============================================================================
# INCUS / LXD DEPLOYMENT
# ==============================================================================
.PHONY: incus-setup-network incus-deploy-all incus-deploy-all-native incus-deploy-service incus-deploy-service-native incus-deploy-infra incus-start-all incus-stop-all incus-logs

# Idempotently create (or reconcile) the veza bridge network and profile:
# first run creates them; later runs re-apply the expected settings and
# re-add any missing root/eth0 devices. All mutations are best-effort
# (`|| true`) so a partially-configured host does not fail the target.
incus-setup-network: ## [LOW] Setup Incus network profile
	@$(ECHO_CMD) "${BLUE}📦 Setting up Incus network...${NC}"
	@if ! incus network show $(INCUS_NETWORK) >/dev/null 2>&1; then \
		$(ECHO_CMD) "Creating network $(INCUS_NETWORK)..."; \
		incus network create $(INCUS_NETWORK) \
			ipv4.address=10.10.10.1/24 \
			ipv4.nat=true \
			ipv4.dhcp=true \
			dns.mode=managed \
			dns.nameservers=8.8.8.8,1.1.1.1; \
	else \
		$(ECHO_CMD) "Updating network configuration..."; \
		incus network set $(INCUS_NETWORK) ipv4.dhcp=true 2>/dev/null || true; \
		incus network set $(INCUS_NETWORK) dns.mode=managed 2>/dev/null || true; \
		incus network set $(INCUS_NETWORK) dns.nameservers=8.8.8.8,1.1.1.1 2>/dev/null || true; \
	fi
	@if ! incus profile show $(INCUS_PROFILE) >/dev/null 2>&1; then \
		$(ECHO_CMD) "Creating profile $(INCUS_PROFILE)..."; \
		incus profile create $(INCUS_PROFILE); \
		incus profile device add $(INCUS_PROFILE) root disk path=/ pool=default 2>/dev/null || \
		incus profile device add $(INCUS_PROFILE) root disk path=/ 2>/dev/null || true; \
		incus profile device add $(INCUS_PROFILE) eth0 nic network=$(INCUS_NETWORK) 2>/dev/null || true; \
	else \
		$(ECHO_CMD) "Ensuring profile devices..."; \
		if ! incus profile show $(INCUS_PROFILE) | grep -q "root:"; then \
			incus profile device add $(INCUS_PROFILE) root disk path=/ pool=default 2>/dev/null || \
			incus profile device add $(INCUS_PROFILE) root disk path=/ 2>/dev/null || true; \
		fi; \
		if ! incus profile show $(INCUS_PROFILE) | grep -q "eth0:"; then \
			incus profile device add $(INCUS_PROFILE) eth0 nic network=$(INCUS_NETWORK) 2>/dev/null || true; \
		fi; \
	fi
	@$(ECHO_CMD) "${GREEN}✅ Incus network ready.${NC}"
|
||||
|
||||
# Legacy path: each service gets its own Incus container running Docker.
incus-deploy-all: incus-setup-network ## [MID] Deploy all services to Incus (legacy Docker method)
	@$(ECHO_CMD) "${BLUE}📦 Deploying all services to Incus (Docker)...${NC}"
	@$(MAKE) -s incus-deploy-service SERVICE=backend-api
	@$(MAKE) -s incus-deploy-service SERVICE=chat-server
	@$(MAKE) -s incus-deploy-service SERVICE=stream-server
	@$(MAKE) -s incus-deploy-service SERVICE=web
	@$(MAKE) -s incus-deploy-service SERVICE=haproxy
	@$(ECHO_CMD) "${GREEN}✅ All services deployed to Incus.${NC}"

# Native path; the Rust services are intentionally NOT deployed here.
incus-deploy-all-native: incus-setup-network ## [MID] Deploy all services to Incus (native, no Docker) - excludes Rust services
	@$(ECHO_CMD) "${BLUE}📦 Deploying all services to Incus (native, excluding Rust services)...${NC}"
	@$(ECHO_CMD) "${YELLOW}⚠️ Note: chat-server and stream-server are excluded${NC}"
	@$(MAKE) -s incus-deploy-service-native SERVICE=backend-api
	@$(MAKE) -s incus-deploy-service-native SERVICE=web
	@$(MAKE) -s incus-deploy-service-native SERVICE=haproxy
	@$(ECHO_CMD) "${GREEN}✅ All services deployed to Incus.${NC}"
|
||||
|
||||
# Recreate the veza-$(SERVICE) container from a Debian image and install
# Docker inside it (legacy method). An existing container is destroyed
# first, so this is NOT a zero-downtime operation.
incus-deploy-service: ## [LOW] Deploy a service to Incus with Docker (usage: make incus-deploy-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${BLUE}📦 Deploying $(SERVICE) to Incus (Docker)...${NC}"
	@if incus list -c n --format csv | grep -q "^veza-$(SERVICE)$$"; then \
		$(ECHO_CMD) "${YELLOW}Container exists, removing...${NC}"; \
		incus delete veza-$(SERVICE) --force; \
	fi
	@incus init images:debian/13 veza-$(SERVICE) --profile $(INCUS_PROFILE)
	@incus start veza-$(SERVICE)
	@$(ECHO_CMD) "${BLUE}Installing Docker in container...${NC}"
	@incus exec veza-$(SERVICE) -- bash -c "apt-get update && apt-get install -y docker.io docker-compose && systemctl enable docker && systemctl start docker" || true
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) deployed.${NC}"
|
||||
|
||||
# Deploy one service natively into Incus via the helper script.
# Fix: `$(shell pwd)` forked a shell at each expansion; $(CURDIR) is the
# built-in absolute working directory and behaves correctly under `make -C`.
incus-deploy-service-native: ## [LOW] Deploy a service to Incus natively (usage: make incus-deploy-service-native SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${BLUE}📦 Deploying $(SERVICE) to Incus (native)...${NC}"
	@$(CURDIR)/config/incus/deploy-service-native.sh $(SERVICE)
|
||||
|
||||
# Deploy the veza-infra container (PostgreSQL + Redis) and poll up to 30s
# for both systemd units to become active.
# NOTE(review): if the 30-second poll expires without both units active,
# the loop simply ends and the final "✅ Infrastructure deployed." still
# prints — confirm whether a hard failure is wanted here instead.
incus-deploy-infra: incus-setup-network ## [LOW] Deploy infrastructure services (PostgreSQL, Redis)
	@$(ECHO_CMD) "${BLUE}📦 Deploying infrastructure services...${NC}"
	@$(MAKE) -s incus-deploy-service-native SERVICE=infra
	@$(ECHO_CMD) "${BLUE}Waiting for infrastructure to be ready...${NC}"
	@for i in $$(seq 1 30); do \
		if incus exec veza-infra -- systemctl is-active postgresql >/dev/null 2>&1 && \
			incus exec veza-infra -- systemctl is-active redis-server >/dev/null 2>&1; then \
			$(ECHO_CMD) "${GREEN}✅ Infrastructure services ready${NC}"; \
			break; \
		fi; \
		sleep 1; \
	done
	@$(ECHO_CMD) "${GREEN}✅ Infrastructure deployed.${NC}"
|
||||
|
||||
# Start each deployed service's unit inside its container. Containers are
# looked up by exact name in `incus list` before any exec, so a missing
# container is skipped silently; a present-but-failing unit only warns.
# Web runs under Apache, the proxy under HAProxy, infra hosts Postgres+Redis.
incus-start-all: ## [MID] Start all Incus services (excluding Rust services)
	@$(ECHO_CMD) "${BLUE}🚀 Starting all Incus services (excluding Rust services)...${NC}"
	@for service in backend-api; do \
		if incus list -c n --format csv | grep -q "^veza-$$service$$"; then \
			$(ECHO_CMD) "Starting veza-$$service..."; \
			if incus exec veza-$$service -- systemctl start veza-$$service 2>/dev/null; then \
				$(ECHO_CMD) "${GREEN} ✅ veza-$$service started${NC}"; \
			else \
				$(ECHO_CMD) "${YELLOW} ⚠️ veza-$$service failed to start (check logs)${NC}"; \
			fi; \
		fi; \
	done
	@if incus list -c n --format csv | grep -q "^veza-web$$"; then \
		$(ECHO_CMD) "Starting veza-web..."; \
		if incus exec veza-web -- systemctl start apache2 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ Apache started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ Apache failed to start${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv | grep -q "^veza-haproxy$$"; then \
		$(ECHO_CMD) "Starting veza-haproxy..."; \
		if incus exec veza-haproxy -- systemctl start haproxy 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ HAProxy started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ HAProxy failed to start${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv | grep -q "^veza-infra$$"; then \
		$(ECHO_CMD) "Starting infrastructure services..."; \
		if incus exec veza-infra -- systemctl start postgresql 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ PostgreSQL started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ PostgreSQL failed to start${NC}"; \
		fi; \
		if incus exec veza-infra -- systemctl start redis-server 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ Redis started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ Redis failed to start${NC}"; \
		fi; \
	fi
	@$(ECHO_CMD) "${GREEN}✅ All services started.${NC}"
	@$(ECHO_CMD) "${BLUE}Run 'make incus-status' to check service status${NC}"
|
||||
|
||||
# Stop every container whose name contains "veza-"; stop errors (e.g. a
# container already stopped) are swallowed.
incus-stop-all: ## [MID] Stop all Incus containers
	@$(ECHO_CMD) "${YELLOW}🛑 Stopping all Incus containers...${NC}"
	@for container in $$(incus list -c n --format csv | grep veza-); do \
		incus stop $$container 2>/dev/null || true; \
	done
	@$(ECHO_CMD) "${GREEN}✅ All Incus containers stopped.${NC}"
|
||||
|
||||
# Read-only status report: container table plus per-service systemd unit
# state (active/inactive), probed only for containers that actually exist.
# Every incus call tolerates a missing daemon/container (2>/dev/null).
incus-status: ## [MID] Show status of all Incus services
	@$(ECHO_CMD) "${BOLD}${CYAN}📊 INCUS DEPLOYMENT STATUS${NC}"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Containers:${NC}"
	@incus list veza- --format table 2>/dev/null || echo " No containers found"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Service Status:${NC}"
	@for service in backend-api chat-server stream-server; do \
		if incus list -c n --format csv 2>/dev/null | grep -q "^veza-$$service$$"; then \
			STATUS=$$(incus exec veza-$$service -- systemctl is-active veza-$$service 2>/dev/null || echo "inactive"); \
			if [ "$$STATUS" = "active" ]; then \
				$(ECHO_CMD) " ${GREEN}✅ veza-$$service: active${NC}"; \
			else \
				$(ECHO_CMD) " ${YELLOW}⚠️ veza-$$service: $$STATUS${NC}"; \
			fi; \
		fi; \
	done
	@if incus list -c n --format csv 2>/dev/null | grep -q "^veza-web$$"; then \
		STATUS=$$(incus exec veza-web -- systemctl is-active apache2 2>/dev/null || echo "inactive"); \
		if [ "$$STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ veza-web (Apache): active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ veza-web (Apache): $$STATUS${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv 2>/dev/null | grep -q "^veza-haproxy$$"; then \
		STATUS=$$(incus exec veza-haproxy -- systemctl is-active haproxy 2>/dev/null || echo "inactive"); \
		if [ "$$STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ veza-haproxy: active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ veza-haproxy: $$STATUS${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv 2>/dev/null | grep -q "^veza-infra$$"; then \
		PG_STATUS=$$(incus exec veza-infra -- systemctl is-active postgresql 2>/dev/null || echo "inactive"); \
		REDIS_STATUS=$$(incus exec veza-infra -- systemctl is-active redis-server 2>/dev/null || echo "inactive"); \
		if [ "$$PG_STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ PostgreSQL: active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ PostgreSQL: $$PG_STATUS${NC}"; \
		fi; \
		if [ "$$REDIS_STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ Redis: active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ Redis: $$REDIS_STATUS${NC}"; \
		fi; \
	fi
	@$(ECHO_CMD) ""
|
||||
|
||||
# Follow the journal of one container (blocks until Ctrl+C).
incus-logs: ## [LOW] Show logs from Incus container (usage: make incus-logs SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@incus exec veza-$(SERVICE) -- journalctl -f
|
||||
|
||||
# ==============================================================================
|
||||
# TEST & QUALITY
|
||||
# ==============================================================================
|
||||
.PHONY: test test-tmt lint fmt status
|
||||
|
||||
# Run the unified TMT (Test Management Tool) pipeline.
# Fails fast with an install hint when the `tmt` binary is not on PATH.
test-tmt: ## [MID] Run Unified TMT Pipeline
	@$(ECHO_CMD) "${BLUE}🧪 Running TMT Pipeline...${NC}"
	@if ! command -v tmt >/dev/null 2>&1; then \
		$(ECHO_CMD) "${RED}❌ tmt is missing! Install with 'pip install tmt'${NC}"; \
		exit 1; \
	fi
	@tmt run
|
||||
|
||||
# Run all unit test suites (Go, Rust x2, Web) after bringing infra up.
#
# FIX: this Makefile sets `.ONESHELL:` (see prologue), so the whole recipe
# runs as a SINGLE bash script. Without `set -e`, a failing test command
# did NOT abort the recipe — the final echo still ran, the script exited 0,
# and `make test` reported success on failing tests. The explicit
# `set -euo pipefail` makes any failing suite fail the target immediately.
# (If .ONESHELL were ever removed, the extra line is a harmless no-op:
# make already stops on a failing per-line recipe command.)
test: infra-up ## [MID] Run All Tests (Fastest strategy)
	@set -euo pipefail
	@$(ECHO_CMD) "${BLUE}🧪 Running Tests...${NC}"
	@$(ECHO_CMD) " [Go] Unit Tests..."
	@(cd $(DIR_GO) && go test ./... -short)
	@$(ECHO_CMD) " [Rust] Unit Tests..."
	@(cd $(DIR_CHAT) && cargo test --lib -q)
	@(cd $(DIR_STREAM) && cargo test --lib -q)
	@$(ECHO_CMD) " [Web] Unit Tests..."
	@(cd $(DIR_WEB) && npm run test -- --run)
	@$(ECHO_CMD) "${GREEN}✅ All tests passed.${NC}"
|
||||
|
||||
# Lint every component of the stack.
#
# FIX: each linter was suffixed with `|| true`, so the target ALWAYS exited 0
# and could never gate CI. Failures are now aggregated instead of swallowed:
# every linter still runs (so you see all problems in one pass), but the
# target exits non-zero if any of them reported issues.
lint: ## [MID] Lint everything
	@$(ECHO_CMD) "${BLUE}🔍 Linting Codebase...${NC}"
	@fail=0; \
	(cd $(DIR_CHAT) && cargo clippy -- -D warnings) || fail=1; \
	(cd $(DIR_STREAM) && cargo clippy -- -D warnings) || fail=1; \
	(cd $(DIR_GO) && golangci-lint run ./...) || fail=1; \
	(cd $(DIR_WEB) && npm run lint) || fail=1; \
	exit $$fail
|
||||
|
||||
# Run every code formatter across the stack.
# The web formatter is best-effort (`|| true`) — presumably because not every
# checkout defines a "format" npm script; TODO confirm and tighten if it does.
fmt: ## [MID] Format everything
	@$(ECHO_CMD) "${BLUE}✨ Formatting...${NC}"
	@(cd $(DIR_GO) && go fmt ./...)
	@(cd $(DIR_STREAM) && cargo fmt)
	@(cd $(DIR_CHAT) && cargo fmt)
	@(cd $(DIR_WEB) && npm run format) || true
|
||||
|
||||
# Quick health snapshot: veza docker containers (CPU/mem/net) and which of
# the app ports have a local listener. Both probes degrade to a friendly
# message when docker/lsof is unavailable or nothing is running
# (stderr is discarded, the `||` fallback prints instead).
status: ## [MID] Show system health & stats
	@$(ECHO_CMD) "${BOLD}DOCKER STATS:${NC}"
	@{ docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}" 2>/dev/null | grep -E 'NAME|veza'; } || echo "No containers running"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}LOCAL PORTS:${NC}"
	@{ lsof -i :$(PORT_GO) -i :$(PORT_CHAT) -i :$(PORT_STREAM) -i :$(PORT_WEB) 2>/dev/null | grep LISTEN; } || echo "No apps listening."
|
||||
.PHONY: dev-web dev-backend-api dev-chat-server dev-stream-server
|
||||
.PHONY: test-web test-backend-api test-chat-server test-stream-server
|
||||
.PHONY: lint-web lint-backend-api lint-chat-server lint-stream-server
|
||||
# (targets defined in make/dev.mk and make/test.mk)
|
||||
|
|
|
|||
|
|
@ -3,6 +3,9 @@
|
|||
|
||||
# API Configuration
|
||||
# Base URL for the REST API (can be absolute URL or path starting with /)
|
||||
# DEV (veza.fr or localhost): use /api/v1 so the Vite proxy forwards to the backend.
|
||||
# - Same origin => cookies are sent => login and /auth/me work. Using http://localhost:8080
|
||||
# from veza.fr:5173 is cross-origin => cookies not sent => 401 and redirect loop.
|
||||
VITE_API_URL=/api/v1
|
||||
|
||||
# WebSocket Configuration
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
# Configuration API pour développement local
|
||||
# Backend Go tourne sur le port 8080
|
||||
VITE_API_URL=http://localhost:8080/api/v1
|
||||
VITE_API_URL=/api/v1
|
||||
VITE_WS_URL=ws://localhost:8081/ws
|
||||
VITE_STREAM_URL=ws://localhost:8082/stream
|
||||
|
|
|
|||
85
apps/web/e2e/playwright-report-visual/index.html
Normal file
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 414 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
|
After Width: | Height: | Size: 332 KiB |
4
apps/web/e2e/test-results-visual/.last-run.json
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"status": "passed",
|
||||
"failedTests": []
|
||||
}
|
||||
|
|
@ -107,26 +107,18 @@ export function App() {
|
|||
}, [setTheme, theme, language, setLanguage]);
|
||||
|
||||
// P1.2: Initialize auth state before rendering app
|
||||
// This prevents race condition where router renders before auth is checked
|
||||
// With httpOnly cookies we cannot read tokens in JS; always call refreshUser()
|
||||
// so getMe() is used to verify auth (cookies sent automatically).
|
||||
useEffect(() => {
|
||||
const initAuth = async () => {
|
||||
try {
|
||||
// Check if user has tokens
|
||||
const { hasTokens } = await import('@/services/tokenStorage').then(
|
||||
(m) => ({ hasTokens: m.TokenStorage.hasTokens() })
|
||||
);
|
||||
|
||||
if (hasTokens) {
|
||||
// Wait for auth check to complete
|
||||
await refreshUser();
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('[App] Auth initialization failed', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
});
|
||||
} finally {
|
||||
// Always set ready, even if auth check fails
|
||||
setIsAuthReady(true);
|
||||
}
|
||||
};
|
||||
|
|
|
|||
|
|
@ -75,6 +75,25 @@ const parseEnv = () => {
|
|||
// Variables d'environnement validées
|
||||
const validatedEnv = parseEnv();
|
||||
|
||||
// En dev, alerter si l'API est en cross-origin : les cookies ne seront pas envoyés (SameSite),
|
||||
// ce qui provoque 401 après login et redirections en boucle. Utiliser VITE_API_URL=/api/v1 (proxy).
|
||||
if (import.meta.env.DEV && typeof window !== 'undefined') {
|
||||
const apiUrl = validatedEnv.VITE_API_URL;
|
||||
if (apiUrl.startsWith('http')) {
|
||||
try {
|
||||
const apiOrigin = new URL(apiUrl).origin;
|
||||
if (window.location.origin !== apiOrigin) {
|
||||
logger.warn(
|
||||
'[Config] API is cross-origin: cookies will not be sent, login may fail or redirect in a loop. Use VITE_API_URL=/api/v1 so the Vite proxy is used (same origin).',
|
||||
{ apiOrigin, pageOrigin: window.location.origin }
|
||||
);
|
||||
}
|
||||
} catch {
|
||||
// ignore invalid URL
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Export de l'objet env avec types
|
||||
export const env = {
|
||||
API_URL: validatedEnv.VITE_API_URL,
|
||||
|
|
|
|||
|
|
@ -10,31 +10,22 @@ export const useLogin = () => {
|
|||
|
||||
return useMutation({
|
||||
mutationFn: async (credentials: LoginRequest) => {
|
||||
// loginStore appelle déjà loginService et met à jour le store
|
||||
// Il attend aussi que la persistance soit complète
|
||||
await loginStore(credentials);
|
||||
const loginResponse = await loginStore(credentials);
|
||||
const user = loginResponse.user;
|
||||
|
||||
// Vérifier que le store est bien mis à jour après la persistance
|
||||
const { isAuthenticated } = useAuthStore.getState();
|
||||
if (!isAuthenticated) {
|
||||
// Attendre un peu plus et réessayer
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
const retryState = useAuthStore.getState();
|
||||
if (!retryState.isAuthenticated) {
|
||||
throw new Error(
|
||||
'Login failed: user not authenticated after persistence',
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch user data and update React Query cache
|
||||
const user = await getMe();
|
||||
// Populate React Query cache so the app has user data immediately
|
||||
queryClient.setQueryData(['user', 'me'], user);
|
||||
|
||||
return {
|
||||
user,
|
||||
isAuthenticated,
|
||||
};
|
||||
// Optionally refresh from /auth/me; do not fail login if this fails (e.g. backend delay)
|
||||
getMe()
|
||||
.then((freshUser) => {
|
||||
queryClient.setQueryData(['user', 'me'], freshUser);
|
||||
})
|
||||
.catch(() => {
|
||||
// Keep using user from login response
|
||||
});
|
||||
|
||||
return { user, isAuthenticated: true };
|
||||
},
|
||||
});
|
||||
};
|
||||
|
|
|
|||
|
|
@ -7,10 +7,27 @@ import { OAuthButton } from '../components/OAuthButton';
|
|||
import { useLogin } from '../hooks/useLogin';
|
||||
import type { LoginFormData } from '../types';
|
||||
import { logger } from '@/utils/logger';
|
||||
import { formatErrorMessage as formatApiErrorMessage } from '@/utils/apiErrorHandler';
|
||||
import type { ApiError } from '@/schemas/apiSchemas';
|
||||
import { CardContent, CardHeader, CardTitle, CardDescription } from '@/components/ui/card';
|
||||
import { AlertCircle } from 'lucide-react';
|
||||
import { AuthLayout } from '../components/AuthLayout';
|
||||
|
||||
function getLoginErrorMessage(error: unknown): string {
|
||||
if (error == null) return '';
|
||||
if (typeof error === 'object' && error !== null && 'message' in error && 'code' in error) {
|
||||
return formatApiErrorMessage(error as ApiError);
|
||||
}
|
||||
if (error instanceof Error) {
|
||||
const msg = error.message?.toLowerCase() ?? '';
|
||||
if (msg.includes('invalid credentials') || msg.includes('401')) return 'Incorrect email or password';
|
||||
if (msg.includes('email not verified')) return "Your email is not verified. Check your inbox.";
|
||||
if (msg.includes('network')) return 'Connection error. Check your internet.';
|
||||
return error.message || 'An error occurred. Please try again.';
|
||||
}
|
||||
return String(error);
|
||||
}
|
||||
|
||||
export function LoginPage() {
|
||||
const navigate = useNavigate();
|
||||
const { isAuthenticated, isLoading } = useAuthStore();
|
||||
|
|
@ -117,23 +134,6 @@ export function LoginPage() {
|
|||
window.location.href = `/api/v1/auth/oauth/${provider}`;
|
||||
};
|
||||
|
||||
const formatErrorMessage = (error: Error | null): string => {
|
||||
if (!error) return '';
|
||||
const message = error.message.toLowerCase();
|
||||
const errorString = error.toString().toLowerCase();
|
||||
|
||||
if (message.includes('invalid credentials') || message.includes('401') || errorString.includes('401')) {
|
||||
return 'Incorrect email or password';
|
||||
}
|
||||
if (message.includes('email not verified')) {
|
||||
return "Your email is not verified. Check your inbox.";
|
||||
}
|
||||
if (message.includes('network')) {
|
||||
return 'Connection error. Check your internet.';
|
||||
}
|
||||
return 'An error occurred. Please try again.';
|
||||
};
|
||||
|
||||
return (
|
||||
<AuthLayout
|
||||
title="Welcome Back"
|
||||
|
|
@ -156,7 +156,7 @@ export function LoginPage() {
|
|||
{error && (
|
||||
<div className="bg-destructive/10 border border-destructive/30 text-destructive px-4 py-3 rounded-lg text-sm flex items-center gap-2">
|
||||
<AlertCircle className="w-4 h-4 flex-shrink-0" />
|
||||
<p>{formatErrorMessage(error)}</p>
|
||||
<p>{getLoginErrorMessage(error)}</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
|
|
@ -183,18 +183,18 @@ export function LoginPage() {
|
|||
/>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center justify-between text-sm">
|
||||
<div className="flex items-center gap-2">
|
||||
<div className="flex items-center justify-between gap-3 text-sm min-w-0">
|
||||
<label htmlFor="remember_me" className="flex items-center gap-2 text-muted-foreground cursor-pointer min-w-0 flex-shrink">
|
||||
<input
|
||||
type="checkbox"
|
||||
id="remember_me"
|
||||
checked={remember_me}
|
||||
onChange={(e) => setRemember_me(e.target.checked)}
|
||||
className="h-4 w-4 rounded border-white/10 bg-black/20 text-primary focus:ring-primary/50"
|
||||
className="h-4 w-4 rounded border-white/10 bg-black/20 text-primary focus:ring-primary/50 flex-shrink-0"
|
||||
/>
|
||||
<label htmlFor="remember_me" className="text-muted-foreground">Remember me</label>
|
||||
</div>
|
||||
<Link to="/forgot-password" className="text-primary hover:underline">Forgot password?</Link>
|
||||
<span className="truncate">Remember me</span>
|
||||
</label>
|
||||
<Link to="/forgot-password" className="text-primary hover:underline flex-shrink-0">Forgot password?</Link>
|
||||
</div>
|
||||
|
||||
<AuthButton type="submit" loading={loading} className="w-full bg-gradient-to-r from-cyan-600 to-magenta-600 hover:from-cyan-500 hover:to-magenta-500 text-white border-0 shadow-lg shadow-cyan-900/20">
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import {
|
|||
logout as logoutService,
|
||||
getMe,
|
||||
type LoginRequest,
|
||||
type LoginResponse,
|
||||
type RegisterRequest,
|
||||
} from '@/services/api/auth';
|
||||
import { TokenStorage } from '@/services/tokenStorage';
|
||||
|
|
@ -26,7 +27,7 @@ export interface AuthState {
|
|||
}
|
||||
|
||||
export interface AuthActions {
|
||||
login: (credentials: LoginRequest) => Promise<void>;
|
||||
login: (credentials: LoginRequest) => Promise<LoginResponse>;
|
||||
register: (userData: RegisterRequest) => Promise<void>;
|
||||
logout: () => Promise<void>;
|
||||
logoutLocal: () => void; // Logout local sans appel API (pour éviter les boucles)
|
||||
|
|
@ -54,25 +55,22 @@ export const useAuthStore = create<AuthStore>()(
|
|||
login: async (credentials: LoginRequest) => {
|
||||
set({ isLoading: true, error: null });
|
||||
try {
|
||||
// Le service auth gère déjà le stockage des tokens
|
||||
// Action 4.1.1.5: user field removed - user data managed by React Query
|
||||
// Response contains user data but we don't store it (React Query handles that)
|
||||
await loginService(credentials);
|
||||
const response = await loginService(credentials);
|
||||
|
||||
// Mettre à jour l'état de manière atomique pour éviter les problèmes de timing
|
||||
set({
|
||||
isAuthenticated: true,
|
||||
isLoading: false,
|
||||
error: null,
|
||||
});
|
||||
|
||||
// Récupérer le token CSRF après login
|
||||
csrfService.refreshToken().catch((error) => {
|
||||
logger.warn('Failed to fetch CSRF token after login', {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
stack: error instanceof Error ? error.stack : undefined,
|
||||
});
|
||||
});
|
||||
|
||||
return response;
|
||||
} catch (error: unknown) {
|
||||
set({
|
||||
error: parseApiError(error),
|
||||
|
|
@ -176,26 +174,15 @@ export const useAuthStore = create<AuthStore>()(
|
|||
|
||||
refreshUser: async () => {
|
||||
// Action 4.3.1.2: Simplified using React Query - no manual promise deduplication needed
|
||||
// React Query's useUser hook handles deduplication automatically at the query level
|
||||
const currentState = useAuthStore.getState();
|
||||
|
||||
if (!TokenStorage.hasTokens()) {
|
||||
// CRITIQUE FIX #2: Ne réinitialiser que si on n'était pas déjà authentifié
|
||||
if (!currentState.isAuthenticated) {
|
||||
set({ isAuthenticated: false, isLoading: false });
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// CRITIQUE FIX #2: Ne pas réinitialiser isAuthenticated si on était déjà authentifié
|
||||
// Cela évite les problèmes de timing après le login et la navigation
|
||||
const hasAuth = currentState.isAuthenticated;
|
||||
|
||||
// SECURITY: With httpOnly cookies, hasTokens() is always false in JS.
|
||||
// Always try getMe() to verify auth; cookies are sent automatically.
|
||||
set({ isLoading: true });
|
||||
try {
|
||||
// Verify authentication by calling getMe()
|
||||
// User data is managed by React Query (useUser hook), not stored here
|
||||
// React Query will deduplicate this call if useUser hook is already fetching
|
||||
// Verify authentication by calling getMe() (cookies sent automatically)
|
||||
await getMe();
|
||||
set({
|
||||
isAuthenticated: true,
|
||||
|
|
|
|||
|
|
@ -45,8 +45,6 @@ export const handlers = [
|
|||
|
||||
http.post('*/api/v1/auth/login', async () => {
|
||||
return HttpResponse.json({
|
||||
access_token: 'mock_access_token_generic',
|
||||
refresh_token: 'mock_refresh_token_generic',
|
||||
user: {
|
||||
id: 1,
|
||||
username: 'StorybookUser',
|
||||
|
|
@ -54,6 +52,11 @@ export const handlers = [
|
|||
created_at: '2024-01-01T00:00:00Z',
|
||||
avatar_url: 'https://i.pravatar.cc/150?u=1',
|
||||
},
|
||||
token: {
|
||||
access_token: 'mock_access_token_generic',
|
||||
refresh_token: 'mock_refresh_token_generic',
|
||||
expires_in: 3600,
|
||||
},
|
||||
});
|
||||
}),
|
||||
|
||||
|
|
|
|||
|
|
@ -1247,10 +1247,11 @@ apiClient.interceptors.response.use(
|
|||
}
|
||||
|
||||
// INT-AUTH-003: Détecter 401 et refresh automatiquement
|
||||
// EXCLURE l'endpoint /auth/refresh pour éviter les boucles infinies
|
||||
// EXCLURE aussi /auth/logout car si le logout échoue, on ne doit pas rafraîchir le token
|
||||
// EXCLURE /auth/refresh et /auth/logout pour éviter les boucles.
|
||||
// EXCLURE /auth/me : 401 = non connecté ; ne pas tenter de refresh ni rediriger (sinon boucle rechargement).
|
||||
const isRefreshEndpoint = originalRequest?.url?.includes('/auth/refresh');
|
||||
const isLogoutEndpoint = originalRequest?.url?.includes('/auth/logout');
|
||||
const isAuthMeEndpoint = originalRequest?.url?.includes('/auth/me');
|
||||
|
||||
// INT-AUTH-003: Handle 401 and 400 on /auth/refresh endpoint - token expired/revoked/invalid, logout and redirect
|
||||
// FIX: Gérer aussi les erreurs 400 (Bad Request) qui indiquent un refresh token invalide
|
||||
|
|
@ -1340,7 +1341,8 @@ apiClient.interceptors.response.use(
|
|||
originalRequest &&
|
||||
!originalRequest._retry &&
|
||||
!isRefreshEndpoint &&
|
||||
!isLogoutEndpoint
|
||||
!isLogoutEndpoint &&
|
||||
!isAuthMeEndpoint
|
||||
) {
|
||||
// INT-AUTH-003: Éviter les refresh multiples simultanés
|
||||
if (isRefreshing) {
|
||||
|
|
@ -1732,22 +1734,22 @@ apiClient.interceptors.response.use(
|
|||
const apiError = parseApiError(error);
|
||||
|
||||
// Action 3.2.1.4: Auth errors redirect to login
|
||||
// Handle 401 errors that didn't trigger refresh (e.g., no originalRequest, already retried, etc.)
|
||||
// EXCLURE aussi /auth/logout pour éviter les boucles
|
||||
if (status === 401 && !isRefreshEndpoint && !isLogoutEndpoint && typeof window !== 'undefined') {
|
||||
// isAuthMeEndpoint déjà défini plus haut : on ne redirige pas pour /auth/me (401 = non connecté, pas de redirect)
|
||||
if (
|
||||
status === 401 &&
|
||||
!isRefreshEndpoint &&
|
||||
!isLogoutEndpoint &&
|
||||
!isAuthMeEndpoint &&
|
||||
typeof window !== 'undefined'
|
||||
) {
|
||||
const errorCategory = getErrorCategory(apiError);
|
||||
if (errorCategory === 'authentication') {
|
||||
// Clear tokens
|
||||
TokenStorage.clearTokens();
|
||||
csrfService.clearToken();
|
||||
|
||||
// Clear auth store state
|
||||
// FIX: Utiliser logoutLocal() pour éviter les boucles infinies
|
||||
import('@/features/auth/store/authStore')
|
||||
.then(({ useAuthStore }) => {
|
||||
const store = useAuthStore.getState();
|
||||
// Utiliser logoutLocal() au lieu de logout() pour éviter les appels API
|
||||
// qui déclencheraient à nouveau le refresh
|
||||
store.logoutLocal();
|
||||
})
|
||||
.catch((err: unknown) => {
|
||||
|
|
@ -1756,12 +1758,10 @@ apiClient.interceptors.response.use(
|
|||
});
|
||||
});
|
||||
|
||||
// Store error message for display after redirect
|
||||
sessionStorage.setItem(
|
||||
'auth_error',
|
||||
'Votre session a expiré. Veuillez vous reconnecter.',
|
||||
);
|
||||
// Redirect to login
|
||||
window.location.href = '/login';
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -242,6 +242,17 @@ export function parseApiError(error: unknown): ApiError {
|
|||
};
|
||||
}
|
||||
|
||||
if (status === 423) {
|
||||
const data = responseData as { message?: string } | null;
|
||||
return {
|
||||
code: 423,
|
||||
message:
|
||||
data?.message ||
|
||||
'This action cannot be completed right now. The resource may be locked or your account may be temporarily restricted. Please try again later.',
|
||||
timestamp: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
// Erreur HTTP sans format standardisé
|
||||
const data = responseData as { message?: string } | null;
|
||||
return {
|
||||
|
|
@ -357,7 +368,10 @@ export function formatErrorMessage(
|
|||
error: ApiError,
|
||||
includeRequestId: boolean = false,
|
||||
): string {
|
||||
let message = error.message;
|
||||
const baseMessage =
|
||||
typeof error.message === 'string' ? error.message : 'An error occurred';
|
||||
|
||||
let message = baseMessage;
|
||||
|
||||
// Si l'erreur a des détails de validation, les inclure
|
||||
if (
|
||||
|
|
@ -366,9 +380,17 @@ export function formatErrorMessage(
|
|||
error.details.length > 0
|
||||
) {
|
||||
const detailsMessages = error.details
|
||||
.map((detail) => `${detail.field}: ${detail.message}`)
|
||||
.map((detail) => {
|
||||
const f =
|
||||
typeof detail.field === 'string' ? detail.field : String(detail.field);
|
||||
const m =
|
||||
typeof detail.message === 'string'
|
||||
? detail.message
|
||||
: String(detail.message);
|
||||
return `${f}: ${m}`;
|
||||
})
|
||||
.join(', ');
|
||||
message = `${error.message} (${detailsMessages})`;
|
||||
message = `${baseMessage} (${detailsMessages})`;
|
||||
}
|
||||
|
||||
// Action 5.3.1.1: Always include request_id when requested (not just in development)
|
||||
|
|
|
|||
|
|
@ -7,8 +7,11 @@ import { visualizer } from 'rollup-plugin-visualizer'
|
|||
// https://vitejs.dev/config/
|
||||
export default defineConfig(({ mode }) => {
|
||||
const isProduction = mode === 'production'
|
||||
const projectRoot = path.resolve(__dirname)
|
||||
|
||||
return {
|
||||
// Ensure dev server and dep scan use apps/web only (avoid picking up storybook-static when run from monorepo root)
|
||||
root: projectRoot,
|
||||
plugins: [
|
||||
react(),
|
||||
// Bundle analyzer for production builds
|
||||
|
|
@ -39,6 +42,15 @@ export default defineConfig(({ mode }) => {
|
|||
server: {
|
||||
port: 5173,
|
||||
host: true,
|
||||
// Allow dev access via local domain names (e.g. /etc/hosts: 127.0.0.1 veza.fr)
|
||||
allowedHosts: ['veza.fr', 'veza.com', 'veza.talas.fr', 'veza.talas.com'],
|
||||
// Exclude Storybook build output from watch and fs access so dep scan never touches it
|
||||
watch: {
|
||||
ignored: ['**/storybook-static/**', '**/dist_verification/**'],
|
||||
},
|
||||
fs: {
|
||||
deny: ['**/storybook-static/**', '**/dist_verification/**'],
|
||||
},
|
||||
// P2.1: Proxy API requests to backend in development
|
||||
// This eliminates CORS issues in dev by making all requests same-origin
|
||||
proxy: {
|
||||
|
|
@ -78,7 +90,9 @@ export default defineConfig(({ mode }) => {
|
|||
},
|
||||
// Standard optimization settings usually work best
|
||||
optimizeDeps: {
|
||||
include: ['react', 'react-dom']
|
||||
include: ['react', 'react-dom'],
|
||||
// Only scan from app entry; avoid storybook-static (and other build outputs) being picked up as entries
|
||||
entries: ['index.html', 'src/main.tsx'],
|
||||
},
|
||||
}
|
||||
})
|
||||
193
docs/FRONTEND_AUDIT_VISUAL.md
Normal file
|
|
@ -0,0 +1,193 @@
|
|||
# Audit visuel exhaustif du frontend Veza
|
||||
|
||||
**Date:** 2026-02-07
|
||||
**Objectif:** Identifier les causes précises de la "mocheté" perçue (layout, composants, couleurs, contrastes, typographie, cohérence).
|
||||
|
||||
---
|
||||
|
||||
## 1. Résumé exécutif
|
||||
|
||||
Le frontend souffre de **plusieurs facteurs cumulés** : palette d’accents incohérente (teal + magenta/purple + vert + rouge), manque de profondeur (cartes trop plates), éléments "placeholder" visibles (ex. "0%" en rouge partout), barre de lecture disproportionnée, et typographie potentiellement dégradée (Rajdhani + erreurs glyph). Les correctifs ciblent des fichiers et variables précis ci-dessous.
|
||||
|
||||
---
|
||||
|
||||
## 2. Palette et couleurs
|
||||
|
||||
### 2.1 Incohérence des couleurs d’accent
|
||||
|
||||
| Contexte | Couleur utilisée | Fichier / token | Problème |
|
||||
|----------|------------------|-----------------|----------|
|
||||
| Élément actif sidebar, bouton play, "NETWORK STABLE" | **Teal / cyan** (primary) | `--primary: oklch(0.75 0.18 195)` dans `index.css` | Cohérent comme accent principal. |
|
||||
| Badges sidebar (Live Sessions 3, Channels 12) | **Magenta / violet** (`secondary`) | `Sidebar.tsx` L195 : `bg-secondary/20 text-secondary` ; `--secondary: oklch(0.65 0.25 330)` | **Hors palette** par rapport au teal ; donne une impression de "troisième couleur" non intégrée. |
|
||||
| Pourcentages positifs, "ACTIVE", "NETWORK STABLE" (dot) | **Vert** (lime/success) | `StatCard.tsx` (lime), succès sémantique | Un **vert** distinct du teal pour "positif" crée une **double convention** (teal vs vert) pour des états similaires. |
|
||||
| Tendances négatives, "Expired Warranty", Sign Out | **Rouge** (destructive) | `AdminDashboardStatCard.tsx`, `Sidebar.tsx` (Live icon) | Correct sémantiquement mais **trop présent** si utilisé aussi pour "0%" (voir §3). |
|
||||
|
||||
**Recommandations :**
|
||||
|
||||
- **Badges sidebar** : remplacer `secondary` (magenta) par une variante du primary (ex. `primary` ou `cyan-500`) ou un token dédié "badge" aligné sur la charte. Fichier : `apps/web/src/components/layout/Sidebar.tsx` (L194–201).
|
||||
- **États "positif"** : unifier soit sur teal, soit sur vert, et documenter (ex. teal = interactif/actif, vert = succès/variation positive uniquement).
|
||||
|
||||
### 2.2 Manque de profondeur (cartes, fonds)
|
||||
|
||||
- **Cartes dashboard** : variante `glass` ou `default` avec bordures/ombres très légères (`border-white/5`, `shadow-black/5`). Fichiers : `components/ui/card.tsx` (variants `glass`, `default`), `AdminDashboardStatCard.tsx`, `AdminDashboardTrafficCard.tsx`.
|
||||
- **Recherche header** : `bg-muted/50 border border-border` — contraste faible avec le fond, la zone "Search Network..." se fond dans le fond.
|
||||
- **Dark mode** : `--card: oklch(0.18 0.02 265)` très proche de `--background: oklch(0.15 0.02 265)` dans `index.css` (.dark), donc **peu de relief**.
|
||||
|
||||
**Recommandations :**
|
||||
|
||||
- Augmenter légèrement la différence luminance card vs background (ex. card à 0.20–0.22, background 0.15).
|
||||
- Donner aux cartes une bordure ou une ombre un peu plus marquée (ex. `border-border` plus visible, `shadow-lg` avec teinte légère).
|
||||
- Barre de recherche : fond ou bordure un peu plus marqués pour l’affordance (ex. `bg-card` ou `border-white/10`).
|
||||
|
||||
### 2.3 Fichiers à modifier (couleurs)
|
||||
|
||||
- `apps/web/src/index.css` : variables `.dark` (--card, --background), éventuellement --border.
|
||||
- `apps/web/src/components/layout/Sidebar.tsx` : classes des badges (remplacer secondary par primary/cyan).
|
||||
- `apps/web/src/components/layout/Header.tsx` : input search (classes bg/border).
|
||||
|
||||
---
|
||||
|
||||
## 3. Composants "placeholder" ou trompeurs
|
||||
|
||||
### 3.1 "0%" en rouge sur toutes les cartes (Admin)
|
||||
|
||||
- **Comportement** : `AdminDashboardStatCard` affiche un badge de tendance (`trend`) avec `trend > 0` → vert, sinon **rouge**. Si l’API ne renvoie pas de tendances (ou renvoie 0), on obtient **"0%" en rouge sur chaque carte**.
|
||||
- **Fichiers** : `AdminDashboardStatCard.tsx` (L46–57), `AdminDashboardView.tsx` (L48–50 : `trend: stats.trends?.users` etc.), `useAdminDashboardView.ts` (stats venant de l’API).
|
||||
- **Impact** : ressemble à une erreur ou à une donnée non implémentée, ce qui renforce l’impression d’interface inachevée.
|
||||
|
||||
**Recommandations :**
|
||||
|
||||
- Ne pas afficher le badge de tendance quand `trend === undefined` (ou null). Afficher "0%" seulement si la métrique a du sens (ex. "0% de variation" explicite).
|
||||
- Si `trend === 0`, éviter le style "erreur" (rouge) : utiliser un style neutre (muted) ou masquer.
|
||||
|
||||
### 3.2 Graphique "Traffic Flux"
|
||||
|
||||
- **Comportement** : `AdminDashboardTrafficCard` utilise des barres **aléatoires** (`Math.random()`) et des labels factices ("SYS_INIT", "BUFFERING_NODES...", "LIVE_DATA"). Aucune donnée réelle, aucun axe Y, pas de grille lisible.
|
||||
- **Fichier** : `apps/web/src/components/admin/admin-dashboard-view/AdminDashboardTrafficCard.tsx`.
|
||||
- **Impact** : l’intitulé "HOLOGRAPHIC STREAMING INTERFACE" promet un élément avancé alors que le rendu est clairement un placeholder.
|
||||
|
||||
**Recommandations :**
|
||||
|
||||
- Soit brancher de vraies données + axes + légende claire, soit remplacer par un message du type "Données à venir" ou un skeleton, et éviter un faux graphique.
|
||||
|
||||
### 3.3 Bouton "Sign In" alors que l’utilisateur est connecté
|
||||
|
||||
- **Constat** (d’après captures) : un bouton "Sign In" peut apparaître à côté d’un utilisateur déjà identifié (ex. "vezadev").
|
||||
- **À vérifier** : `Header.tsx` / `Navbar.tsx` — affichage conditionnel du bouton de connexion vs profil. S’assurer que "Sign In" n’est affiché que lorsque `!isAuthenticated`.
|
||||
|
||||
---
|
||||
|
||||
## 4. Layout et espacement
|
||||
|
||||
### 4.1 Barre de lecture (MiniPlayer / GlobalPlayer)
|
||||
|
||||
- **Taille** : `MiniPlayer` utilise `h-24` (96px) en barre fixe. Les contrôles (notamment le bouton play) sont très mis en avant (teal, grande taille).
|
||||
- **Impact** : la barre occupe une part importante de la hauteur et attire trop l’attention sur les pages où la lecture n’est pas le focus (ex. Gear Locker, Academy, Admin).
|
||||
- **Fichiers** : `apps/web/src/components/player/MiniPlayer.tsx` (L36 : `h-24`), `PlayerControls.tsx`, `PlayPauseButton.tsx`.
|
||||
|
||||
**Recommandations :**
|
||||
|
||||
- Réduire la hauteur sur desktop (ex. `h-20` ou `h-18`) et/ou rendre le contraste du bouton play un peu moins fort (même teal mais moins saturé ou plus petit).
|
||||
- Barre de progression : déjà fine ; envisager une hauteur un peu plus visible pour la partie "remplie" (accessibilité + lisibilité).
|
||||
|
||||
### 4.2 Espacement entre sections (sidebar)
|
||||
|
||||
- **Constat** : les blocs "MY STUDIO", "VEZA NETWORK", etc. ont un espacement vertical serré entre le titre de section et le premier lien.
|
||||
- **Fichier** : `apps/web/src/components/layout/Sidebar.tsx` (structure des sections).
|
||||
- **Recommandation** : ajouter un peu de marge au-dessus des titres de section (ex. `mt-4` ou `space-y-1` entre titre et premier item) pour clarifier la hiérarchie.
|
||||
|
||||
### 4.3 Cartes dashboard (Command Center)
|
||||
|
||||
- **Constat** : les quatre petites cartes (Tracks Listened, Messages Sent, etc.) sont serrées ; le texte et les pourcentages peuvent sembler denses.
|
||||
- **Fichiers** : vues dashboard qui utilisent `StatCard` ou équivalent ; grille (ex. `grid-cols-4`, `gap-6`).
|
||||
- **Recommandation** : garder les layout primitives (pas de valeurs arbitraires) mais ajuster `gap` ou `padding` des cartes pour plus de respiration (ex. `p-6` déjà présent, éventuellement `gap-8`).
|
||||
|
||||
---
|
||||
|
||||
## 5. Typographie
|
||||
|
||||
### 5.1 Police Rajdhani
|
||||
|
||||
- **Usage** : `--font-sans: 'Rajdhani', ...` dans `index.css` (@theme inline). Utilisée pour le corps et une grande partie de l’UI.
|
||||
- **Problème connu** : les erreurs console "downloadable font: Glyph bbox was incorrect" (Rajdhani) indiquent des **glyphes mal déclarés** dans le fichier de police. Conséquences possibles : rendu moins net, décalages, ou fallback partiel vers une autre police.
|
||||
- **Fichiers** : `index.html` (lien Google Fonts), `apps/web/src/index.css` (--font-sans).
|
||||
|
||||
**Recommandations :**
|
||||
|
||||
- Vérifier la source de la police (version, subset) et si possible utiliser une version mise à jour ou un autre fournisseur.
|
||||
- En parallèle, prévoir un fallback explicite (ex. `'Rajdhani', 'Inter', system-ui, sans-serif`) pour limiter les effets si Rajdhani pose problème.
|
||||
|
||||
### 5.2 Hiérarchie et lisibilité
|
||||
|
||||
- **Texte secondaire** : `text-muted-foreground` (oklch(0.70 0.01 265) en dark) peut être trop proche du fond sur certains écrans, ce qui réduit le contraste et la hiérarchie.
|
||||
- **Recommandation** : augmenter très légèrement la luminance ou le contraste de `--muted-foreground` en dark (ex. 0.72–0.75) et valider avec un outil WCAG.
|
||||
|
||||
---
|
||||
|
||||
## 6. Contrastes et accessibilité
|
||||
|
||||
### 6.1 Éléments à faible contraste
|
||||
|
||||
- **Bordures** : `border-white/5`, `border-white/10` — très subtiles, peu visibles pour certains utilisateurs.
|
||||
- **Icônes** : petites icônes en `text-muted-foreground` dans les cartes et le player ; contraste insuffisant pour une identification rapide.
|
||||
- **Progress bar** (player) : barre de progression très fine ; partie "remplie" (teal) lisible, mais le rail peut manquer de contraste.
|
||||
|
||||
**Recommandations :**
|
||||
|
||||
- Utiliser au minimum `border-white/10` pour les séparations importantes, et réserver `white/5` aux détails purement décoratifs.
|
||||
- Icônes secondaires : envisager une couleur un peu plus claire (ex. `text-foreground/70`) ou une taille légèrement supérieure pour les actions importantes.
|
||||
|
||||
### 6.2 Champs mot de passe en HTTP
|
||||
|
||||
- **Constat** : message navigateur "Password fields present on an insecure (http://) page" en dev. Ce n’est pas un problème de design mais de contexte (HTTPS en prod recommandé).
|
||||
|
||||
---
|
||||
|
||||
## 7. Cohérence et système de design
|
||||
|
||||
### 7.1 Double jeu de tokens (KŌDŌ vs design-tokens)
|
||||
|
||||
- **index.css** : variables type `--primary`, `--cyan-500`, `--card`, etc. (oklch).
|
||||
- **design-tokens.css** : variables `--kodo-void`, `--kodo-cyan`, `--kodo-text-dim`, etc. (rgb).
|
||||
- **Composants** : certains utilisent `primary` / `cyan-500`, d’autres `text-kodo-cyan`, `bg-kodo-steel`, etc. (ex. `StatCard.tsx` : `text-kodo-cyan`, `bg-kodo-steel/10`).
|
||||
- **Risque** : dérives de teintes entre les deux systèmes et maintenance plus difficile.
|
||||
|
||||
**Recommandation :**
|
||||
|
||||
- À moyen terme, unifier sur un seul jeu de tokens (idéalement celui de `index.css` étendu en Tailwind) et migrer progressivement les `kodo-*` vers les tokens sémantiques (primary, muted, etc.).
|
||||
|
||||
### 7.2 Variantes de cartes
|
||||
|
||||
- **card.tsx** propose plusieurs variants : `default`, `elevated`, `ghost`, `outline`, `muted`, `glass`, `interactive`, `glow`, `glowMagenta`, `spotlight`. L’usage de `glass` partout (admin, etc.) donne un rendu très uniforme et plat.
|
||||
- **Recommandation** : utiliser `default` ou `elevated` pour les cartes de contenu principal afin de retrouver un peu d’ombre et de relief, et réserver `glass` à des blocs spécifiques (panneaux, overlays).
|
||||
|
||||
---
|
||||
|
||||
## 8. Synthèse des actions prioritaires
|
||||
|
||||
| Priorité | Action | Fichier(s) principal(aux) |
|
||||
|----------|--------|----------------------------|
|
||||
| P0 | Unifier la couleur des badges sidebar (primary au lieu de secondary) | `Sidebar.tsx` |
|
||||
| P0 | Ne pas afficher le badge "0%" en rouge quand trend est 0 ou undefined ; style neutre ou masqué | `AdminDashboardStatCard.tsx` |
|
||||
| P1 | Donner plus de relief aux cartes (card vs background, bordure/ombre) | `index.css` (.dark), `card.tsx` |
|
||||
| P1 | Remplacer ou clarifier le graphique "Traffic Flux" (données réelles ou placeholder explicite) | `AdminDashboardTrafficCard.tsx` |
|
||||
| P1 | Vérifier l’affichage "Sign In" quand l’utilisateur est connecté | `Header.tsx`, `Navbar.tsx` |
|
||||
| P2 | Réduire la prééminence visuelle du player (hauteur, taille du bouton play) | `MiniPlayer.tsx`, `PlayerControls.tsx` |
|
||||
| P2 | Améliorer l’affordance de la barre de recherche (fond/bordure) | `Header.tsx` |
|
||||
| P2 | Renforcer le contraste du texte secondaire et des bordures en dark | `index.css` |
|
||||
| P3 | Unifier les tokens (kodo-* vs primary/muted) et documenter la charte | `design-tokens.css`, `index.css`, composants |
|
||||
| P3 | Corriger ou contourner les glyphes Rajdhani (source font, fallback) | `index.html`, `index.css` |
|
||||
|
||||
---
|
||||
|
||||
## 9. Fichiers modifiables (référence rapide)
|
||||
|
||||
- **Couleurs / thème** : `apps/web/src/index.css` (variables :root et .dark).
|
||||
- **Sidebar** : `apps/web/src/components/layout/Sidebar.tsx`.
|
||||
- **Header** : `apps/web/src/components/layout/Header.tsx`.
|
||||
- **Cartes** : `apps/web/src/components/ui/card.tsx` ; `AdminDashboardStatCard.tsx`, `AdminDashboardTrafficCard.tsx`.
|
||||
- **Player** : `apps/web/src/components/player/MiniPlayer.tsx`, `PlayerControls.tsx`, `PlayPauseButton.tsx`.
|
||||
- **Dashboard** : `apps/web/src/components/admin/admin-dashboard-view/AdminDashboardView.tsx`, `useAdminDashboardView.ts`.
|
||||
- **Typographie** : `apps/web/index.html` (fonts), `apps/web/src/index.css` (--font-sans, @layer base).
|
||||
|
||||
Cet audit peut servir de base pour des tickets (P0 → P3) et pour une checklist avant refonte visuelle plus large.
|
||||
111
docs/MONOREPO_ORCHESTRATION.md
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
# Orchestration et gestion du monorepo Veza
|
||||
|
||||
Ce document décrit la structure du Makefile modulaire et recommande des outils open source pour orchestrer le projet pendant son développement.
|
||||
|
||||
---
|
||||
|
||||
## 1. Structure actuelle du Makefile
|
||||
|
||||
### Organisation
|
||||
|
||||
| Fichier | Rôle |
|
||||
|--------|------|
|
||||
| **Makefile** | Point d’entrée unique ; inclut les fragments et définit le goal par défaut (`help`) |
|
||||
| **make/config.mk** | **Source de vérité** : services, ports, chemins (SERVICE_DIR_*, PORT_*, COMPOSE_*, ROOT, etc.) |
|
||||
| **make/ui.mk** | Couleurs et `ECHO_CMD` |
|
||||
| **make/help.mk** | Cible `help` et dashboard |
|
||||
| **make/tools.mk** | `check-tools`, `install-tools`, `install-deps`, `check-ports` |
|
||||
| **make/infra.mk** | `infra-up`, `infra-down`, `db-migrate`, `db-shell`, `redis-shell` |
|
||||
| **make/dev.mk** | `dev`, `dev-backend`, `dev-web`, `dev-<service>`, `stop-local-services` |
|
||||
| **make/build.mk** | `build-*`, `build-all`, `build-all-native`, `build-service` |
|
||||
| **make/test.mk** | `test`, `test-<service>`, `lint`, `lint-<service>`, `fmt`, `status` |
|
||||
| **make/services.mk** | `start-service`, `stop-service`, `restart-service`, `logs-service` (Docker) |
|
||||
| **make/high.mk** | `setup`, `stop-all`, `clean`, `deploy-docker`, `deploy-incus`, `status-full`, `web-minimal` |
|
||||
| **make/incus.mk** | Toutes les cibles Incus (network, deploy, start, stop, status, logs) |
|
||||
|
||||
### Personnalisation
|
||||
|
||||
- **Ajouter un service** : dans `make/config.mk`, ajouter le nom dans `SERVICES`, puis définir `SERVICE_DIR_<nom>` et `PORT_<nom>` si besoin. Adapter les règles dans les `.mk` qui font du case-by-service (ex. `dev.mk`, `build.mk`).
|
||||
- **Changer un port** : dans `make/config.mk` ou via `.env` (ex. `PORT_web=3000`).
|
||||
- **Changer un chemin** : modifier `SERVICE_DIR_<service>` dans `make/config.mk`.
|
||||
- **Override global** : utiliser `.env` (chargé par `config.mk` avec `-include .env`).
|
||||
|
||||
### Cibles par service
|
||||
|
||||
- `make dev-web`, `make dev-backend-api`, `make dev-chat-server`, `make dev-stream-server`
|
||||
- `make test-web`, `make test-backend-api`, …
|
||||
- `make lint-web`, `make lint-backend-api`, …
|
||||
- `make build-service SERVICE=backend-api`
|
||||
|
||||
---
|
||||
|
||||
## 2. Outils open source recommandés
|
||||
|
||||
En complément (ou en partie en remplacement) du Makefile, ces outils peuvent aider à orchestrer le monorepo.
|
||||
|
||||
### 2.1 Turborepo (JS/TS)
|
||||
|
||||
- **Site** : [turbo.build](https://turbo.build)
|
||||
- **Rôle** : Cache des tâches et pipeline de build/test/lint pour les workspaces npm.
|
||||
- **Atouts** : Cache distant optionnel, parallélisation, `turbo run build --filter=./apps/web`, intégration CI simple.
|
||||
- **Limites** : Ciblé npm/pnpm workspaces (apps/web, packages/design-system). Les backends Go/Rust restent gérés par le Makefile ou des scripts.
|
||||
- **Usage typique** : `turbo run build`, `turbo run test --filter=web`, `turbo run lint`.
|
||||
|
||||
À envisager si tu veux un cache et un graphe de tâches fiable pour la partie Node/TS, tout en gardant Make pour l’infra et les backends.
|
||||
|
||||
---
|
||||
|
||||
### 2.2 Nx
|
||||
|
||||
- **Site** : [nx.dev](https://nx.dev)
|
||||
- **Rôle** : Monorepo “full stack” : graphe de dépendances, cache, affected (build/test uniquement sur ce qui a changé), plugins (React, Node, Go, etc.).
|
||||
- **Atouts** : Très flexible, “affected” puissant, support multi-langage (y compris Go via plugins ou custom targets).
|
||||
- **Limites** : Plus lourd à configurer et à maintenir que Turborepo ; courbe d’apprentissage plus forte.
|
||||
|
||||
Utile si le monorepo grossit encore et que tu veux “affected”, cache partagé et une seule interface pour build/test/lint (y compris backends).
|
||||
|
||||
---
|
||||
|
||||
### 2.3 Just (command runner)
|
||||
|
||||
- **Site** : [github.com/casey/just](https://github.com/casey/just)
|
||||
- **Rôle** : Remplacer une partie des cibles Make par un fichier `justfile` (syntaxe plus lisible, arguments nommés, pas de tabs).
|
||||
- **Atouts** : Scripts lisibles, facile à partager entre devs, multiplateforme (Windows possible).
|
||||
- **Limites** : Pas de vrai “graphe de tâches” ni cache ; plutôt un complément ou un remplacement léger du Make pour les commandes métier.
|
||||
|
||||
Exemple : `just dev`, `just test web`, `just deploy docker`.
|
||||
|
||||
---
|
||||
|
||||
### 2.4 Mise (ex-rtx, alternative à asdf)
|
||||
|
||||
- **Site** : [mise.jdx.dev](https://mise.jdx.dev)
|
||||
- **Rôle** : Gestion des versions des runtimes (Node, Go, Rust, Python) au niveau du repo ou de la machine.
|
||||
- **Atouts** : Un seul outil pour `.node-version`, `.go-version`, etc. ; reproductibilité des envs entre dev et CI.
|
||||
- **Limites** : Ne remplace pas le Makefile ; il assure seulement que les bonnes versions sont actives.
|
||||
|
||||
Recommandé pour figer Node/Go/Rust et éviter les “ça marche chez moi”.
|
||||
|
||||
---
|
||||
|
||||
### 2.5 Task (go-task / taskfile.dev)
|
||||
|
||||
- **Site** : [taskfile.dev](https://taskfile.dev)
|
||||
- **Rôle** : Alternative au Make en YAML, avec variables, includes, dépendances entre tâches.
|
||||
- **Atouts** : Syntaxe YAML, parallélisation, bon pour des pipelines déclaratifs.
|
||||
- **Limites** : Un outil de plus ; si le Make modulaire te convient, pas obligatoire.
|
||||
|
||||
---
|
||||
|
||||
## 3. Synthèse
|
||||
|
||||
| Besoin | Outil suggéré |
|
||||
|--------|----------------|
|
||||
| Un seul endroit pour config (ports, services, chemins) | **make/config.mk** (déjà en place) |
|
||||
| Cache + pipeline pour JS/TS uniquement | **Turborepo** |
|
||||
| Affected + cache + multi-langage (y compris Go/Rust) | **Nx** |
|
||||
| Commandes lisibles, peu de dépendance à Make | **Just** |
|
||||
| Versions Node/Go/Rust reproductibles | **Mise** |
|
||||
| Tout garder en Make mais mieux structuré | **Makefile + make/*.mk** (actuel) |
|
||||
|
||||
Recommandation pragmatique : garder le **Makefile modulaire** comme entrée principale (infra, dev, deploy, Incus). Si la partie frontend/npm devient la plus coûteuse (builds, tests, lint), ajouter **Turborepo** pour les workspaces npm et appeler `turbo` depuis le Makefile si besoin (ex. `make test` → infra-up + turbo run test + tests Go/Rust). Pour les versions des runtimes, **Mise** (ou asdf) dans le repo et en CI améliore la reproductibilité sans toucher au reste.
|
||||
94
go.work.sum
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
|
||||
github.com/Microsoft/hcsshim v0.11.5/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
|
||||
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
|
||||
github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
|
||||
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
|
||||
github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
|
||||
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
|
||||
github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U=
|
||||
github.com/containerd/nri v0.6.1/go.mod h1:7+sX3wNx+LR7RzhjnJiUkFDhn18P5Bg/0VnJ/uXpRJM=
|
||||
github.com/containerd/ttrpc v1.2.4/go.mod h1:ojvb8SJBSch0XkqNO0L0YX/5NxR3UnVk2LzFKBK0upc=
|
||||
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
|
||||
github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
|
||||
github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
|
||||
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
|
||||
github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
|
||||
github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/dhowden/itl v0.0.0-20170329215456-9fbe21093131/go.mod h1:eVWQJVQ67aMvYhpkDwaH2Goy2vo6v8JCMfGXfQ9sPtw=
|
||||
github.com/dhowden/plist v0.0.0-20141002110153-5db6e0d9931a/go.mod h1:sLjdR6uwx3L6/Py8F+QgAfeiuY87xuYGwCDqRFrvCzw=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
|
||||
github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
|
||||
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
|
||||
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
|
||||
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
|
||||
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0/go.mod h1:vsh3ySueQCiKPxFLvjWC4Z135gIa34TQ/NSqkDTZYUM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
|
||||
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
|
||||
k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
|
||||
k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8=
|
||||
k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
|
||||
k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs=
|
||||
k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
|
||||
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
tags.cncf.io/container-device-interface v0.7.2/go.mod h1:Xb1PvXv2BhfNb3tla4r9JL129ck1Lxv9KuU6eVOfKto=
|
||||
tags.cncf.io/container-device-interface/specs-go v0.7.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80=
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
export type {
|
||||
CheckoutViewProps,
|
||||
CheckoutFormState,
|
||||
} from './types';
|
||||
export { CheckoutView } from './CheckoutView';
|
||||
export { CheckoutViewSkeleton } from './CheckoutViewSkeleton';
|
||||
export { useCheckoutView } from './useCheckoutView';
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
import type { Meta, StoryObj } from '@storybook/react';
|
||||
import { http, HttpResponse } from 'msw';
|
||||
import {
|
||||
PlaybackHeatmap,
|
||||
PlaybackHeatmapSkeleton,
|
||||
} from './PlaybackHeatmap';
|
||||
import type { PlaybackHeatmapData } from './playback-heatmap';
|
||||
|
||||
// Storybook metadata: registers PlaybackHeatmap under the Streaming feature group.
const meta: Meta<typeof PlaybackHeatmap> = {
  title: 'Components/Features/Streaming/PlaybackHeatmap',
  component: PlaybackHeatmap,
  parameters: { layout: 'padded' },
  tags: ['autodocs'],
};

export default meta;

type Story = StoryObj<typeof meta>;

// Fixture: a 180 s track sampled in 5 s segments; only the first three
// segments carry data (intensity decays 1.0 → 0.5 as skips increase).
const mockHeatmap: PlaybackHeatmapData = {
  track_id: '123',
  track_duration: 180,
  segment_size: 5,
  total_sessions: 10,
  max_intensity: 1.0,
  generated_at: '2024-01-01T00:00:00Z',
  segments: [
    { start_time: 0, end_time: 5, listen_count: 10, skip_count: 0, intensity: 1.0, average_play_time: 5 },
    { start_time: 5, end_time: 10, listen_count: 8, skip_count: 2, intensity: 0.8, average_play_time: 4 },
    { start_time: 10, end_time: 15, listen_count: 5, skip_count: 3, intensity: 0.5, average_play_time: 2.5 },
  ],
};

/** Data loaded via MSW (GET /api/v1/tracks/:id/playback/heatmap). */
// NOTE(review): no MSW handler is declared here — presumably a default handler
// for this endpoint exists in the global Storybook MSW setup; verify.
export const Default: Story = {
  args: { trackId: '123' },
};

/** Custom segment size. */
export const CustomSegmentSize: Story = {
  args: { trackId: '123', segmentSize: 10 },
};

/** Loading state (skeleton). */
export const Loading: Story = {
  name: 'Chargement',
  render: () => <PlaybackHeatmapSkeleton />,
};

/** Empty data (no segments). */
// Bypasses fetching by seeding initialHeatmap with an empty segment list.
export const Empty: Story = {
  name: 'Empty',
  args: {
    trackId: '123',
    initialHeatmap: {
      ...mockHeatmap,
      segments: [],
      total_sessions: 0,
    },
  },
};

/** Load error. */
// NOTE(review): the exported name 'Error' shadows the global Error constructor
// within this module — consider renaming the binding (keep name: 'Error').
export const Error: Story = {
  name: 'Error',
  parameters: {
    msw: {
      handlers: [
        http.get('*/api/v1/tracks/:id/playback/heatmap', () =>
          HttpResponse.json(
            { success: false, error: { message: 'Failed to load heatmap' } },
            { status: 500 },
          ),
        ),
      ],
    },
  },
  args: { trackId: '123' },
};
|
||||
46
make/build.mk
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
# ==============================================================================
|
||||
# BUILD (Docker images and native for Incus)
|
||||
# ==============================================================================
|
||||
|
||||
.PHONY: build-backend-api build-chat-server build-stream-server build-web
|
||||
.PHONY: build-all build-all-native build-service
|
||||
|
||||
# Build the Go backend image. Prefers the production Dockerfile; if that build
# fails (e.g. the file is absent), falls back to the plain local Dockerfile.
build-backend-api: ## [LOW] Build Go backend Docker image
	@$(ECHO_CMD) "${BLUE}🔨 Building backend-api...${NC}"
	@docker build -t $(PROJECT_NAME)-backend-api:latest \
		-f $(ROOT)/$(SERVICE_DIR_backend-api)/Dockerfile.production \
		$(ROOT)/$(SERVICE_DIR_backend-api) \
	|| ($(ECHO_CMD) "${YELLOW}Using local Dockerfile...${NC}" && \
		docker build -t $(PROJECT_NAME)-backend-api:latest \
			-f $(ROOT)/$(SERVICE_DIR_backend-api)/Dockerfile \
			$(ROOT)/$(SERVICE_DIR_backend-api))
|
||||
|
||||
# Build the Rust chat-server image; fall back to the plain Dockerfile when the
# production one fails/does not exist.
# FIX: the fallback command ended with ')' but had no opening '(' — a shell
# syntax error that broke the fallback path entirely. Wrapped in a subshell
# with a notice, matching build-backend-api.
build-chat-server: ## [LOW] Build Rust chat server Docker image
	@$(ECHO_CMD) "${BLUE}🔨 Building chat-server...${NC}"
	@docker build -t $(PROJECT_NAME)-chat-server:latest -f $(ROOT)/$(SERVICE_DIR_chat-server)/Dockerfile.production $(ROOT)/$(SERVICE_DIR_chat-server) || \
		($(ECHO_CMD) "${YELLOW}Using local Dockerfile...${NC}" && \
		docker build -t $(PROJECT_NAME)-chat-server:latest -f $(ROOT)/$(SERVICE_DIR_chat-server)/Dockerfile $(ROOT)/$(SERVICE_DIR_chat-server))
|
||||
|
||||
# Build the Rust stream-server image; fall back to the plain Dockerfile when
# the production one fails/does not exist.
# FIX: unbalanced ')' on the fallback line (no matching '(') was a shell
# syntax error — the fallback never ran. Now a proper subshell with a notice,
# consistent with build-backend-api.
build-stream-server: ## [LOW] Build Rust stream server Docker image
	@$(ECHO_CMD) "${BLUE}🔨 Building stream-server...${NC}"
	@docker build -t $(PROJECT_NAME)-stream-server:latest -f $(ROOT)/$(SERVICE_DIR_stream-server)/Dockerfile.production $(ROOT)/$(SERVICE_DIR_stream-server) || \
		($(ECHO_CMD) "${YELLOW}Using local Dockerfile...${NC}" && \
		docker build -t $(PROJECT_NAME)-stream-server:latest -f $(ROOT)/$(SERVICE_DIR_stream-server)/Dockerfile $(ROOT)/$(SERVICE_DIR_stream-server))
|
||||
|
||||
# Build the web frontend image; fall back to the plain Dockerfile when the
# production one fails/does not exist.
# FIX: unbalanced ')' on the fallback line (no matching '(') was a shell
# syntax error — the fallback never ran. Now a proper subshell with a notice,
# consistent with build-backend-api.
build-web: ## [LOW] Build web frontend Docker image
	@$(ECHO_CMD) "${BLUE}🔨 Building web...${NC}"
	@docker build -t $(PROJECT_NAME)-web:latest -f $(ROOT)/$(SERVICE_DIR_web)/Dockerfile.production $(ROOT)/$(SERVICE_DIR_web) || \
		($(ECHO_CMD) "${YELLOW}Using local Dockerfile...${NC}" && \
		docker build -t $(PROJECT_NAME)-web:latest -f $(ROOT)/$(SERVICE_DIR_web)/Dockerfile $(ROOT)/$(SERVICE_DIR_web))
|
||||
|
||||
# Build every application image by delegating to the per-service build targets.
# Iterates in a shell loop instead of four hand-written $(MAKE) lines; a failed
# build does not stop the following ones, same as the original line-per-line form.
build-all: ## [MID] Build all services (Docker images)
	@$(ECHO_CMD) "${BLUE}🔨 Building all services...${NC}"
	@for svc in backend-api chat-server stream-server web; do \
		$(MAKE) -s build-$$svc; \
	done
	@$(ECHO_CMD) "${GREEN}✅ All services built.${NC}"
|
||||
|
||||
# Build all services as native binaries (no Docker) for Incus deployment.
# Delegates the whole job to the external build-native.sh helper script.
build-all-native: ## [MID] Build all services natively (for Incus)
	@$(ECHO_CMD) "${BLUE}🔨 Building all services natively...${NC}"
	@$(INCUS_SCRIPTS)/build-native.sh all
	@$(ECHO_CMD) "${GREEN}✅ All services built natively.${NC}"
|
||||
|
||||
# Dispatcher: build one service image by name. Requires SERVICE=<name> on the
# command line and forwards to the matching build-<name> target.
build-service: ## [MID] Build a specific service (usage: make build-service SERVICE=backend-api)
	@[ -n "$(SERVICE)" ] || { $(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; exit 1; }
	@$(ECHO_CMD) "${BLUE}🔨 Building $(SERVICE)...${NC}"
	@$(MAKE) -s build-$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) built.${NC}"
|
||||
64
make/config.mk
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
# ==============================================================================
|
||||
# VEZA MONOREPO - CONFIGURATION (single source of truth)
|
||||
# ==============================================================================
|
||||
# Edit this file to add services, change ports, or paths.
|
||||
# Override via .env or environment (e.g. PORT_WEB=3000 make dev).
|
||||
# ==============================================================================
|
||||
|
||||
# Load developer overrides first so every ?= below can be pre-set via .env.
# The leading '-' makes the include optional (no error when .env is absent).
-include .env

# --- Project ---
PROJECT_NAME ?= veza
ROOT ?= $(CURDIR)

# --- Compose ---
COMPOSE_FILE ?= docker-compose.yml
COMPOSE_PROD ?= docker-compose.prod.yml

# --- Services (space-separated; must match keys in SERVICE_DIRS / SERVICE_PORTS)
SERVICES := backend-api chat-server stream-server web haproxy
INFRA_SERVICES := postgres redis rabbitmq

# --- Service → Directory mapping (customize paths here)
# NOTE: hyphenated variable names (SERVICE_DIR_backend-api) are accepted by
# GNU make but are not portable to other make dialects.
SERVICE_DIR_backend-api := veza-backend-api
SERVICE_DIR_chat-server := veza-chat-server
SERVICE_DIR_stream-server := veza-stream-server
SERVICE_DIR_web := apps/web
# haproxy is configuration-only: it has no source directory of its own.
SERVICE_DIR_haproxy :=

# --- Ports (override with PORT_<SERVICE>=... from .env)
PORT_backend-api ?= 8080
PORT_chat-server ?= 3000
PORT_stream-server ?= 3001
PORT_web ?= 5173
PORT_haproxy ?= 80

# Legacy names for backward compatibility
# (?= so a .env that still sets PORT_GO etc. wins over the derived value).
PORT_GO ?= $(PORT_backend-api)
PORT_CHAT ?= $(PORT_chat-server)
PORT_STREAM ?= $(PORT_stream-server)
PORT_WEB ?= $(PORT_web)
PORT_HAPROXY ?= $(PORT_haproxy)

# --- Database & Infra ---
DB_USER ?= veza
DB_PASS ?= password
DB_NAME ?= veza
DB_HOST ?= localhost
DB_PORT ?= 5432

# Recursive '=' is deliberate here: the URLs expand lazily, so later or
# target-specific overrides of DB_* are still reflected at use time.
DATABASE_URL = postgres://$(DB_USER):$(DB_PASS)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)?sslmode=disable
REDIS_URL = redis://localhost:6379
AMQP_URL = amqp://$(DB_USER):$(DB_PASS)@localhost:5672

# --- Incus ---
DEPLOY_TARGET ?= docker
INCUS_PROFILE ?= veza-profile
INCUS_NETWORK ?= veza-network
INCUS_SCRIPTS ?= $(ROOT)/config/incus

# --- NPM workspaces (from root package.json; used for install-deps / lint scope)
NPM_WORKSPACES ?= apps/web packages/design-system

# --- Scripts ---
SCRIPTS ?= $(ROOT)/scripts
|
||||
80
make/dev.mk
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
# ==============================================================================
|
||||
# DEVELOPMENT (local run with optional hot reload)
|
||||
# ==============================================================================
|
||||
|
||||
.PHONY: dev dev-backend dev-web dev-backend-api dev-chat-server dev-stream-server
|
||||
.PHONY: stop-local-services start-local-service stop-local-service
|
||||
|
||||
# Start the full hybrid dev stack: Go API, both Rust servers, and the Vite web
# app run as background jobs of a single subshell; Docker infra comes up first
# via the infra-up prerequisite. Hot-reload tools (air / cargo-watch) are used
# when found on PATH, otherwise a plain run.
# The subshell traps SIGINT and 'kill 0' kills the whole process group, so one
# Ctrl+C tears everything down; 'wait' keeps the recipe alive until then.
# NOTE(review): prerequisite order (check-ports before infra-up) is not
# guaranteed under 'make -j' — confirm this is only ever run serially.
dev: check-ports infra-up ## [HIGH] Start Everything (Detects Hot Reload tools)
	@$(ECHO_CMD) "${BOLD}${PURPLE}🚀 STARTING HYBRID DEV ENVIRONMENT${NC}"
	@$(ECHO_CMD) " Go: http://localhost:$(PORT_backend-api)"
	@$(ECHO_CMD) " Chat: http://localhost:$(PORT_chat-server)"
	@$(ECHO_CMD) " Web: http://localhost:$(PORT_web)"
	@$(ECHO_CMD) "${YELLOW}Hit Ctrl+C to stop all.${NC}"
	@(trap 'kill 0' SIGINT; \
	if command -v air >/dev/null; then \
		$(ECHO_CMD) "${GREEN}[Go] Hot Reload Active (Air)${NC}" && cd $(ROOT)/$(SERVICE_DIR_backend-api) && air & \
	else \
		$(ECHO_CMD) "${YELLOW}[Go] Standard Run${NC}" && cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/modern-server/main.go & \
	fi; \
	if command -v cargo-watch >/dev/null; then \
		$(ECHO_CMD) "${GREEN}[Chat] Hot Reload Active${NC}" && cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo watch -x run -q & \
		$(ECHO_CMD) "${GREEN}[Stream] Hot Reload Active${NC}" && cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo watch -x run -q & \
	else \
		$(ECHO_CMD) "${YELLOW}[Chat] Standard Run${NC}" && cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo run -q & \
		$(ECHO_CMD) "${YELLOW}[Stream] Standard Run${NC}" && cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo run -q & \
	fi; \
	$(ECHO_CMD) "${GREEN}[Web] Starting Vite...${NC}" && cd $(ROOT)/$(SERVICE_DIR_web) && npm run dev & \
	wait)
|
||||
|
||||
# Backend-only variant of 'dev': Go API plus both Rust servers as background
# jobs in one subshell (no web app). Same SIGINT/kill-0 group teardown and the
# same hot-reload detection (air / cargo-watch) as the full 'dev' target.
dev-backend: check-ports infra-up ## [MID] Start Backends Only (Hot Reload supported)
	@$(ECHO_CMD) "${BOLD}${PURPLE}🚀 STARTING BACKEND ONLY${NC}"
	@(trap 'kill 0' SIGINT; \
	if command -v air >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_backend-api) && air & else cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/modern-server/main.go & fi; \
	if command -v cargo-watch >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo watch -x run -q & else cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo run -q & fi; \
	if command -v cargo-watch >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo watch -x run -q & else cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo run -q & fi; \
	wait)
|
||||
|
||||
# Run only the Vite dev server, in the foreground (infra still comes up via
# the infra-up prerequisite so the API URL and DB are available if needed).
dev-web: check-ports infra-up ## [MID] Start Web app only (assumes backend elsewhere or mocked)
	@$(ECHO_CMD) "${GREEN}[Web] Starting Vite...${NC}"
	@cd $(ROOT)/$(SERVICE_DIR_web) && npm run dev
|
||||
|
||||
# Run only the Go backend in the foreground, with hot reload via air when it
# is installed (the cd is hoisted out of both branches of the original if/else).
dev-backend-api: check-ports infra-up ## [MID] Start Go backend only
	@$(ECHO_CMD) "${GREEN}[Backend API] Starting...${NC}"
	@cd $(ROOT)/$(SERVICE_DIR_backend-api) && \
	if command -v air >/dev/null; then air; else go run cmd/modern-server/main.go; fi
|
||||
|
||||
# Run only the Rust chat server in the foreground, with hot reload via
# cargo-watch when available (cd hoisted out of the original if/else branches).
dev-chat-server: check-ports infra-up ## [MID] Start Chat server only
	@$(ECHO_CMD) "${GREEN}[Chat] Starting...${NC}"
	@cd $(ROOT)/$(SERVICE_DIR_chat-server) && \
	if command -v cargo-watch >/dev/null; then cargo watch -x run -q; else cargo run -q; fi
|
||||
|
||||
# Run only the Rust stream server in the foreground, with hot reload via
# cargo-watch when available (cd hoisted out of the original if/else branches).
dev-stream-server: check-ports infra-up ## [MID] Start Stream server only
	@$(ECHO_CMD) "${GREEN}[Stream] Starting...${NC}"
	@cd $(ROOT)/$(SERVICE_DIR_stream-server) && \
	if command -v cargo-watch >/dev/null; then cargo watch -x run -q; else cargo run -q; fi
|
||||
|
||||
# Kill every locally started dev process (air, cargo watch, vite, go run).
# FIX: pkill -f takes an *extended* regular expression, where alternation is a
# plain '|'; the previous '\|' (BRE syntax) made the pattern look for a literal
# pipe character, so no process was ever matched or killed.
stop-local-services: ## [LOW] Stop all local processes (air, cargo watch, vite)
	@pkill -f "air|cargo watch|npm run dev|go run.*modern-server" 2>/dev/null || true
|
||||
|
||||
# Start one service as a background process on the host (no Docker).
# SERVICE selects the case branch; hot-reload tooling is used when installed.
# NOTE(review): every branch backgrounds its process with '&' and the recipe
# then returns — the spawned process is unsupervised (pair with
# stop-local-service to clean up).
start-local-service: ## [LOW] Start a service locally (usage: make start-local-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then $(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; exit 1; fi
	@$(ECHO_CMD) "${BLUE}🚀 Starting $(SERVICE)...${NC}"
	@case "$(SERVICE)" in \
		backend-api) \
			if command -v air >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_backend-api) && air & else cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/modern-server/main.go & fi ;; \
		chat-server) \
			if command -v cargo-watch >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo watch -x run -q & else cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo run -q & fi ;; \
		stream-server) \
			if command -v cargo-watch >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo watch -x run -q & else cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo run -q & fi ;; \
		web) \
			cd $(ROOT)/$(SERVICE_DIR_web) && npm run dev & ;; \
		*) \
			$(ECHO_CMD) "${RED}Unknown service: $(SERVICE)${NC}"; exit 1 ;; \
	esac
|
||||
|
||||
# Stop one locally started service by pattern-killing its process.
# FIX: pkill -f patterns are extended regexes, so alternation must be written
# '|'; the previous '\|' matched a literal pipe and never killed anything
# (backend-api and web branches). The chat/stream branch was already correct.
stop-local-service: ## [LOW] Stop a local service (usage: make stop-local-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then $(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; exit 1; fi
	@case "$(SERVICE)" in \
		backend-api) pkill -f "air|go run.*modern-server" 2>/dev/null || true ;; \
		chat-server|stream-server) pkill -f "cargo.*$(SERVICE)" 2>/dev/null || true ;; \
		web) pkill -f "npm run dev|vite" 2>/dev/null || true ;; \
		*) $(ECHO_CMD) "${RED}Unknown service: $(SERVICE)${NC}" ;; \
	esac
|
||||
27
make/help.mk
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
# ==============================================================================
|
||||
# HELP & DASHBOARD
|
||||
# ==============================================================================
|
||||
|
||||
.PHONY: help
|
||||
# Self-documenting dashboard: prints infra connection strings, then scrapes
# every makefile in MAKEFILE_LIST for '## [HIGH|MID|LOW]' annotated targets
# and groups them by level.
# NOTE(review): the RabbitMQ line hard-codes 'veza/password' instead of using
# $(DB_USER)/$(DB_PASS) — confirm this is intentional.
help: ## [HIGH] Show this dashboard
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${PURPLE}⚡ VEZA MONOREPO CLI ⚡${NC}"
	@$(ECHO_CMD) "================================================================="
	@$(ECHO_CMD) "${BOLD}INFRASTRUCTURE:${NC}"
	@printf " ${CYAN}%-15s${NC} %s\n" "Postgres" "$(DATABASE_URL)"
	@printf " ${CYAN}%-15s${NC} %s\n" "Redis" "$(REDIS_URL)"
	@printf " ${CYAN}%-15s${NC} %s\n" "RabbitMQ" "UI: http://localhost:15672 (veza/password)"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${GREEN}HIGH LEVEL:${NC}"
	@grep -h -E '^[a-zA-Z0-9_-]+:.*?## \[HIGH\] .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " ${YELLOW}%-25s${NC} %s\n", $$1, $$2}'
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${BLUE}INTERMEDIATE:${NC}"
	@grep -h -E '^[a-zA-Z0-9_-]+:.*?## \[MID\] .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " ${CYAN}%-25s${NC} %s\n", $$1, $$2}'
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}${PURPLE}LOW LEVEL / DEBUG:${NC}"
	@grep -h -E '^[a-zA-Z0-9_-]+:.*?## \[LOW\] .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " ${PURPLE}%-25s${NC} %s\n", $$1, $$2}'
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}PER-SERVICE (e.g. make dev-web, make test-backend-api):${NC}"
	@$(ECHO_CMD) " ${CYAN}dev-<service>${NC} test-<service> lint-<service> build-<service>"
	@$(ECHO_CMD) " Services: backend-api, chat-server, stream-server, web"
	@$(ECHO_CMD) ""
|
||||
77
make/high.mk
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
# ==============================================================================
|
||||
# HIGH LEVEL: setup, stop-all, clean, deploy, status
|
||||
# ==============================================================================
|
||||
|
||||
.PHONY: setup stop-all restart-all clean clean-deep deploy-docker deploy-incus status-full
|
||||
.PHONY: web-minimal stop-minimal
|
||||
|
||||
# One-shot project bootstrap: all work happens in the prerequisite chain
# (verify tools, install dev tools, install workspace dependencies); the
# recipe itself only prints the success banner.
setup: check-tools install-tools install-deps ## [HIGH] Full project initialization
	@$(ECHO_CMD) "${BOLD}${GREEN}✅ Setup Complete! Ready to rock with 'make dev'.${NC}"
|
||||
|
||||
# Thin wrapper: the minimal stack (backend + frontend + DB) is started by the
# external start_minimal.sh script in $(SCRIPTS).
web-minimal: ## [HIGH] Start Veza Web Minimal Journey (Backend + Frontend + DB)
	@$(SCRIPTS)/start_minimal.sh
|
||||
|
||||
# Counterpart of web-minimal: delegates teardown to stop_minimal.sh.
stop-minimal: ## [HIGH] Stop Minimal Stack
	@$(SCRIPTS)/stop_minimal.sh
|
||||
|
||||
# Bring down everything: both compose stacks (dev and prod files, errors
# ignored — a stack may not be up) and then any host processes started by the
# dev targets, via stop-local-services.
stop-all: ## [HIGH] Stop all services (Docker + Local)
	@$(ECHO_CMD) "${RED}🛑 Stopping all services...${NC}"
	@for compose_file in $(COMPOSE_FILE) $(COMPOSE_PROD); do \
		docker compose -f $$compose_file down 2>/dev/null || true; \
	done
	@$(MAKE) -s stop-local-services
	@$(ECHO_CMD) "${GREEN}✅ All services stopped.${NC}"
|
||||
|
||||
# Full bounce: the stop-all prerequisite tears everything down, then infra and
# the dev stack are brought back up.
# NOTE(review): the explicit infra-up is redundant — 'dev' already lists
# infra-up as a prerequisite. Also 'dev' blocks until Ctrl+C, so the final
# banner only prints after the dev session ends.
restart-all: stop-all ## [HIGH] Restart all services
	@$(ECHO_CMD) "${BLUE}🔄 Restarting all services...${NC}"
	@$(MAKE) -s infra-up
	@$(MAKE) -s dev
	@$(ECHO_CMD) "${GREEN}✅ All services restarted.${NC}"
|
||||
|
||||
# Light clean: web build cache, Rust debug artifacts, and stray *.log files.
# The find prunes node_modules trees so logs inside dependencies are kept.
# node_modules itself and Rust release builds are NOT touched (see clean-deep).
clean: ## [HIGH] Clean build artifacts and caches
	@$(ECHO_CMD) "${YELLOW}🧹 Cleaning build artifacts...${NC}"
	@rm -rf $(ROOT)/$(SERVICE_DIR_web)/node_modules/.cache
	@rm -rf $(ROOT)/$(SERVICE_DIR_chat-server)/target/debug $(ROOT)/$(SERVICE_DIR_stream-server)/target/debug
	@find $(ROOT) -type d -name "node_modules" -prune -o -type f -name "*.log" -delete 2>/dev/null || true
	@$(ECHO_CMD) "${GREEN}✅ Clean complete.${NC}"
|
||||
|
||||
# Destructive clean: node_modules, whole Rust target dirs, and docker compose
# volumes for both stacks. Gated behind an interactive confirmation — only a
# lowercase 'y' proceeds; anything else (including Enter, defaulting to N)
# fails the test and aborts the recipe.
# NOTE(review): 'read -p' is a bashism — relies on SHELL := bash set in the
# root Makefile.
clean-deep: ## [HIGH] ⚠️ Nuclear Clean (Confirm required)
	@read -p "${RED}Are you sure? This will delete ALL builds, volumes, and caches! [y/N]${NC} " ans && [ $${ans:-N} = y ]
	@$(ECHO_CMD) "${RED}☢️ DESTROYING ARTIFACTS...${NC}"
	@rm -rf $(ROOT)/$(SERVICE_DIR_web)/node_modules
	@rm -rf $(ROOT)/$(SERVICE_DIR_chat-server)/target $(ROOT)/$(SERVICE_DIR_stream-server)/target
	@docker compose -f $(COMPOSE_FILE) down -v 2>/dev/null || true
	@docker compose -f $(COMPOSE_PROD) down -v 2>/dev/null || true
	@$(ECHO_CMD) "${GREEN}System Cleaned.${NC}"
|
||||
|
||||
# Production-style Docker deployment: images are built by the build-all
# prerequisite, the prod compose stack is (re)built and started detached, and
# wait-for-services blocks until every app container answers an exec probe.
deploy-docker: build-all ## [HIGH] Deploy all services with Docker + HAProxy
	@$(ECHO_CMD) "${BOLD}${BLUE}🐳 Deploying with Docker...${NC}"
	@docker compose -f $(COMPOSE_PROD) up -d --build
	@$(MAKE) -s wait-for-services
	@$(ECHO_CMD) "${GREEN}✅ Deployment complete! Access via http://localhost:$(PORT_haproxy)${NC}"
|
||||
|
||||
# Native (no-Docker) Incus deployment pipeline: build natively (prerequisite),
# then network → infra containers → app containers → start everything.
# NOTE(review): the printed addresses hard-code the 10.10.10.x layout and
# per-service ports (8081, 3002, ...) instead of deriving them from config.mk
# — keep in sync with incus-setup-network and the service units.
deploy-incus: build-all-native ## [HIGH] Deploy all services with Incus containers (native, no Docker)
	@$(ECHO_CMD) "${BOLD}${BLUE}📦 Deploying with Incus (native)...${NC}"
	@$(MAKE) -s incus-setup-network
	@$(MAKE) -s incus-deploy-infra
	@$(MAKE) -s incus-deploy-all-native
	@$(MAKE) -s incus-start-all
	@$(ECHO_CMD) "${GREEN}✅ Incus deployment complete!${NC}"
	@$(ECHO_CMD) "${BLUE}Access services at:${NC}"
	@$(ECHO_CMD) " Backend API: http://10.10.10.2:8080"
	@$(ECHO_CMD) " Chat Server: http://10.10.10.3:8081"
	@$(ECHO_CMD) " Stream Server: http://10.10.10.4:3002"
	@$(ECHO_CMD) " Web Frontend: http://10.10.10.5:80"
	@$(ECHO_CMD) " HAProxy: http://10.10.10.6:80"
|
||||
|
||||
# Read-only overview of every runtime: veza Docker containers, host processes
# listening on the four app ports, and Incus containers. Each probe degrades
# to an informational message when nothing is found.
status-full: ## [HIGH] Show complete system status
	@$(ECHO_CMD) "${BOLD}${CYAN}📊 SYSTEM STATUS${NC}"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Docker Containers:${NC}"
	@docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -E "NAME|veza" || echo " No containers running"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Local Processes:${NC}"
	@lsof -i :$(PORT_backend-api) -i :$(PORT_chat-server) -i :$(PORT_stream-server) -i :$(PORT_web) 2>/dev/null | grep LISTEN || echo " No local processes"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Incus Containers:${NC}"
	@incus list veza- 2>/dev/null | grep -E "NAME|veza" || echo " No Incus containers"
	@$(ECHO_CMD) ""
|
||||
201
make/incus.mk
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
# ==============================================================================
|
||||
# INCUS / LXD DEPLOYMENT
|
||||
# ==============================================================================
|
||||
|
||||
.PHONY: incus-setup-network incus-deploy-all incus-deploy-all-native incus-deploy-service incus-deploy-service-native incus-deploy-infra incus-start-all incus-stop-all incus-status incus-logs
|
||||
|
||||
# Idempotent Incus bootstrap: create-or-update the NAT'd bridge network
# (10.10.10.1/24, DHCP, managed DNS) and the shared profile with a root disk
# and an eth0 NIC on that network. Device adds try 'pool=default' first and
# fall back to a pool-less add; all updates tolerate pre-existing state.
incus-setup-network: ## [LOW] Setup Incus network profile
	@$(ECHO_CMD) "${BLUE}📦 Setting up Incus network...${NC}"
# Network: create when missing, otherwise re-apply the mutable settings.
	@if ! incus network show $(INCUS_NETWORK) >/dev/null 2>&1; then \
		$(ECHO_CMD) "Creating network $(INCUS_NETWORK)..."; \
		incus network create $(INCUS_NETWORK) \
			ipv4.address=10.10.10.1/24 \
			ipv4.nat=true \
			ipv4.dhcp=true \
			dns.mode=managed \
			dns.nameservers=8.8.8.8,1.1.1.1; \
	else \
		$(ECHO_CMD) "Updating network configuration..."; \
		incus network set $(INCUS_NETWORK) ipv4.dhcp=true 2>/dev/null || true; \
		incus network set $(INCUS_NETWORK) dns.mode=managed 2>/dev/null || true; \
		incus network set $(INCUS_NETWORK) dns.nameservers=8.8.8.8,1.1.1.1 2>/dev/null || true; \
	fi
# Profile: create with devices when missing, otherwise add any missing device.
	@if ! incus profile show $(INCUS_PROFILE) >/dev/null 2>&1; then \
		$(ECHO_CMD) "Creating profile $(INCUS_PROFILE)..."; \
		incus profile create $(INCUS_PROFILE); \
		incus profile device add $(INCUS_PROFILE) root disk path=/ pool=default 2>/dev/null || \
		incus profile device add $(INCUS_PROFILE) root disk path=/ 2>/dev/null || true; \
		incus profile device add $(INCUS_PROFILE) eth0 nic network=$(INCUS_NETWORK) 2>/dev/null || true; \
	else \
		$(ECHO_CMD) "Ensuring profile devices..."; \
		if ! incus profile show $(INCUS_PROFILE) | grep -q "root:"; then \
			incus profile device add $(INCUS_PROFILE) root disk path=/ pool=default 2>/dev/null || \
			incus profile device add $(INCUS_PROFILE) root disk path=/ 2>/dev/null || true; \
		fi; \
		if ! incus profile show $(INCUS_PROFILE) | grep -q "eth0:"; then \
			incus profile device add $(INCUS_PROFILE) eth0 nic network=$(INCUS_NETWORK) 2>/dev/null || true; \
		fi; \
	fi
	@$(ECHO_CMD) "${GREEN}✅ Incus network ready.${NC}"
|
||||
|
||||
# Legacy path: deploy every service into its own Incus container that runs
# Docker inside. Iterates the five services in a shell loop instead of five
# hand-written $(MAKE) lines; a failing deploy does not stop the rest, same
# as the original line-per-line form.
incus-deploy-all: incus-setup-network ## [MID] Deploy all services to Incus (legacy Docker method)
	@$(ECHO_CMD) "${BLUE}📦 Deploying all services to Incus (Docker)...${NC}"
	@for svc in backend-api chat-server stream-server web haproxy; do \
		$(MAKE) -s incus-deploy-service SERVICE=$$svc; \
	done
	@$(ECHO_CMD) "${GREEN}✅ All services deployed to Incus.${NC}"
|
||||
|
||||
# Native path: deploy only the non-Rust services (backend-api, web, haproxy)
# into Incus containers without Docker. Same three deployments as before,
# expressed as one shell loop.
incus-deploy-all-native: incus-setup-network ## [MID] Deploy all services to Incus (native, no Docker) - excludes Rust services
	@$(ECHO_CMD) "${BLUE}📦 Deploying all services to Incus (native, excluding Rust services)...${NC}"
	@$(ECHO_CMD) "${YELLOW}⚠️ Note: chat-server and stream-server are excluded${NC}"
	@for svc in backend-api web haproxy; do \
		$(MAKE) -s incus-deploy-service-native SERVICE=$$svc; \
	done
	@$(ECHO_CMD) "${GREEN}✅ All services deployed to Incus.${NC}"
|
||||
|
||||
# Legacy per-service deploy: recreate the veza-<SERVICE> container from a
# Debian 13 image and install Docker inside it (docker-in-Incus).
# An existing container with the same name is force-deleted first, so this is
# destructive to the previous deployment of that service.
incus-deploy-service: ## [LOW] Deploy a service to Incus with Docker (usage: make incus-deploy-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${BLUE}📦 Deploying $(SERVICE) to Incus (Docker)...${NC}"
	@if incus list -c n --format csv | grep -q "^veza-$(SERVICE)$$"; then \
		$(ECHO_CMD) "${YELLOW}Container exists, removing...${NC}"; \
		incus delete veza-$(SERVICE) --force; \
	fi
	@incus init images:debian/13 veza-$(SERVICE) --profile $(INCUS_PROFILE)
	@incus start veza-$(SERVICE)
	@$(ECHO_CMD) "${BLUE}Installing Docker in container...${NC}"
# '|| true': a failed apt install leaves the container up for manual repair.
	@incus exec veza-$(SERVICE) -- bash -c "apt-get update && apt-get install -y docker.io docker-compose && systemctl enable docker && systemctl start docker" || true
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) deployed.${NC}"
|
||||
|
||||
# Native per-service deploy: validation is a compact guard clause; all real
# work is delegated to the external deploy-service-native.sh helper.
incus-deploy-service-native: ## [LOW] Deploy a service to Incus natively (usage: make incus-deploy-service-native SERVICE=backend-api)
	@[ -n "$(SERVICE)" ] || { $(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; exit 1; }
	@$(ECHO_CMD) "${BLUE}📦 Deploying $(SERVICE) to Incus (native)...${NC}"
	@$(INCUS_SCRIPTS)/deploy-service-native.sh $(SERVICE)
|
||||
|
||||
# Deploy the shared 'infra' container (PostgreSQL + Redis) via the native
# deploy script, then poll up to 30 s for both systemd units to be active.
# NOTE(review): the loop falls through after 30 attempts without failing —
# the final success banner prints even if the services never came up.
incus-deploy-infra: incus-setup-network ## [LOW] Deploy infrastructure services (PostgreSQL, Redis)
	@$(ECHO_CMD) "${BLUE}📦 Deploying infrastructure services...${NC}"
	@$(MAKE) -s incus-deploy-service-native SERVICE=infra
	@$(ECHO_CMD) "${BLUE}Waiting for infrastructure to be ready...${NC}"
	@for i in $$(seq 1 30); do \
		if incus exec veza-infra -- systemctl is-active postgresql >/dev/null 2>&1 && \
		   incus exec veza-infra -- systemctl is-active redis-server >/dev/null 2>&1; then \
			$(ECHO_CMD) "${GREEN}✅ Infrastructure services ready${NC}"; \
			break; \
		fi; \
		sleep 1; \
	done
	@$(ECHO_CMD) "${GREEN}✅ Infrastructure deployed.${NC}"
|
||||
|
||||
# Start every deployed Incus workload, tolerating missing containers and
# failed units (each start is reported individually, never fatal).
# Unit naming per container: veza-<svc> runs a same-named systemd unit,
# veza-web runs apache2, veza-haproxy runs haproxy, veza-infra runs
# postgresql + redis-server.
incus-start-all: ## [MID] Start all Incus services (excluding Rust services)
	@$(ECHO_CMD) "${BLUE}🚀 Starting all Incus services (excluding Rust services)...${NC}"
# Loop currently covers only backend-api (Rust services are excluded by design).
	@for service in backend-api; do \
		if incus list -c n --format csv | grep -q "^veza-$$service$$"; then \
			$(ECHO_CMD) "Starting veza-$$service..."; \
			if incus exec veza-$$service -- systemctl start veza-$$service 2>/dev/null; then \
				$(ECHO_CMD) "${GREEN} ✅ veza-$$service started${NC}"; \
			else \
				$(ECHO_CMD) "${YELLOW} ⚠️ veza-$$service failed to start (check logs)${NC}"; \
			fi; \
		fi; \
	done
	@if incus list -c n --format csv | grep -q "^veza-web$$"; then \
		$(ECHO_CMD) "Starting veza-web..."; \
		if incus exec veza-web -- systemctl start apache2 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ Apache started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ Apache failed to start${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv | grep -q "^veza-haproxy$$"; then \
		$(ECHO_CMD) "Starting veza-haproxy..."; \
		if incus exec veza-haproxy -- systemctl start haproxy 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ HAProxy started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ HAProxy failed to start${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv | grep -q "^veza-infra$$"; then \
		$(ECHO_CMD) "Starting infrastructure services..."; \
		if incus exec veza-infra -- systemctl start postgresql 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ PostgreSQL started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ PostgreSQL failed to start${NC}"; \
		fi; \
		if incus exec veza-infra -- systemctl start redis-server 2>/dev/null; then \
			$(ECHO_CMD) "${GREEN} ✅ Redis started${NC}"; \
		else \
			$(ECHO_CMD) "${YELLOW} ⚠️ Redis failed to start${NC}"; \
		fi; \
	fi
	@$(ECHO_CMD) "${GREEN}✅ All services started.${NC}"
	@$(ECHO_CMD) "${BLUE}Run 'make incus-status' to check service status${NC}"
|
||||
|
||||
# Stop every container whose name contains 'veza-'; individual stop failures
# (already stopped, missing) are ignored.
incus-stop-all: ## [MID] Stop all Incus containers
	@$(ECHO_CMD) "${YELLOW}🛑 Stopping all Incus containers...${NC}"
	@incus list -c n --format csv | grep veza- | while read -r container; do \
		incus stop $$container 2>/dev/null || true; \
	done
	@$(ECHO_CMD) "${GREEN}✅ All Incus containers stopped.${NC}"
|
||||
|
||||
# Read-only status report: the container table, then per-container systemd
# unit state (active/inactive) using the same unit-name conventions as
# incus-start-all. Every probe is non-fatal.
incus-status: ## [MID] Show status of all Incus services
	@$(ECHO_CMD) "${BOLD}${CYAN}📊 INCUS DEPLOYMENT STATUS${NC}"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Containers:${NC}"
	@incus list veza- --format table 2>/dev/null || echo " No containers found"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}Service Status:${NC}"
# App containers run a systemd unit named after themselves (veza-<svc>).
	@for service in backend-api chat-server stream-server; do \
		if incus list -c n --format csv 2>/dev/null | grep -q "^veza-$$service$$"; then \
			STATUS=$$(incus exec veza-$$service -- systemctl is-active veza-$$service 2>/dev/null || echo "inactive"); \
			if [ "$$STATUS" = "active" ]; then \
				$(ECHO_CMD) " ${GREEN}✅ veza-$$service: active${NC}"; \
			else \
				$(ECHO_CMD) " ${YELLOW}⚠️ veza-$$service: $$STATUS${NC}"; \
			fi; \
		fi; \
	done
	@if incus list -c n --format csv 2>/dev/null | grep -q "^veza-web$$"; then \
		STATUS=$$(incus exec veza-web -- systemctl is-active apache2 2>/dev/null || echo "inactive"); \
		if [ "$$STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ veza-web (Apache): active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ veza-web (Apache): $$STATUS${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv 2>/dev/null | grep -q "^veza-haproxy$$"; then \
		STATUS=$$(incus exec veza-haproxy -- systemctl is-active haproxy 2>/dev/null || echo "inactive"); \
		if [ "$$STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ veza-haproxy: active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ veza-haproxy: $$STATUS${NC}"; \
		fi; \
	fi
	@if incus list -c n --format csv 2>/dev/null | grep -q "^veza-infra$$"; then \
		PG_STATUS=$$(incus exec veza-infra -- systemctl is-active postgresql 2>/dev/null || echo "inactive"); \
		REDIS_STATUS=$$(incus exec veza-infra -- systemctl is-active redis-server 2>/dev/null || echo "inactive"); \
		if [ "$$PG_STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ PostgreSQL: active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ PostgreSQL: $$PG_STATUS${NC}"; \
		fi; \
		if [ "$$REDIS_STATUS" = "active" ]; then \
			$(ECHO_CMD) " ${GREEN}✅ Redis: active${NC}"; \
		else \
			$(ECHO_CMD) " ${YELLOW}⚠️ Redis: $$REDIS_STATUS${NC}"; \
		fi; \
	fi
	@$(ECHO_CMD) ""
||||
|
||||
# Stream (follow) the journal of one veza-<SERVICE> container; blocks until
# interrupted.
incus-logs: ## [LOW] Show logs from Incus container (usage: make incus-logs SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@incus exec veza-$(SERVICE) -- journalctl -f
|
||||
45
make/infra.mk
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
# ==============================================================================
|
||||
# INFRASTRUCTURE (Docker: Postgres, Redis, RabbitMQ)
|
||||
# ==============================================================================
|
||||
|
||||
.PHONY: infra-up infra-down wait-for-infra wait-for-services db-shell redis-shell db-migrate
|
||||
|
||||
# Bring up the dev infrastructure stack (postgres/redis/rabbitmq per the
# compose file) detached, then block until postgres and redis answer health
# probes via wait-for-infra.
infra-up: ## [MID] Start Docker Infra (with health checks)
	@$(ECHO_CMD) "${BLUE}🐳 Starting Infrastructure...${NC}"
	@docker compose -f $(COMPOSE_FILE) up -d
	@$(MAKE) -s wait-for-infra
|
||||
|
||||
# Tear down the dev infrastructure stack (containers only; volumes are kept —
# see clean-deep for '-v').
infra-down: ## [MID] Stop Docker Infra
	@$(ECHO_CMD) "${BLUE}🛑 Stopping Infrastructure...${NC}"
	@docker compose -f $(COMPOSE_FILE) down
|
||||
|
||||
# Poll postgres (pg_isready) and redis (PING) inside the compose stack once a
# second, printing a dot per attempt.
# NOTE(review): no timeout — if either service never becomes healthy this
# target spins forever; consider a bounded retry count.
wait-for-infra: ## [LOW] Wait for infrastructure to be ready
	@printf "${BLUE}⏳ Waiting for services...${NC}"
	@until docker compose -f $(COMPOSE_FILE) exec -T postgres pg_isready -U $(DB_USER) > /dev/null 2>&1; do printf "."; sleep 1; done
	@until docker compose -f $(COMPOSE_FILE) exec -T redis redis-cli ping > /dev/null 2>&1; do printf "."; sleep 1; done
	@$(ECHO_CMD) " ${GREEN}OK${NC}"
|
||||
|
||||
# Wait for every app container in the prod stack to accept an exec probe
# (a trivial 'echo ready'). This only proves the container is running, not
# that the application inside is healthy.
# NOTE(review): like wait-for-infra, these loops have no timeout.
wait-for-services: ## [LOW] Wait for all application services
	@printf "${BLUE}⏳ Waiting for services...${NC}"
	@for service in backend-api chat-server stream-server web; do \
		until docker compose -f $(COMPOSE_PROD) exec -T $$service echo "ready" > /dev/null 2>&1; do \
			printf "."; sleep 1; \
		done; \
	done
	@$(ECHO_CMD) " ${GREEN}OK${NC}"
|
||||
|
||||
# Interactive psql session inside the compose postgres container, using the
# configured DB_USER / DB_NAME.
db-shell: ## [MID] Connect to Postgres shell
	@docker compose -f $(COMPOSE_FILE) exec postgres psql -U $(DB_USER) -d $(DB_NAME)
|
||||
|
||||
# Interactive redis-cli session inside the compose redis container.
redis-shell: ## [MID] Connect to Redis shell
	@docker compose -f $(COMPOSE_FILE) exec redis redis-cli
|
||||
|
||||
# Run every service's migrations after infra is up: the Go migrate tool plus
# sqlx for both Rust services. Each failure is downgraded to a warning so one
# broken migration set does not block the others.
# NOTE(review): presumably each tool reads its connection string from the
# environment / .env (DATABASE_URL is not passed explicitly) — verify.
db-migrate: infra-up ## [MID] Run all database migrations
	@$(ECHO_CMD) "${BLUE}🔄 Running Migrations...${NC}"
	@$(ECHO_CMD) " -> [Go] Migrating..."
	@(cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/migrate_tool/main.go up || $(ECHO_CMD) "${YELLOW}Warning: Go migration failed${NC}")
	@$(ECHO_CMD) " -> [Chat] Migrating..."
	@(cd $(ROOT)/$(SERVICE_DIR_chat-server) && sqlx migrate run || $(ECHO_CMD) "${YELLOW}Warning: Chat migration failed${NC}")
	@$(ECHO_CMD) " -> [Stream] Migrating..."
	@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && sqlx migrate run || $(ECHO_CMD) "${YELLOW}Warning: Stream migration failed${NC}")
	@$(ECHO_CMD) "${GREEN}✅ Migrations done.${NC}"
|
||||
38
make/services.mk
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
# ==============================================================================
# SERVICE LIFECYCLE (Docker: start/stop/restart/logs per service)
# ==============================================================================

.PHONY: start-service stop-service restart-service logs-service

start-service: ## [MID] Start a specific service (usage: make start-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${BLUE}🚀 Starting $(SERVICE)...${NC}"
	@# Try Docker first; fall back to a locally-run process if compose fails.
	@docker compose -f $(COMPOSE_PROD) up -d $(SERVICE) 2>/dev/null || \
		$(MAKE) -s start-local-service SERVICE=$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) started.${NC}"
||||
stop-service: ## [MID] Stop a specific service (usage: make stop-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@$(ECHO_CMD) "${YELLOW}🛑 Stopping $(SERVICE)...${NC}"
	@# Mirror of start-service: Docker first, then the local-process fallback.
	@docker compose -f $(COMPOSE_PROD) stop $(SERVICE) 2>/dev/null || \
		$(MAKE) -s stop-local-service SERVICE=$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) stopped.${NC}"
||||
# SERVICE validation happens in the stop-service prerequisite: an empty
# SERVICE aborts there before anything is restarted.
restart-service: stop-service ## [MID] Restart a specific service (usage: make restart-service SERVICE=backend-api)
	@$(ECHO_CMD) "${BLUE}🔄 Restarting $(SERVICE)...${NC}"
	@$(MAKE) -s start-service SERVICE=$(SERVICE)
	@$(ECHO_CMD) "${GREEN}✅ $(SERVICE) restarted.${NC}"

logs-service: ## [MID] Show logs for a service (usage: make logs-service SERVICE=backend-api)
	@if [ -z "$(SERVICE)" ]; then \
		$(ECHO_CMD) "${RED}❌ Please specify SERVICE=name${NC}"; \
		exit 1; \
	fi
	@docker compose -f $(COMPOSE_PROD) logs -f $(SERVICE) 2>/dev/null || \
		$(ECHO_CMD) "${YELLOW}Service not running in Docker, check local logs${NC}"
71
make/test.mk
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
# ==============================================================================
# TEST & QUALITY (unit tests, lint, format)
# ==============================================================================

.PHONY: test test-tmt lint fmt status test-web test-backend-api test-chat-server test-stream-server
.PHONY: lint-web lint-backend-api lint-chat-server lint-stream-server

# Umbrella test target: unit-level suites only (-short / --lib / --run) so it
# stays fast; infra-up is a prerequisite because Go tests may hit Postgres.
test: infra-up ## [MID] Run All Tests (Fastest strategy)
	@$(ECHO_CMD) "${BLUE}🧪 Running Tests...${NC}"
	@$(ECHO_CMD) " [Go] Unit Tests..."
	@(cd $(ROOT)/$(SERVICE_DIR_backend-api) && go test ./... -short)
	@$(ECHO_CMD) " [Rust] Unit Tests..."
	@(cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo test --lib -q)
	@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo test --lib -q)
	@$(ECHO_CMD) " [Web] Unit Tests..."
	@(cd $(ROOT)/$(SERVICE_DIR_web) && npm run test -- --run)
	@$(ECHO_CMD) "${GREEN}✅ All tests passed.${NC}"
|
||||
test-tmt: ## [MID] Run Unified TMT Pipeline
	@$(ECHO_CMD) "${BLUE}🧪 Running TMT Pipeline...${NC}"
	@# Guard: give an actionable install hint instead of a raw "command not found".
	@command -v tmt >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ tmt is missing! Install with 'pip install tmt'${NC}"; exit 1; }
	@tmt run
|
||||
# --- Per-service test targets (run a single suite in isolation) ---

test-web: ## [MID] Run Web tests only
	@$(ECHO_CMD) "${BLUE}🧪 Running Web tests...${NC}"
	@(cd $(ROOT)/$(SERVICE_DIR_web) && npm run test -- --run)

test-backend-api: ## [MID] Run Go backend tests only
	@$(ECHO_CMD) "${BLUE}🧪 Running Backend API tests...${NC}"
	@(cd $(ROOT)/$(SERVICE_DIR_backend-api) && go test ./... -short)

test-chat-server: ## [MID] Run Chat server tests only
	@$(ECHO_CMD) "${BLUE}🧪 Running Chat server tests...${NC}"
	@(cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo test --lib -q)

test-stream-server: ## [MID] Run Stream server tests only
	@$(ECHO_CMD) "${BLUE}🧪 Running Stream server tests...${NC}"
	@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo test --lib -q)
||||
# Umbrella lint: best-effort by design (|| true) so one failing linter does
# not mask the others' output. Use the strict lint-<service> targets in CI.
lint: ## [MID] Lint everything
	@$(ECHO_CMD) "${BLUE}🔍 Linting Codebase...${NC}"
	@(cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo clippy -- -D warnings) || true
	@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo clippy -- -D warnings) || true
	@(cd $(ROOT)/$(SERVICE_DIR_backend-api) && golangci-lint run ./...) || true
	@(cd $(ROOT)/$(SERVICE_DIR_web) && npm run lint) || true

# Strict per-service lint targets: these DO fail the build on findings.

lint-web: ## [MID] Lint web app only
	@(cd $(ROOT)/$(SERVICE_DIR_web) && npm run lint)

lint-backend-api: ## [MID] Lint Go backend only
	@(cd $(ROOT)/$(SERVICE_DIR_backend-api) && golangci-lint run ./...)

lint-chat-server: ## [MID] Lint Chat server only
	@(cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo clippy -- -D warnings)

lint-stream-server: ## [MID] Lint Stream server only
	@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo clippy -- -D warnings)
|
||||
fmt: ## [MID] Format everything
	@$(ECHO_CMD) "${BLUE}✨ Formatting...${NC}"
	@(cd $(ROOT)/$(SERVICE_DIR_backend-api) && go fmt ./...)
	@(cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo fmt)
	@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo fmt)
	@# npm formatting is optional: not every checkout has a format script.
	@(cd $(ROOT)/$(SERVICE_DIR_web) && npm run format) || true
||||
status: ## [MID] Show system health & stats
	@$(ECHO_CMD) "${BOLD}DOCKER STATS:${NC}"
	@# Keep the header row plus project containers; print a hint when empty.
	@docker stats --no-stream --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}" 2>/dev/null | grep -E "NAME|veza" || echo "No containers running"
	@$(ECHO_CMD) ""
	@$(ECHO_CMD) "${BOLD}LOCAL PORTS:${NC}"
	@lsof -i :$(PORT_backend-api) -i :$(PORT_chat-server) -i :$(PORT_stream-server) -i :$(PORT_web) 2>/dev/null | grep LISTEN || echo "No apps listening."
49
make/tools.mk
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
# ==============================================================================
# TOOLS: check, install deps, ports
# ==============================================================================

.PHONY: check-tools check-tools-incus install-tools install-deps check-ports

check-tools: ## [LOW] Check required tools
	@$(ECHO_CMD) "${BLUE}Checking core requirements...${NC}"
	@# Fail fast on the first missing tool; the whole loop runs in one shell.
	@for tool in docker go cargo npm; do \
		command -v $$tool >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ $$tool is missing!${NC}"; exit 1; }; \
	done
	@$(ECHO_CMD) "${GREEN}✅ All tools present.${NC}"
||||
check-tools-incus: ## [LOW] Check required tools for Incus deployment
	@$(ECHO_CMD) "${BLUE}Checking Incus deployment requirements...${NC}"
	@# incus gets a dedicated install hint; the rest are generic presence checks.
	@command -v incus >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ incus is missing! Install with: sudo snap install incus${NC}"; exit 1; }
	@command -v go >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ go is missing!${NC}"; exit 1; }
	@command -v cargo >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ cargo is missing!${NC}"; exit 1; }
	@command -v npm >/dev/null 2>&1 || { $(ECHO_CMD) "${RED}❌ npm is missing!${NC}"; exit 1; }
	@$(ECHO_CMD) "${GREEN}✅ All Incus tools present.${NC}"
|
||||
install-tools: ## [LOW] Install Power User tools (Hot Reload, Linters)
	@$(ECHO_CMD) "${BLUE}🛠️ Installing Dev Tools...${NC}"
	@# Each install is idempotent: skipped when the binary is already on PATH.
	@command -v air >/dev/null 2>&1 || go install github.com/air-verse/air@latest
	@command -v cargo-watch >/dev/null 2>&1 || cargo install cargo-watch
	@command -v sqlx >/dev/null 2>&1 || cargo install sqlx-cli --no-default-features --features native-tls,postgres
	@$(ECHO_CMD) "${GREEN}✅ Tools installed.${NC}"
|
||||
install-deps: ## [LOW] Install code dependencies (all backends + npm workspaces)
	@$(ECHO_CMD) "${BLUE}📦 Installing dependencies...${NC}"
	@$(ECHO_CMD) " -> [Go] Downloading modules..."
	@(cd $(ROOT)/$(SERVICE_DIR_backend-api) && go mod download)
	@$(ECHO_CMD) " -> [Rust Chat] Fetching crates..."
	@(cd $(ROOT)/$(SERVICE_DIR_chat-server) && cargo fetch)
	@$(ECHO_CMD) " -> [Rust Stream] Fetching crates..."
	@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo fetch)
	@$(ECHO_CMD) " -> [Web] Installing npm packages..."
	@(cd $(ROOT)/$(SERVICE_DIR_web) && npm install --silent)
	@$(ECHO_CMD) "${GREEN}✅ Dependencies installed.${NC}"
||||
check-ports: ## [LOW] Check if ports are available
	@$(ECHO_CMD) "${BLUE}🔍 Checking ports...${NC}"
	@# Informational only: reports busy/free per port, never fails the build.
	@for port in $(PORT_backend-api) $(PORT_chat-server) $(PORT_stream-server) $(PORT_web); do \
		if lsof -i :$$port -t >/dev/null 2>&1; then \
			$(ECHO_CMD) "${YELLOW}⚠️ Port $$port is busy${NC}"; \
		else \
			$(ECHO_CMD) "${GREEN}✅ Port $$port is free${NC}"; \
		fi; \
	done
14
make/ui.mk
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
# ==============================================================================
# UI: colors and echo helper
# ==============================================================================

# ANSI escape sequences; values are stored verbatim and only interpreted at
# print time by $(ECHO_CMD) ("echo -e" expands \033).
BOLD   := \033[1m
RED    := \033[0;31m
GREEN  := \033[0;32m
YELLOW := \033[0;33m
BLUE   := \033[0;34m
PURPLE := \033[0;35m
CYAN   := \033[0;36m
NC     := \033[0m

# `echo -e` relies on SHELL := /bin/bash (set in the root Makefile);
# plain POSIX sh does not guarantee an -e flag.
ECHO_CMD := echo -e
72
veza-backend-api/docs/ACCOUNT_LOCKOUT.md
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
# Account lockout (BE-SEC-007)
|
||||
|
||||
## How it works
|
||||
|
||||
After too many **failed login attempts**, the account is temporarily **locked** to slow down brute-force attacks.
|
||||
|
||||
1. **Storage**: Lock state and attempt counts are stored in **Redis** (keys `account_lockout:attempts:{email}` and `account_lockout:locked:{email}`).
|
||||
2. **Failed attempt**: Each failed login (wrong password, user not found, or email not verified) calls `RecordFailedAttempt(email)`. The attempt counter is incremented; it expires after a **window** (default 15 minutes).
|
||||
3. **Lock**: When the number of failed attempts in the window reaches **max attempts** (default 5), the account is **locked** for a **lockout duration** (default 30 minutes).
|
||||
4. **While locked**: Any login for that email returns **HTTP 423 Locked** with message *"Account is locked. Please try again later."*
|
||||
5. **Unlock**: The lock key has a TTL; when it expires, the account is automatically unlocked. A **successful login** also clears the lock and the attempt counter.
|
||||
|
||||
**Defaults** (see `internal/services/account_lockout_service.go`):
|
||||
|
||||
- Max attempts: **5**
|
||||
- Window: **15 minutes**
|
||||
- Lockout duration: **30 minutes**
|
||||
|
||||
If Redis is unavailable, lockout is disabled (no locking, no recording).
|
||||
|
||||
---
|
||||
|
||||
## Unlock an account
|
||||
|
||||
### Option 1: Admin API (recommended)
|
||||
|
||||
As an **admin** user, send:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/api/v1/admin/auth/unlock-account \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer <ADMIN_ACCESS_TOKEN>" \
|
||||
-d '{"email":"user@example.com"}'
|
||||
```
|
||||
|
||||
Response: `200 OK` with `{"message":"account unlocked","email":"user@example.com"}`.
|
||||
|
||||
### Option 2: Redis CLI
|
||||
|
||||
If you have access to Redis:
|
||||
|
||||
```bash
|
||||
# Replace with the locked user's email
|
||||
EMAIL="user@example.com"
|
||||
|
||||
redis-cli DEL "account_lockout:locked:${EMAIL}" "account_lockout:attempts:${EMAIL}"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Disable lockout for specific accounts
|
||||
|
||||
Use **exempt emails** so those accounts are never locked and failed attempts are not recorded.
|
||||
|
||||
**Environment variable** (comma-separated list):
|
||||
|
||||
```bash
|
||||
ACCOUNT_LOCKOUT_EXEMPT_EMAILS=testuser@example.com,admin@test.com
|
||||
```
|
||||
|
||||
Example in `.env` or `.env.development`:
|
||||
|
||||
```
|
||||
ACCOUNT_LOCKOUT_EXEMPT_EMAILS=testuser@example.com
|
||||
```
|
||||
|
||||
After restarting the API, that email will:
|
||||
|
||||
- Never be considered locked (`IsAccountLocked` returns false).
|
||||
- Not have failed attempts recorded (`RecordFailedAttempt` is a no-op).
|
||||
|
||||
This is intended for **test / dev accounts** only; avoid exempting real user emails in production.
|
||||
|
|
@ -48,6 +48,7 @@ type APIRouter struct {
|
|||
logger *zap.Logger
|
||||
versionManager *VersionManager // BE-SVC-019: API versioning manager
|
||||
monitoringService *services.MonitoringAlertingService // INT-021: API monitoring and alerting
|
||||
authService *authcore.AuthService // Set by setupAuthRoutes for admin unlock
|
||||
}
|
||||
|
||||
// NewAPIRouter crée une nouvelle instance de APIRouter
|
||||
|
|
@ -280,12 +281,12 @@ func (r *APIRouter) Setup(router *gin.Engine) error {
|
|||
// Groupe API v1 (nouveau frontend React)
|
||||
v1 := router.Group("/api/v1")
|
||||
{
|
||||
// Routes core protégées (sessions, uploads, audit, admin, conversations)
|
||||
r.setupCoreProtectedRoutes(v1)
|
||||
|
||||
// Auth routes first so r.authService is set for admin unlock in setupCoreProtectedRoutes
|
||||
if err := r.setupAuthRoutes(v1); err != nil {
|
||||
return err
|
||||
}
|
||||
// Routes core protégées (sessions, uploads, audit, admin, conversations)
|
||||
r.setupCoreProtectedRoutes(v1)
|
||||
|
||||
// Action 5.2.1.1: Validation endpoint for pre-validation
|
||||
r.setupValidateRoutes(v1)
|
||||
|
|
@ -407,11 +408,18 @@ func (r *APIRouter) setupAuthRoutes(router *gin.RouterGroup) error {
|
|||
|
||||
// BE-SEC-007: Initialize account lockout service and set it on auth service
|
||||
if r.config.RedisClient != nil {
|
||||
accountLockoutService := services.NewAccountLockoutService(r.config.RedisClient, r.logger)
|
||||
lockoutConfig := &services.AccountLockoutConfig{
|
||||
MaxAttempts: 5,
|
||||
LockoutDuration: 30 * time.Minute,
|
||||
WindowDuration: 15 * time.Minute,
|
||||
ExemptEmails: r.config.AccountLockoutExemptEmails,
|
||||
}
|
||||
accountLockoutService := services.NewAccountLockoutServiceWithConfig(r.config.RedisClient, r.logger, lockoutConfig)
|
||||
authService.SetAccountLockoutService(accountLockoutService)
|
||||
} else {
|
||||
r.logger.Warn("Redis not available - account lockout disabled")
|
||||
}
|
||||
r.authService = authService
|
||||
|
||||
// 2.5. User Service for GetMe endpoint
|
||||
userRepo := repositories.NewGormUserRepository(r.db.GormDB)
|
||||
|
|
@ -1421,6 +1429,11 @@ func (r *APIRouter) setupCoreProtectedRoutes(v1 *gin.RouterGroup) {
|
|||
|
||||
// MOD-P2-006: Profiling pprof (protégé par auth admin)
|
||||
admin.Any("/debug/pprof/*path", gin.WrapH(http.DefaultServeMux))
|
||||
|
||||
// BE-SEC-007: Unlock account locked by failed login attempts (admin only)
|
||||
if r.authService != nil {
|
||||
admin.POST("/auth/unlock-account", handlers.UnlockAccount(r.authService, r.logger))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||