feat: backend, stream server & infra improvements

Backend (Go):
- Config: CORS, RabbitMQ, rate limit, main config updates
- Routes: core, distribution, tracks routing changes
- Middleware: rate limiter, endpoint limiter, response cache hardening
- Handlers: distribution, search handler fixes
- Workers: job worker improvements
- Upload validator and logging config additions
- New migrations: products, orders, performance indexes
- Seed tooling and data

Stream Server (Rust):
- Audio processing, config, routes, simple stream server updates
- Dockerfile improvements

Infrastructure:
- docker-compose.yml updates
- nginx-rtmp config changes
- Makefile improvements (config, dev, high, infra)
- Root package.json and lock file updates
- .env.example updates

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
senke 2026-03-18 11:36:06 +01:00
parent 4b57b46bac
commit 73eca4f6ad
41 changed files with 1263 additions and 172 deletions

View file

@@ -96,10 +96,10 @@ services:
- rabbitmq_data:/var/lib/rabbitmq
healthcheck:
test: rabbitmq-diagnostics -q ping
interval: 5s
timeout: 5s
retries: 10
start_period: 40s
interval: 10s
timeout: 10s
retries: 12
start_period: 90s
networks:
- veza-net
deploy:
@@ -191,7 +191,7 @@ services:
volumes:
- hls-data:/data/hls
ports:
- "${PORT_BACKEND:-18080}:8080"
- "${PORT_BACKEND:-18080}:18080"
depends_on:
postgres:
condition: service_healthy
@@ -204,7 +204,7 @@ services:
networks:
- veza-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/api/v1/health"]
test: ["CMD-SHELL", "wget -q -O /dev/null http://localhost:18080/api/v1/health || exit 1"]
interval: 10s
timeout: 5s
retries: 5
@@ -239,26 +239,29 @@ services:
- REDIS_URL=redis://:${REDIS_PASSWORD:-devpassword}@redis:6379
- JWT_SECRET=${JWT_SECRET:?JWT_SECRET must be set in .env}
- SECRET_KEY=${JWT_SECRET:?JWT_SECRET must be set in .env}
- PORT=3001
- PORT=18082
- AWS_S3_ENDPOINT=http://minio:9000
- AWS_S3_BUCKET=veza-files
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-minioadmin}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-minioadmin}
- AWS_REGION=us-east-1
- HLS_OUTPUT_DIR=/data/hls
- RABBITMQ_URL=amqp://${RABBITMQ_DEFAULT_USER:-veza}:${RABBITMQ_DEFAULT_PASS:-devpassword}@rabbitmq:5672/
volumes:
- hls-data:/data/hls
ports:
- "${PORT_STREAM:-18082}:3001"
- "${PORT_STREAM:-18082}:18082"
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
rabbitmq:
condition: service_healthy
networks:
- veza-net
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3001/health"]
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:18082/health"]
interval: 10s
timeout: 5s
retries: 5
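
Note: the probe switches from curl to a shell-form wget call, presumably because the runtime image ships wget but not curl (the backend Dockerfile HEALTHCHECK below uses wget as well), and it now targets the container-internal port 18080. The handler it polls is not part of this diff; a minimal sketch of what both probes assume (any 200 on /api/v1/health), written against Gin as an assumption:

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	// Hypothetical health endpoint: any 200 response satisfies the wget probes.
	r.GET("/api/v1/health", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"status": "ok"})
	})
	r.Run(":18080") // container-internal port, matching the new 18080:18080 mapping
}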

View file

@@ -23,8 +23,8 @@ rtmp {
# Callbacks to backend for stream_key validation and is_live updates
# Params: name=stream_key, addr, app, etc.
on_publish http://backend-api:8080/api/v1/live/callback/publish;
on_publish_done http://backend-api:8080/api/v1/live/callback/publish_done;
on_publish http://backend-api:18080/api/v1/live/callback/publish;
on_publish_done http://backend-api:18080/api/v1/live/callback/publish_done;
}
}
}
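
Both RTMP callbacks now point at the backend's 18080 port. The callback handlers are not shown in this commit; as a rough sketch of the contract, nginx-rtmp sends a form-encoded POST whose name field carries the stream key, and a non-2xx reply makes it reject the incoming publish. A hypothetical Gin handler (names and validation logic are assumptions):

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// looksLikeStreamKey is a placeholder for the real database-backed validation.
func looksLikeStreamKey(key string) bool { return len(key) >= 16 }

func publishCallback(c *gin.Context) {
	streamKey := c.PostForm("name") // nginx-rtmp passes name=stream_key, plus addr, app, ...
	if !looksLikeStreamKey(streamKey) {
		c.Status(http.StatusForbidden) // non-2xx: nginx-rtmp drops the publish attempt
		return
	}
	// The real handler would also flip is_live on the matching live_streams row here.
	c.Status(http.StatusOK)
}

func main() {
	r := gin.Default()
	r.POST("/api/v1/live/callback/publish", publishCallback)
	r.Run(":18080")
}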

View file

@@ -28,7 +28,7 @@ SERVICE_DIR_haproxy :=
# --- Ports (override with PORT_* from .env)
# Defaults use 18xxx range to avoid conflicts with other projects on same machine
PORT_backend-api ?= 18080
PORT_stream-server ?= 3001
PORT_stream-server ?= 18082
PORT_web ?= 5173
PORT_haproxy ?= 80
# Infra (docker-compose dev)

View file

@@ -5,15 +5,27 @@
# are skipped until veza-common is fixed. Use dev-full to start everything.
# ==============================================================================
.PHONY: dev dev-full dev-backend dev-web dev-backend-api dev-stream-server
.PHONY: dev dev-full dev-backend dev-web dev-backend-api dev-e2e dev-stream-server
.PHONY: stop-local-services start-local-service stop-local-service
dev: check-ports infra-up ## [HIGH] Start Backend (Docker) + Web only (no Chat/Stream)
dev: infra-up-dev ## [HIGH] Start Backend (Go local) + Web (Vite) — everything you need
@$(ECHO_CMD) "${BOLD}${PURPLE}🚀 STARTING DEV (Backend + Web)${NC}"
@$(ECHO_CMD) " Backend: http://$(APP_DOMAIN):$(PORT_backend-api) (Docker)"
@# Kill any existing backend/frontend processes to free ports
@kill $$(lsof -ti:$(PORT_backend-api)) 2>/dev/null || true
@kill $$(lsof -ti:$(PORT_web)) 2>/dev/null || true
@sleep 1
@$(ECHO_CMD) " Backend: http://$(APP_DOMAIN):$(PORT_backend-api)"
@$(ECHO_CMD) " Web: http://$(APP_DOMAIN):$(PORT_web)"
@$(ECHO_CMD) "${YELLOW}Hit Ctrl+C to stop.${NC}"
@cd $(ROOT)/$(SERVICE_DIR_web) && npm run dev
@(trap 'kill 0' SIGINT; \
if command -v air >/dev/null; then \
$(ECHO_CMD) "${GREEN}[Go] Hot Reload Active (Air)${NC}" && cd $(ROOT)/$(SERVICE_DIR_backend-api) && air & \
else \
$(ECHO_CMD) "${GREEN}[Go] Starting...${NC}" && cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/api/main.go & \
fi; \
sleep 5; \
$(ECHO_CMD) "${GREEN}[Web] Starting Vite...${NC}" && cd $(ROOT)/$(SERVICE_DIR_web) && PORT=$(PORT_web) npm run dev & \
wait)
dev-full-docker: check-ports infra-up ## [HIGH] Start full stack in Docker (Backend, Stream, ClamAV) — then run make dev-web
@$(ECHO_CMD) "${GREEN}✅ Full stack (Docker) started. Run 'make dev-web' for the frontend.${NC}"
@@ -55,6 +67,27 @@ dev-backend-api: check-ports infra-up-dev ## [MID] Start Go backend only — inf
@$(ECHO_CMD) "${GREEN}[Backend API] Starting...${NC}"
@if command -v air >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_backend-api) && air; else cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/api/main.go; fi
dev-e2e: infra-up-dev ## [HIGH] Start Backend + Web with rate limiting disabled for E2E tests
@$(ECHO_CMD) "${BOLD}${PURPLE}🚀 STARTING E2E DEV (rate limiting OFF)${NC}"
@# Kill any existing backend/frontend processes to free ports
@kill $$(lsof -ti:$(PORT_backend-api)) 2>/dev/null || true
@kill $$(lsof -ti:$(PORT_web)) 2>/dev/null || true
@sleep 1
@$(ECHO_CMD) " Backend: http://$(APP_DOMAIN):$(PORT_backend-api) (APP_ENV=test, rate limit OFF)"
@$(ECHO_CMD) " Web: http://$(APP_DOMAIN):$(PORT_web)"
@$(ECHO_CMD) "${YELLOW}Hit Ctrl+C to stop. Then run: npm run e2e${NC}"
@(trap 'kill 0' SIGINT; \
export DISABLE_RATE_LIMIT_FOR_TESTS=true; \
export APP_ENV=test; \
if command -v air >/dev/null; then \
$(ECHO_CMD) "${GREEN}[Go] Hot Reload Active (Air) — rate limit OFF${NC}" && cd $(ROOT)/$(SERVICE_DIR_backend-api) && air & \
else \
$(ECHO_CMD) "${GREEN}[Go] Starting — rate limit OFF${NC}" && cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/api/main.go & \
fi; \
sleep 5; \
$(ECHO_CMD) "${GREEN}[Web] Starting Vite...${NC}" && cd $(ROOT)/$(SERVICE_DIR_web) && PORT=$(PORT_web) npm run dev & \
wait)
dev-stream-server: check-ports infra-up-dev ## [MID] Start Stream server only — infra from docker-compose.dev.yml
@$(ECHO_CMD) "${GREEN}[Stream] Starting...${NC}"
@if command -v cargo-watch >/dev/null; then cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo watch -x run -q; else cd $(ROOT)/$(SERVICE_DIR_stream-server) && cargo run -q; fi
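
The new dev-e2e target exports DISABLE_RATE_LIMIT_FOR_TESTS=true and APP_ENV=test before starting the Go backend. How the limiter consumes those variables is not visible in this diff; a plausible wiring (function name and placement are assumptions) is a wrapper that turns the limiter into a pass-through under exactly that combination:

package middleware

import (
	"os"

	"github.com/gin-gonic/gin"
)

// RateLimiterOrNoop returns the real limiter unless the E2E environment
// variables set by `make dev-e2e` ask for it to be disabled.
func RateLimiterOrNoop(real gin.HandlerFunc) gin.HandlerFunc {
	if os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" && os.Getenv("APP_ENV") == "test" {
		return func(c *gin.Context) { c.Next() } // no counting, no 429s during E2E runs
	}
	return real
}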

View file

@@ -60,8 +60,8 @@ deploy-incus: build-all-native ## [HIGH] Deploy all services with Incus containe
@$(MAKE) -s incus-start-all
@$(ECHO_CMD) "${GREEN}✅ Incus deployment complete!${NC}"
@$(ECHO_CMD) "${BLUE}Access services at:${NC}"
@$(ECHO_CMD) " Backend API: http://10.10.10.2:8080"
@$(ECHO_CMD) " Stream Server: http://10.10.10.4:3002"
@$(ECHO_CMD) " Backend API: http://10.10.10.2:18080"
@$(ECHO_CMD) " Stream Server: http://10.10.10.4:18082"
@$(ECHO_CMD) " Web Frontend: http://10.10.10.5:80"
@$(ECHO_CMD) " HAProxy: http://10.10.10.6:80"

View file

@@ -2,7 +2,7 @@
# INFRASTRUCTURE (Docker: Postgres, Redis, RabbitMQ)
# ==============================================================================
.PHONY: infra-up infra-up-dev infra-down wait-for-infra wait-for-services db-shell redis-shell rabbitmq-shell db-migrate
.PHONY: infra-up infra-up-dev infra-down wait-for-infra wait-for-services db-shell redis-shell rabbitmq-shell db-migrate db-seed db-reset
# Infra-only (TASK-QA-010): use for make dev-full (apps run locally with hot reload)
infra-up-dev: ## [MID] Start dev infra only (Postgres, Redis, RabbitMQ, ClamAV, MinIO) — use with make dev-full
@@ -51,3 +51,14 @@ db-migrate: infra-up ## [MID] Run all database migrations
@$(ECHO_CMD) " -> [Stream] Migrating..."
@(cd $(ROOT)/$(SERVICE_DIR_stream-server) && sqlx migrate run || $(ECHO_CMD) "${YELLOW}Warning: Stream migration failed${NC}")
@$(ECHO_CMD) "${GREEN}✅ Migrations done.${NC}"
db-seed: ## [MID] Populate database with demo data (10 users, 22 tracks, playlists, chat)
@$(ECHO_CMD) "${BLUE}🌱 Seeding database...${NC}"
@cd $(ROOT)/$(SERVICE_DIR_backend-api) && go run cmd/tools/seed/main.go
@$(ECHO_CMD) "${GREEN}✅ Seed done.${NC}"
db-reset: ## [MID] Drop and recreate database, then seed
@$(ECHO_CMD) "${YELLOW}⚠️ Resetting database (all data will be lost)...${NC}"
@PGPASSWORD=$(DB_PASS) psql -h $(DB_HOST) -p $(DB_PORT) -U $(DB_USER) -d postgres -c "DROP DATABASE IF EXISTS $(DB_NAME);" 2>/dev/null
@PGPASSWORD=$(DB_PASS) psql -h $(DB_HOST) -p $(DB_PORT) -U $(DB_USER) -d postgres -c "CREATE DATABASE $(DB_NAME) OWNER $(DB_USER);"
@$(ECHO_CMD) "${GREEN}✅ Database reset. Start backend to run migrations, then run 'make db-seed'.${NC}"

package-lock.json generated
View file

@@ -6602,6 +6602,10 @@
"url": "https://opencollective.com/typescript-eslint"
}
},
"node_modules/@veza/design-system": {
"resolved": "packages/design-system",
"link": true
},
"node_modules/@vitejs/plugin-react": {
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz",
@@ -6833,9 +6837,9 @@
}
},
"node_modules/acorn": {
"version": "8.15.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"version": "8.16.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz",
"integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==",
"dev": true,
"license": "MIT",
"bin": {
@@ -18415,25 +18419,28 @@
},
"packages/design-system": {
"name": "@veza/design-system",
"version": "0.1.0",
"extraneous": true,
"version": "2.0.0",
"license": "UNLICENSED",
"dependencies": {
"clsx": "^2.1.0",
"lucide-react": "^0.562.0",
"tailwind-merge": "^2.2.1"
"clsx": "^2.0.0",
"tailwind-merge": "^3.0.0"
},
"devDependencies": {
"@types/react": "^18.2.48",
"@types/react-dom": "^18.2.18",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"tailwindcss": "^4.0.0",
"tsup": "^8.0.0",
"typescript": "^5.3.3"
"typescript": "^5.9.0"
},
"peerDependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0"
"react": ">=18",
"react-dom": ">=18"
}
},
"packages/design-system/node_modules/tailwind-merge": {
"version": "3.5.0",
"resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.5.0.tgz",
"integrity": "sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==",
"license": "MIT",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/dcastil"
}
},
"veza-backend-api": {},

View file

@@ -2,12 +2,16 @@
"name": "veza-monorepo",
"private": true,
"packageManager": "npm@10.9.2",
"workspaces": ["apps/web", "packages/*", "veza-backend-api", "veza-stream-server"],
"workspaces": [
"apps/web",
"packages/*",
"veza-backend-api",
"veza-stream-server"
],
"overrides": {
"axios": ">=1.13.5"
},
"devDependencies": {
"turbo": "^2.3.0",
"@eslint/js": "^9.39.1",
"@playwright/test": "^1.57.0",
"@types/node": "^25.0.3",
@@ -15,6 +19,7 @@
"eslint-plugin-react": "^7.37.5",
"globals": "^16.5.0",
"prettier": "3.6.2",
"turbo": "^2.3.0",
"typescript": "5.9.3",
"typescript-eslint": "^8.46.3"
},
@@ -22,6 +27,17 @@
"prepare": "husky",
"build": "turbo run build",
"test": "turbo run test",
"lint": "turbo run lint"
"lint": "turbo run lint",
"e2e": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts",
"e2e:all": "VEZA_ENV=test PLAYWRIGHT_ALL=1 npx playwright test --config=tests/e2e/playwright.config.ts",
"e2e:critical": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts --grep @critical",
"e2e:fast": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts --grep @critical --workers=75%",
"e2e:serial": "VEZA_ENV=test PLAYWRIGHT_WORKERS=1 npx playwright test --config=tests/e2e/playwright.config.ts",
"e2e:list": "npx playwright test --config=tests/e2e/playwright.config.ts --list",
"e2e:audit": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts; node tests/e2e/scripts/generate-audit-report.mjs && (xdg-open tests/e2e/VEZA_AUDIT_REPORT.html 2>/dev/null || open tests/e2e/VEZA_AUDIT_REPORT.html 2>/dev/null || true)",
"e2e:ui": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts --ui",
"e2e:routes": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts --grep @route-coverage",
"e2e:visual": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts --grep @visual",
"e2e:visual:update": "VEZA_ENV=test npx playwright test --config=tests/e2e/playwright.config.ts --grep @visual --update-snapshots"
}
}

View file

@@ -10,7 +10,7 @@ RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'
BASE_URL="http://localhost:8080/api/v1"
BASE_URL="http://localhost:18080/api/v1"
TEST_EMAIL="test_$(date +%s)@veza.local"
TEST_USERNAME="testuser_$(date +%s)"
TEST_PASSWORD="TestPassword123!"
@@ -33,7 +33,7 @@ print_result() {
# Test 1: Health Check
echo -e "${YELLOW}1. Test Health Check${NC}"
RESPONSE=$(curl -s -w "\n%{http_code}" http://localhost:8080/api/v1/health)
RESPONSE=$(curl -s -w "\n%{http_code}" http://localhost:18080/api/v1/health)
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
BODY=$(echo "$RESPONSE" | sed '$d')
if [ "$HTTP_CODE" = "200" ]; then

View file

@@ -53,12 +53,12 @@ RUN chown -R app:app /app
USER app
# Expose port
EXPOSE 8080
EXPOSE 18080
# Health check
# P3.2: Use /api/v1/health endpoint created in P1.6
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://localhost:8080/api/v1/health || exit 1
CMD wget --no-verbose --tries=1 --spider http://localhost:18080/api/v1/health || exit 1
# Run the application
CMD ["./veza-api"]

View file

@@ -40,7 +40,7 @@ import (
// @license.name Apache 2.0
// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
// @host localhost:8080
// @host localhost:18080
// @BasePath /api/v1
// @securityDefinitions.apikey BearerAuth

View file

@@ -0,0 +1,685 @@
package main
import (
"database/sql"
"fmt"
"log"
"math/rand"
"os"
"strings"
"time"
"github.com/google/uuid"
"github.com/joho/godotenv"
_ "github.com/lib/pq"
"golang.org/x/crypto/bcrypt"
)
// ─── helpers ────────────────────────────────────────────────────────────────
func must(err error, msg string) {
if err != nil {
log.Fatalf("%s: %v", msg, err)
}
}
func tryExec(db *sql.DB, query string, args ...interface{}) {
_, _ = db.Exec(query, args...)
}
func execOrWarn(db *sql.DB, label string, query string, args ...interface{}) {
if _, err := db.Exec(query, args...); err != nil {
log.Printf(" ⚠ %s: %v", label, err)
}
}
func countRows(db *sql.DB, table string) int {
var n int
_ = db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s", table)).Scan(&n)
return n
}
func randBetween(min, max int) int { return min + rand.Intn(max-min+1) }
func daysAgo(d int) time.Time { return time.Now().Add(-time.Duration(d) * 24 * time.Hour) }
func hoursAgo(h int) time.Time { return time.Now().Add(-time.Duration(h) * time.Hour) }
// ─── main ───────────────────────────────────────────────────────────────────
func main() {
_ = godotenv.Load()
dbURL := os.Getenv("DATABASE_URL")
if dbURL == "" {
log.Fatal("DATABASE_URL not set")
}
db, err := sql.Open("postgres", dbURL)
must(err, "DB connect")
defer db.Close()
must(db.Ping(), "DB ping")
fmt.Println("╔═══════════════════════════════════════════════╗")
fmt.Println("║ VEZA — Database Seed Script ║")
fmt.Println("╚═══════════════════════════════════════════════╝")
fmt.Println()
// Hash a shared password (bcrypt cost 12)
hash, err := bcrypt.GenerateFromPassword([]byte("Password123!"), 12)
must(err, "bcrypt")
pw := string(hash)
// ═════════════════════════════════════════════════════════════════════════
// USERS (10)
// ═════════════════════════════════════════════════════════════════════════
type user struct {
id, email, username, display, role, bio string
isAdmin bool
}
var users []user
if countRows(db, "users") == 0 {
fmt.Print("Creating users... ")
users = []user{
{uuid.NewString(), "admin@veza.fr", "admin_veza", "Admin Veza", "admin", "Platform administrator", true},
{uuid.NewString(), "amelie@veza.fr", "amelie_dubois", "Amelie Dubois", "creator", "Productrice electro basee a Paris. Melodic techno & ambient.", false},
{uuid.NewString(), "marcus@veza.fr", "marcus_beats", "Marcus Beats", "creator", "Beatmaker from Lyon. Hip-hop, trap, lo-fi.", false},
{uuid.NewString(), "sakura@veza.fr", "sakura_sound", "Sakura Sound", "creator", "Sound designer & foley artist. Cinematic textures.", false},
{uuid.NewString(), "djrenzo@veza.fr", "dj_renzo", "DJ Renzo", "creator", "House & disco edits. Paris nightlife.", false},
{uuid.NewString(), "clara@veza.fr", "clara_voice", "Clara Voix", "creator", "Singer-songwriter. Indie folk & acoustic.", false},
{uuid.NewString(), "listener1@veza.fr", "music_lover", "Music Lover", "user", "Just here for the vibes.", false},
{uuid.NewString(), "listener2@veza.fr", "groove_hunter", "Groove Hunter", "user", "Always looking for fresh beats.", false},
{uuid.NewString(), "listener3@veza.fr", "night_owl", "Night Owl", "premium", "Late night music sessions.", false},
{uuid.NewString(), "mod@veza.fr", "moderator_veza", "Moderator", "moderator", "Community moderator.", false},
}
for _, u := range users {
_, err := db.Exec(`INSERT INTO users (id, email, email_verified_at, password_hash, username, slug, display_name,
role, is_active, is_verified, is_admin, bio, created_at, updated_at)
VALUES ($1,$2,NOW(),$3,$4,$5,$6,$7,true,true,$8,$9,NOW()-interval '1 day'*$10,NOW())`,
u.id, u.email, pw, u.username, u.username, u.display, u.role, u.isAdmin, u.bio, randBetween(1, 60))
must(err, "user "+u.username)
}
fmt.Printf("%d created\n", len(users))
// Profiles & settings
for _, u := range users {
tryExec(db, `INSERT INTO user_profiles (user_id,bio,tagline,language,theme,profile_visibility) VALUES ($1,$2,$3,'fr','auto','public')`,
u.id, u.bio, strings.Split(u.bio, ".")[0])
tryExec(db, `INSERT INTO user_settings (user_id) VALUES ($1) ON CONFLICT DO NOTHING`, u.id)
}
// Roles
tryExec(db, `INSERT INTO user_roles (user_id,role_id) SELECT $1,id FROM roles WHERE name='admin' ON CONFLICT DO NOTHING`, users[0].id)
tryExec(db, `INSERT INTO user_roles (user_id,role_id) SELECT $1,id FROM roles WHERE name='moderator' ON CONFLICT DO NOTHING`, users[9].id)
} else {
fmt.Println("Users already exist — loading IDs...")
rows, _ := db.Query(`SELECT id,email,username,display_name,role,COALESCE(bio,''),is_admin FROM users ORDER BY created_at LIMIT 10`)
for rows != nil && rows.Next() {
var u user
_ = rows.Scan(&u.id, &u.email, &u.username, &u.display, &u.role, &u.bio, &u.isAdmin)
users = append(users, u)
}
if rows != nil {
rows.Close()
}
}
if len(users) < 10 {
fmt.Println("⚠ Need at least 10 users for full seed. Exiting.")
os.Exit(0)
}
amelieID := users[1].id
marcusID := users[2].id
sakuraID := users[3].id
renzoID := users[4].id
claraID := users[5].id
// ═════════════════════════════════════════════════════════════════════════
// TRACKS (22)
// ═════════════════════════════════════════════════════════════════════════
type track struct {
id, creator, title, artist, album, genre, key, tags string
duration, bpm int
}
var tracks []track
if countRows(db, "tracks") == 0 {
fmt.Print("Creating tracks... ")
tracks = []track{
{uuid.NewString(), amelieID, "Neon Dreams", "Amelie Dubois", "Neon EP", "electronic", "Am", "{electronic,ambient,melodic}", 342, 128},
{uuid.NewString(), amelieID, "Midnight Protocol", "Amelie Dubois", "Neon EP", "techno", "Dm", "{techno,dark,melodic}", 410, 132},
{uuid.NewString(), amelieID, "Aurora Borealis", "Amelie Dubois", "Neon EP", "ambient", "C", "{ambient,atmospheric,chill}", 520, 90},
{uuid.NewString(), amelieID, "Digital Rain", "Amelie Dubois", "Singles", "electronic", "Em", "{electronic,synth,progressive}", 285, 126},
{uuid.NewString(), amelieID, "Pulse", "Amelie Dubois", "Singles", "techno", "Bm", "{techno,driving,peak}", 378, 134},
{uuid.NewString(), marcusID, "Late Night Loops", "Marcus Beats", "Bedroom Sessions", "hip-hop", "Cm", "{lofi,chill,beats}", 198, 85},
{uuid.NewString(), marcusID, "Concrete Jungle", "Marcus Beats", "Bedroom Sessions", "hip-hop", "Fm", "{hiphop,boom-bap,gritty}", 225, 90},
{uuid.NewString(), marcusID, "Velvet Touch", "Marcus Beats", "Bedroom Sessions", "r&b", "Ab", "{rnb,smooth,lofi}", 240, 78},
{uuid.NewString(), marcusID, "City Lights", "Marcus Beats", "Singles", "trap", "Gm", "{trap,melodic,urban}", 210, 140},
{uuid.NewString(), marcusID, "Rainy Days", "Marcus Beats", "Singles", "lo-fi", "D", "{lofi,rain,relax}", 180, 72},
{uuid.NewString(), sakuraID, "Forest Whispers", "Sakura Sound", "Nature Vol.1", "ambient", "F", "{nature,foley,cinematic}", 480, 60},
{uuid.NewString(), sakuraID, "Ocean Depths", "Sakura Sound", "Nature Vol.1", "ambient", "Eb", "{water,deep,ambient}", 540, 55},
{uuid.NewString(), sakuraID, "Thunder Plains", "Sakura Sound", "Nature Vol.1", "cinematic", "Bb", "{storm,epic,cinematic}", 360, 80},
{uuid.NewString(), sakuraID, "Urban Field Recording", "Sakura Sound", "Singles", "experimental", "", "{field-recording,urban,experimental}", 300, 0},
{uuid.NewString(), renzoID, "Saturday Night Edit", "DJ Renzo", "Club Cuts", "house", "G", "{house,disco,funky}", 420, 122},
{uuid.NewString(), renzoID, "Funky Elevator", "DJ Renzo", "Club Cuts", "disco", "A", "{disco,funk,groovy}", 355, 118},
{uuid.NewString(), renzoID, "Deep in the Club", "DJ Renzo", "Club Cuts", "deep house", "Dm", "{deephouse,minimal,late-night}", 480, 124},
{uuid.NewString(), renzoID, "Sunrise Set", "DJ Renzo", "Singles", "house", "C", "{house,progressive,sunrise}", 600, 120},
{uuid.NewString(), claraID, "Paper Boats", "Clara Voix", "Whisper", "folk", "G", "{folk,acoustic,indie}", 220, 95},
{uuid.NewString(), claraID, "Morning Light", "Clara Voix", "Whisper", "indie", "D", "{indie,dreamy,morning}", 198, 100},
{uuid.NewString(), claraID, "Letters Never Sent", "Clara Voix", "Whisper", "folk", "Em", "{folk,emotional,singer-songwriter}", 265, 88},
{uuid.NewString(), claraID, "Wildflowers", "Clara Voix", "Singles", "acoustic", "C", "{acoustic,nature,gentle}", 185, 92},
}
for i, t := range tracks {
createdAt := daysAgo(60 - i*2)
_, err := db.Exec(`INSERT INTO tracks (id,creator_id,user_id,title,artist,album,genre,duration,bpm,musical_key,
visibility,is_public,is_downloadable,status,stream_status,play_count,like_count,tags,published_at,created_at,updated_at)
VALUES ($1,$2,$2,$3,$4,$5,$6,$7,$8,$9,'public',true,false,'completed','ready',$10,$11,$12::text[],$13,$13,$13)`,
t.id, t.creator, t.title, t.artist, t.album, t.genre, t.duration, t.bpm, t.key,
randBetween(10, 500), randBetween(2, 50), t.tags, createdAt)
must(err, "track "+t.title)
}
fmt.Printf("%d created\n", len(tracks))
} else {
fmt.Println("Tracks already exist — loading IDs...")
rows, _ := db.Query(`SELECT id,creator_id,title,artist,COALESCE(album,''),COALESCE(genre,''),COALESCE(musical_key,''),'{}',duration,COALESCE(bpm,0) FROM tracks ORDER BY created_at LIMIT 22`)
for rows != nil && rows.Next() {
var t track
_ = rows.Scan(&t.id, &t.creator, &t.title, &t.artist, &t.album, &t.genre, &t.key, &t.tags, &t.duration, &t.bpm)
tracks = append(tracks, t)
}
if rows != nil {
rows.Close()
}
}
// ═════════════════════════════════════════════════════════════════════════
// PLAYLISTS (6)
// ═════════════════════════════════════════════════════════════════════════
type playlist struct{ id, user, name, desc string }
var playlists []playlist
if countRows(db, "playlists") < 6 {
fmt.Print("Creating playlists... ")
playlists = []playlist{
{uuid.NewString(), amelieID, "Late Night Techno", "My favorite tracks for late sessions"},
{uuid.NewString(), marcusID, "Chill Beats Study", "Perfect background music for focus"},
{uuid.NewString(), renzoID, "Weekend Warm-Up", "Pre-party essentials"},
{uuid.NewString(), claraID, "Acoustic Mornings", "Gentle wake-up tracks"},
{uuid.NewString(), users[6].id, "Discovery Mix", "New finds from this month"},
{uuid.NewString(), users[7].id, "Workout Energy", "High-BPM motivation"},
}
for _, p := range playlists {
tryExec(db, `INSERT INTO playlists (id,user_id,name,title,description,visibility,is_public,is_collaborative) VALUES ($1,$2,$3,$3,$4,'public',true,false)`,
p.id, p.user, p.name, p.desc)
}
fmt.Printf("%d created\n", len(playlists))
// Playlist tracks
ptMap := []struct{ pi int; ti []int }{
{0, []int{0, 1, 4, 14, 16, 17}}, {1, []int{5, 7, 9, 18, 19}},
{2, []int{14, 15, 16, 3, 4}}, {3, []int{18, 19, 20, 21, 10}},
{4, []int{0, 5, 10, 14, 18, 8}}, {5, []int{1, 4, 8, 14, 15, 16}},
}
for _, pt := range ptMap {
for pos, ti := range pt.ti {
if ti < len(tracks) {
tryExec(db, `INSERT INTO playlist_tracks (playlist_id,track_id,position,added_by) VALUES ($1,$2,$3,$4)`,
playlists[pt.pi].id, tracks[ti].id, pos, playlists[pt.pi].user)
}
}
tryExec(db, `UPDATE playlists SET track_count=(SELECT COUNT(*) FROM playlist_tracks WHERE playlist_id=$1) WHERE id=$1`, playlists[pt.pi].id)
}
} else {
fmt.Println("Playlists already exist — skipping")
}
// ═════════════════════════════════════════════════════════════════════════
// FOLLOWS (18)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "follows") < 10 {
fmt.Print("Creating follows... ")
follows := [][2]int{
{6, 1}, {6, 2}, {6, 4}, {7, 1}, {7, 3}, {7, 4}, {7, 2},
{8, 1}, {8, 2}, {8, 3}, {8, 4}, {8, 5},
{1, 2}, {2, 1}, {1, 4}, {4, 1}, {3, 1}, {5, 2}, {5, 3},
}
c := 0
for _, f := range follows {
if _, err := db.Exec(`INSERT INTO follows (follower_id,followed_id) VALUES ($1,$2) ON CONFLICT DO NOTHING`, users[f[0]].id, users[f[1]].id); err == nil {
c++
}
}
fmt.Printf("%d created\n", c)
}
// ═════════════════════════════════════════════════════════════════════════
// CHAT ROOMS & MESSAGES
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "rooms") == 0 {
fmt.Print("Creating chat rooms & messages... ")
roomIDs := [3]string{uuid.NewString(), uuid.NewString(), uuid.NewString()}
roomData := []struct{ name, owner string }{
{"General", users[0].id}, {"Production Tips", amelieID}, {"Beat Marketplace", marcusID},
}
for i, r := range roomData {
tryExec(db, `INSERT INTO rooms (id,name,owner_id,creator_id,room_type,is_private,created_at,updated_at) VALUES ($1,$2,$3,$3,'group',false,NOW(),NOW())`, roomIDs[i], r.name, r.owner)
}
for _, rid := range roomIDs {
for _, u := range users[:8] {
tryExec(db, `INSERT INTO room_members (room_id,user_id,role) VALUES ($1,$2,'member') ON CONFLICT DO NOTHING`, rid, u.id)
}
}
msgs := []struct{ r, s int; c string }{
{0, 1, "Hey everyone! Welcome to Veza."}, {0, 2, "Glad to be here. Just uploaded some new beats!"},
{0, 6, "Love the vibes on this platform."}, {0, 3, "Anyone interested in some cinematic samples?"},
{0, 4, "Weekend set coming soon, stay tuned!"}, {1, 1, "What DAW is everyone using?"},
{1, 2, "Ableton all the way. FL Studio for quick ideas."}, {1, 3, "Pro Tools for recording, Reaper for mixing."},
{1, 5, "Logic Pro X here. Love the stock plugins."}, {2, 2, "New beat pack dropping this weekend. 10 beats, all original."},
{2, 7, "How much for exclusive rights?"}, {2, 2, "DM me for pricing on exclusives!"},
}
for i, m := range msgs {
ts := hoursAgo(len(msgs) - i)
tryExec(db, `INSERT INTO messages (room_id,sender_id,user_id,content,message_type,created_at,updated_at) VALUES ($1,$2,$2,$3,'text',$4,$4)`,
roomIDs[m.r], users[m.s].id, m.c, ts)
}
fmt.Printf("3 rooms, %d messages\n", len(msgs))
}
// ═════════════════════════════════════════════════════════════════════════
// TRACK PLAYS (analytics — fixed column names)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "track_plays") == 0 {
fmt.Print("Creating play history... ")
c := 0
listeners := []int{6, 7, 8}
sources := []string{"web", "mobile", "api"}
countries := []string{"FR", "US", "DE", "GB", "JP", "BR", "CA"}
for _, li := range listeners {
for _, t := range tracks {
// Each listener plays ~70% of tracks, some multiple times
plays := 0
if rand.Intn(10) < 7 {
plays = 1
}
if rand.Intn(10) < 3 {
plays = randBetween(2, 5) // replay
}
for p := 0; p < plays; p++ {
ts := daysAgo(randBetween(0, 45))
dur := t.duration * randBetween(60, 100) / 100 // 60-100% of track
tryExec(db, `INSERT INTO track_plays (track_id,user_id,duration,played_at,source,country_code,created_at,updated_at)
VALUES ($1,$2,$3,$4,$5,$6,$4,$4)`,
t.id, users[li].id, dur, ts, sources[rand.Intn(len(sources))], countries[rand.Intn(len(countries))])
c++
}
}
}
fmt.Printf("%d plays recorded\n", c)
}
// ═════════════════════════════════════════════════════════════════════════
// TRACK LIKES
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "track_likes") < 20 {
fmt.Print("Creating likes... ")
c := 0
for _, li := range []int{6, 7, 8} {
for i, t := range tracks {
if i%3 == 0 || i%5 == 0 || rand.Intn(4) == 0 {
if _, err := db.Exec(`INSERT INTO track_likes (track_id,user_id) VALUES ($1,$2) ON CONFLICT DO NOTHING`, t.id, users[li].id); err == nil {
c++
}
}
}
}
fmt.Printf("%d likes\n", c)
}
// ═════════════════════════════════════════════════════════════════════════
// COMMENTS
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "comments") == 0 {
fmt.Print("Creating comments... ")
commentData := []struct{ track, user int; content string }{
{0, 6, "This track is incredible, the synth work is amazing!"}, {0, 7, "Perfect for late night coding sessions."},
{0, 8, "Amelie never disappoints. 🔥"}, {1, 7, "Dark and moody, love it."},
{5, 6, "These loops are so clean."}, {5, 8, "Could listen to this on repeat all day."},
{7, 6, "Smooth R&B vibes, exactly what I needed."}, {10, 8, "Beautiful nature sounds, so calming."},
{14, 7, "DJ Renzo always brings the groove!"}, {14, 6, "This one gets the party started!"},
{18, 6, "Clara your voice is so beautiful."}, {18, 8, "Acoustic perfection."},
{20, 7, "This made me emotional, beautiful songwriting."}, {9, 6, "Lo-fi perfection for rainy days."},
{3, 8, "The production quality is top notch."}, {16, 7, "Deep house at its finest."},
{11, 6, "I can hear the ocean in my headphones."}, {19, 8, "Morning Light is my alarm song now."},
{4, 7, "Peak time techno! Need this in a set."}, {15, 6, "Funky Elevator is an instant classic."},
}
for _, cm := range commentData {
if cm.track < len(tracks) {
tryExec(db, `INSERT INTO comments (user_id,target_id,target_type,content,created_at,updated_at) VALUES ($1,$2,'track',$3,$4,$4)`,
users[cm.user].id, tracks[cm.track].id, cm.content, daysAgo(randBetween(0, 30)))
}
}
fmt.Printf("%d comments\n", len(commentData))
}
// ═════════════════════════════════════════════════════════════════════════
// NOTIFICATIONS (fixed: column "read" not "is_read", "content" not "message")
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "notifications") == 0 {
fmt.Print("Creating notifications... ")
notifs := []struct{ user int; ntype, title, content string }{
{1, "follow", "New follower", "music_lover started following you"},
{1, "follow", "New follower", "groove_hunter started following you"},
{2, "follow", "New follower", "night_owl started following you"},
{1, "like", "Track liked", "Someone liked your track Neon Dreams"},
{2, "like", "Track liked", "Someone liked your track Late Night Loops"},
{3, "like", "Track liked", "Someone liked your track Forest Whispers"},
{4, "comment", "New comment", "music_lover commented on Saturday Night Edit"},
{5, "comment", "New comment", "night_owl commented on Paper Boats"},
{1, "system", "Welcome", "Welcome to Veza! Start by uploading your first track."},
{6, "system", "Welcome", "Welcome to Veza! Discover amazing music from independent artists."},
{7, "system", "Welcome", "Welcome to Veza! Follow your favorite artists to see their new releases."},
{0, "system", "Admin alert", "New user registrations this week: 5"},
{2, "milestone", "Milestone reached", "Your track Late Night Loops just hit 100 plays!"},
{1, "milestone", "Milestone reached", "You now have 5 followers!"},
}
for _, n := range notifs {
tryExec(db, `INSERT INTO notifications (user_id,type,title,content,read,created_at,updated_at) VALUES ($1,$2,$3,$4,false,$5,$5)`,
users[n.user].id, n.ntype, n.title, n.content, daysAgo(randBetween(0, 14)))
}
fmt.Printf("%d created\n", len(notifs))
}
// ═════════════════════════════════════════════════════════════════════════
// PRODUCTS (marketplace — 12 products from creators)
// ═════════════════════════════════════════════════════════════════════════
type product struct{ id, seller, title, desc, ptype, license, category string; price float64; trackIdx int; bpm int; key string }
var products []product
if countRows(db, "products") == 0 {
fmt.Print("Creating marketplace products... ")
products = []product{
{uuid.NewString(), marcusID, "Lo-Fi Beats Pack Vol.1", "10 royalty-free lo-fi beats for content creators", "sample-pack", "non-exclusive", "beats", 29.99, -1, 80, "Cm"},
{uuid.NewString(), marcusID, "Trap Essentials", "5 hard-hitting trap beats ready to use", "sample-pack", "non-exclusive", "beats", 19.99, -1, 140, "Gm"},
{uuid.NewString(), amelieID, "Neon Dreams — Exclusive License", "Full exclusive rights to Neon Dreams", "beat", "exclusive", "electronic", 299.99, 0, 128, "Am"},
{uuid.NewString(), amelieID, "Synth Textures Pack", "50 custom synth one-shots and loops", "sample-pack", "non-exclusive", "samples", 14.99, -1, 0, ""},
{uuid.NewString(), amelieID, "Techno Stems — Midnight Protocol", "Individual stems for remix", "beat", "non-exclusive", "stems", 49.99, 1, 132, "Dm"},
{uuid.NewString(), renzoID, "Disco Edits Bundle", "3 disco edits ready for DJ sets", "sample-pack", "non-exclusive", "dj-tools", 24.99, -1, 120, "G"},
{uuid.NewString(), renzoID, "Saturday Night Edit — License", "Non-exclusive license for streaming", "beat", "non-exclusive", "house", 39.99, 14, 122, "G"},
{uuid.NewString(), sakuraID, "Cinematic Foley Collection", "200+ foley sounds from nature recordings", "sample-pack", "non-exclusive", "sfx", 34.99, -1, 0, ""},
{uuid.NewString(), sakuraID, "Ambient Textures Vol.1", "Layered ambient textures for film scoring", "sample-pack", "non-exclusive", "ambient", 19.99, -1, 0, ""},
{uuid.NewString(), claraID, "Acoustic Guitar Loops", "15 acoustic guitar loops in various keys", "sample-pack", "non-exclusive", "acoustic", 12.99, -1, 95, "G"},
{uuid.NewString(), claraID, "Paper Boats — Sync License", "Sync license for film/TV/ads", "beat", "non-exclusive", "sync", 149.99, 18, 95, "G"},
{uuid.NewString(), marcusID, "City Lights — Lease", "Standard lease for City Lights beat", "beat", "non-exclusive", "trap", 49.99, 8, 140, "Gm"},
}
for _, p := range products {
var trackID interface{}
if p.trackIdx >= 0 && p.trackIdx < len(tracks) {
trackID = tracks[p.trackIdx].id
}
tryExec(db, `INSERT INTO products (id,seller_id,title,description,price,currency,status,product_type,track_id,license_type,bpm,musical_key,category,created_at,updated_at)
VALUES ($1,$2,$3,$4,$5,'EUR','published',$6,$7,$8,$9,$10,$11,NOW()-interval '1 day'*$12,NOW())`,
p.id, p.seller, p.title, p.desc, p.price, p.ptype, trackID, p.license, p.bpm, p.key, p.category, randBetween(1, 30))
}
fmt.Printf("%d products\n", len(products))
} else {
fmt.Println("Products already exist — loading IDs...")
rows, _ := db.Query(`SELECT id,seller_id,title FROM products ORDER BY created_at LIMIT 12`)
for rows != nil && rows.Next() {
var p product
_ = rows.Scan(&p.id, &p.seller, &p.title)
products = append(products, p)
}
if rows != nil {
rows.Close()
}
}
// ═════════════════════════════════════════════════════════════════════════
// ORDERS & ORDER ITEMS (4 completed purchases)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "orders") == 0 && len(products) >= 6 {
fmt.Print("Creating orders... ")
orderData := []struct{ buyer int; productIdxs []int; total float64 }{
{6, []int{0, 3}, 44.98}, // music_lover buys lo-fi pack + synth textures
{7, []int{1, 5}, 44.98}, // groove_hunter buys trap essentials + disco edits
{8, []int{6, 9}, 52.98}, // night_owl buys saturday night license + acoustic loops
{6, []int{7}, 34.99}, // music_lover buys foley collection
}
for _, o := range orderData {
oid := uuid.NewString()
tryExec(db, `INSERT INTO orders (id,buyer_id,total_amount,currency,status,created_at,updated_at) VALUES ($1,$2,$3,'EUR','completed',$4,$4)`,
oid, users[o.buyer].id, o.total, daysAgo(randBetween(1, 20)))
for _, pi := range o.productIdxs {
if pi < len(products) {
tryExec(db, `INSERT INTO order_items (order_id,product_id,price) VALUES ($1,$2,$3)`, oid, products[pi].id, products[pi].price)
}
}
}
fmt.Printf("%d orders\n", len(orderData))
}
// ═════════════════════════════════════════════════════════════════════════
// DAILY TRACK STATS (last 30 days for top tracks)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "daily_track_stats") == 0 {
fmt.Print("Creating daily track stats... ")
c := 0
for _, t := range tracks[:10] { // top 10 tracks
for d := 0; d < 30; d++ {
date := daysAgo(d).Format("2006-01-02")
plays := randBetween(1, 25)
uniq := randBetween(1, plays)
complete := randBetween(0, uniq)
totalTime := plays * t.duration * randBetween(60, 100) / 100
avgCompl := float64(randBetween(50, 95)) / 100
tryExec(db, `INSERT INTO daily_track_stats (track_id,date,total_plays,unique_listeners,complete_listens,total_play_time,avg_completion_rate) VALUES ($1,$2,$3,$4,$5,$6,$7) ON CONFLICT DO NOTHING`,
t.id, date, plays, uniq, complete, totalTime, avgCompl)
c++
}
}
fmt.Printf("%d stat rows\n", c)
}
// ═════════════════════════════════════════════════════════════════════════
// COURSES & LESSONS (education)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "courses") == 0 {
fmt.Print("Creating courses & lessons... ")
courseData := []struct{ creator, title, slug, desc, category, level string; price int; lessonCount int }{
{amelieID, "Introduction to Music Production", "intro-music-production", "Learn the basics of music production with Ableton Live. From your first beat to a finished track.", "production", "beginner", 0, 8},
{amelieID, "Melodic Techno Masterclass", "melodic-techno-masterclass", "Deep dive into melodic techno production techniques, sound design, and arrangement.", "production", "intermediate", 4999, 12},
{marcusID, "Hip-Hop Beat Making 101", "hiphop-beatmaking-101", "Learn to make hard-hitting hip-hop beats from scratch. Sampling, drum programming, mixing.", "production", "beginner", 2999, 10},
{sakuraID, "Field Recording & Sound Design", "field-recording-sound-design", "Capture the world around you and turn it into cinematic soundscapes.", "sound-design", "intermediate", 3999, 6},
{claraID, "Songwriting for Beginners", "songwriting-beginners", "Find your voice, write meaningful lyrics, and structure your songs.", "songwriting", "beginner", 0, 5},
}
for _, cd := range courseData {
cid := uuid.NewString()
status := "published"
var publishedAt interface{} = daysAgo(randBetween(5, 40))
tryExec(db, `INSERT INTO courses (id,creator_id,title,slug,description,category,tags,price_cents,currency,pricing_model,status,level,language,lesson_count,published_at,created_at,updated_at)
VALUES ($1,$2,$3,$4,$5,$6,ARRAY['music','production'],$7,'EUR','fixed',$8,$9,'fr',$10,$11,$12,$12)`,
cid, cd.creator, cd.title, cd.slug, cd.desc, cd.category, cd.price, status, cd.level, cd.lessonCount, publishedAt, daysAgo(randBetween(10, 50)))
// Create lessons for this course
lessonTitles := []string{
"Getting Started", "Setting Up Your DAW", "Understanding Audio Basics", "Your First Beat",
"Melody and Harmony", "Sound Design Fundamentals", "Arrangement Techniques", "Mixing Basics",
"EQ and Compression", "Effects and Processing", "Mastering Your Track", "Final Project",
}
for li := 0; li < cd.lessonCount && li < len(lessonTitles); li++ {
tryExec(db, `INSERT INTO lessons (course_id,order_index,title,description,duration_seconds,is_preview_free,transcoding_status) VALUES ($1,$2,$3,$4,$5,$6,'completed')`,
cid, li, lessonTitles[li], fmt.Sprintf("Lesson %d of %s", li+1, cd.title), randBetween(300, 1800), li < 2)
}
}
fmt.Printf("%d courses\n", len(courseData))
// Enroll some users
rows, _ := db.Query(`SELECT id FROM courses LIMIT 5`)
var courseIDs []string
for rows != nil && rows.Next() {
var id string
_ = rows.Scan(&id)
courseIDs = append(courseIDs, id)
}
if rows != nil {
rows.Close()
}
for _, cid := range courseIDs {
for _, ui := range []int{6, 7, 8} {
if rand.Intn(3) == 0 {
tryExec(db, `INSERT INTO course_enrollments (user_id,course_id,status,purchased_at) VALUES ($1,$2,'active',NOW()-interval '1 day'*$3) ON CONFLICT DO NOTHING`,
users[ui].id, cid, randBetween(1, 20))
}
}
}
}
// ═════════════════════════════════════════════════════════════════════════
// GEAR ITEMS (creator equipment)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "gear_items") == 0 {
fmt.Print("Creating gear inventory... ")
gearData := []struct{ user int; name, cat, brand, model, status, condition string; price float64 }{
{1, "Ableton Push 3", "controller", "Ableton", "Push 3", "Active", "Excellent", 999},
{1, "Focal Shape 65", "monitors", "Focal", "Shape 65", "Active", "Good", 599},
{1, "RME Babyface Pro FS", "audio-interface", "RME", "Babyface Pro FS", "Active", "Excellent", 849},
{2, "Akai MPC One+", "sampler", "Akai", "MPC One+", "Active", "Good", 699},
{2, "Audio-Technica AT2020", "microphone", "Audio-Technica", "AT2020", "Active", "Good", 99},
{2, "Beyerdynamic DT 770 Pro", "headphones", "Beyerdynamic", "DT 770 Pro", "Active", "Fair", 159},
{3, "Zoom H6", "recorder", "Zoom", "H6", "Active", "Excellent", 349},
{3, "Sennheiser MKH 416", "microphone", "Sennheiser", "MKH 416", "Active", "Good", 999},
{4, "Pioneer DDJ-1000", "dj-controller", "Pioneer", "DDJ-1000", "Active", "Good", 1199},
{4, "Allen & Heath Xone:96", "mixer", "Allen & Heath", "Xone:96", "Active", "Excellent", 1899},
{5, "Martin D-28", "guitar", "Martin", "D-28", "Active", "Good", 2999},
{5, "Neumann U87", "microphone", "Neumann", "U87", "Active", "Excellent", 3199},
}
for _, g := range gearData {
tryExec(db, `INSERT INTO gear_items (user_id,name,category,brand,model,status,condition,purchase_price,currency,purchase_date,is_public,created_at,updated_at)
VALUES ($1,$2,$3,$4,$5,$6,$7,$8,'EUR',$9,true,NOW(),NOW())`,
users[g.user].id, g.name, g.cat, g.brand, g.model, g.status, g.condition, g.price, daysAgo(randBetween(30, 365)).Format("2006-01-02"))
}
fmt.Printf("%d items\n", len(gearData))
}
// ═════════════════════════════════════════════════════════════════════════
// LIVE STREAMS (scheduled + past)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "live_streams") == 0 {
fmt.Print("Creating live streams... ")
liveData := []struct{ user int; title, desc, cat string; isLive bool; viewers int }{
{4, "Friday Night Disco Set", "Live disco & house set from my studio", "dj-set", false, 0},
{1, "Production Session — New EP Preview", "Working on new melodic techno tracks live", "production", false, 0},
{2, "Beat Making LIVE — Taking Requests", "Making beats on the spot, drop your ideas in chat", "production", false, 0},
{5, "Acoustic Session — Unplugged", "Playing some originals and covers", "performance", false, 0},
}
for _, l := range liveData {
tryExec(db, `INSERT INTO live_streams (user_id,title,description,category,streamer_name,is_live,viewer_count,tags,scheduled_at,created_at,updated_at)
VALUES ($1,$2,$3,$4,$5,$6,$7,'[]'::jsonb,$8,NOW(),NOW())`,
users[l.user].id, l.title, l.desc, l.cat, users[l.user].display, l.isLive, l.viewers,
daysAgo(-randBetween(1, 14))) // future scheduled
}
fmt.Printf("%d streams\n", len(liveData))
}
// ═════════════════════════════════════════════════════════════════════════
// ANNOUNCEMENTS
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "announcements") == 0 {
fmt.Print("Creating announcements... ")
annData := []struct{ title, content, atype string }{
{"Welcome to Veza!", "We're thrilled to launch Veza — an ethical music platform built for artists and listeners. Explore, create, and connect.", "info"},
{"Marketplace Now Open", "Buy and sell beats, samples, and presets directly on the platform. Fair pricing, transparent licensing.", "feature"},
{"Scheduled Maintenance", "Brief maintenance window planned for Sunday 3am-5am CET. Streams may be briefly interrupted.", "warning"},
}
for _, a := range annData {
tryExec(db, `INSERT INTO announcements (title,content,type,is_active,starts_at,created_by,created_at) VALUES ($1,$2,$3,true,NOW(),$4,NOW())`,
a.title, a.content, a.atype, users[0].id)
}
fmt.Printf("%d announcements\n", len(annData))
}
// ═════════════════════════════════════════════════════════════════════════
// SUPPORT TICKETS
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "support_tickets") == 0 {
fmt.Print("Creating support tickets... ")
ticketData := []struct{ user int; email, subject, msg, cat, status string }{
{6, "listener1@veza.fr", "Cannot upload profile picture", "I keep getting an error when trying to upload my avatar. File is a 2MB JPEG.", "technical", "open"},
{7, "listener2@veza.fr", "How to create a playlist?", "I'm new here, how do I create a collaborative playlist?", "general", "resolved"},
{2, "marcus@veza.fr", "Payment not received for beat sale", "Sold a beat 5 days ago but haven't received the payout yet.", "billing", "open"},
{8, "listener3@veza.fr", "Feature request: dark mode scheduler", "Would love to have dark mode auto-switch at sunset.", "feature", "open"},
}
for _, t := range ticketData {
tryExec(db, `INSERT INTO support_tickets (user_id,email,subject,message,category,status,created_at) VALUES ($1,$2,$3,$4,$5,$6,$7)`,
users[t.user].id, t.email, t.subject, t.msg, t.cat, t.status, daysAgo(randBetween(0, 10)))
}
fmt.Printf("%d tickets\n", len(ticketData))
}
// ═════════════════════════════════════════════════════════════════════════
// API KEYS (developer portal)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "api_keys") == 0 {
fmt.Print("Creating API keys... ")
apiKeyData := []struct{ user int; name string; scopes string }{
{1, "Amelie Production Bot", "{read,write,tracks}"},
{2, "Marcus Beat Distributor", "{read,tracks,marketplace}"},
}
for _, k := range apiKeyData {
prefix := fmt.Sprintf("veza_%s", uuid.NewString()[:8])
hashedKey, _ := bcrypt.GenerateFromPassword([]byte(uuid.NewString()), 10)
tryExec(db, `INSERT INTO api_keys (user_id,name,prefix,hashed_key,scopes,created_at) VALUES ($1,$2,$3,$4,$5::text[],NOW())`,
users[k.user].id, k.name, prefix, string(hashedKey), k.scopes)
}
fmt.Printf("%d keys\n", len(apiKeyData))
}
// ═════════════════════════════════════════════════════════════════════════
// ANALYTICS EVENTS (general platform events)
// ═════════════════════════════════════════════════════════════════════════
if countRows(db, "analytics_events") == 0 {
fmt.Print("Creating analytics events... ")
c := 0
eventTypes := []string{"page_view", "track_play", "search", "playlist_create", "follow", "signup", "login"}
for d := 0; d < 14; d++ {
numEvents := randBetween(20, 80)
for e := 0; e < numEvents; e++ {
userIdx := rand.Intn(len(users))
evt := eventTypes[rand.Intn(len(eventTypes))]
tryExec(db, `INSERT INTO analytics_events (event_name,user_id,payload,created_at) VALUES ($1,$2,$3,$4)`,
evt, users[userIdx].id, fmt.Sprintf(`{"source":"web","page":"/dashboard","session_id":"%s"}`, uuid.NewString()[:8]),
daysAgo(d).Add(time.Duration(randBetween(0, 86400))*time.Second))
c++
}
}
fmt.Printf("%d events\n", c)
}
// ═════════════════════════════════════════════════════════════════════════
// SUMMARY
// ═════════════════════════════════════════════════════════════════════════
fmt.Println()
fmt.Println("╔═══════════════════════════════════════════════╗")
fmt.Println("║ Seed Complete! ║")
fmt.Println("╚═══════════════════════════════════════════════╝")
fmt.Println()
tables := []string{
"users", "tracks", "playlists", "follows", "rooms", "messages",
"track_plays", "track_likes", "comments", "notifications",
"products", "orders", "order_items", "daily_track_stats",
"courses", "lessons", "course_enrollments", "gear_items",
"live_streams", "announcements", "support_tickets", "api_keys", "analytics_events",
}
for _, t := range tables {
fmt.Printf(" %-24s %d rows\n", t, countRows(db, t))
}
fmt.Println()
fmt.Println("--- Login Credentials (Password123! for all) ---")
fmt.Println(" Admin: admin@veza.fr")
fmt.Println(" Creator: amelie@veza.fr / marcus@veza.fr / sakura@veza.fr")
fmt.Println(" Creator: djrenzo@veza.fr / clara@veza.fr")
fmt.Println(" Listener: listener1@veza.fr / listener2@veza.fr / listener3@veza.fr")
fmt.Println(" Moderator: mod@veza.fr")
fmt.Println()
fmt.Println(" Dashboard: http://veza.fr:5173/dashboard")
}

View file

@@ -50,6 +50,7 @@ require (
cloud.google.com/go/compute/metadata v0.3.0 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/BurntSushi/toml v1.6.0 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect

View file

@@ -6,6 +6,8 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=

View file

@@ -314,10 +314,9 @@ func (r *APIRouter) setupCoreProtectedRoutes(v1 *gin.RouterGroup) {
uploads.GET("/stats", uploadHandler.GetUploadStats())
}
// v0.803 ADM1: User report endpoint (authenticated users can report content)
reportServiceForUser := services.NewReportService(r.db.GormDB, r.logger)
reportHandlerForUser := handlers.NewReportHandler(reportServiceForUser)
protected.POST("/reports", reportHandlerForUser.CreateReport)
// v0.803 ADM1: User report endpoint — moved to routes_moderation.go (F412 enhanced reporting)
// reportServiceForUser := services.NewReportService(r.db.GormDB, r.logger)
// reportHandlerForUser := handlers.NewReportHandler(reportServiceForUser)
// v0.971: Client-visible feature flags (e.g. WEBRTC_CALLS for CallButton)
featureFlagSvc := services.NewFeatureFlagService(r.db.GormDB, r.logger)

View file

@@ -32,7 +32,7 @@ func (r *APIRouter) setupDistributionRoutes(router *gin.RouterGroup) {
// Track-specific distribution view
trackDistGroup := router.Group("/tracks")
trackDistGroup.Use(r.config.AuthMiddleware.RequireAuth())
trackDistGroup.GET("/:track_id/distributions", handler.GetTrackDistributions)
trackDistGroup.GET("/:id/distributions", handler.GetTrackDistributions)
// External royalties (creator view)
creatorGroup := router.Group("/creators/me")
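
The wildcard rename from :track_id to :id is most likely motivated by Gin's router, which panics at startup when the same path segment is registered under two different parameter names (here /tracks/:id from the track routes versus /tracks/:track_id/distributions). A small illustration, not taken from this codebase:

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.New()
	r.GET("/tracks/:id", func(c *gin.Context) { c.String(http.StatusOK, c.Param("id")) })
	// Same ":id" wildcard as above; registering ":track_id" here instead would
	// panic at startup with a wildcard conflict. The handler change further down
	// (c.Param("track_id") -> c.Param("id")) keeps the handler in sync.
	r.GET("/tracks/:id/distributions", func(c *gin.Context) { c.String(http.StatusOK, c.Param("id")) })
	r.Run(":18080")
}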

View file

@@ -97,10 +97,18 @@ func (r *APIRouter) setupTrackRoutes(router *gin.RouterGroup) {
tracks := router.Group("/tracks")
{
tracks.GET("", trackHandler.ListTracks)
if r.config.AuthMiddleware != nil {
tracks.GET("", r.config.AuthMiddleware.OptionalAuth(), trackHandler.ListTracks)
} else {
tracks.GET("", trackHandler.ListTracks)
}
tracks.GET("/search", trackHandler.SearchTracks)
tracks.GET("/suggested-tags", trackHandler.GetSuggestedTags)
tracks.GET("/:id", trackHandler.GetTrack)
if r.config.AuthMiddleware != nil {
tracks.GET("/:id", r.config.AuthMiddleware.OptionalAuth(), trackHandler.GetTrack)
} else {
tracks.GET("/:id", trackHandler.GetTrack)
}
tracks.GET("/:id/lyrics", trackHandler.GetLyrics)
tracks.GET("/:id/stats", trackHandler.GetTrackStats)
tracks.GET("/:id/waveform", trackHandler.GetWaveform)

View file

@@ -398,31 +398,9 @@ func NewConfig() (*Config, error) {
TransferRetryMaxAttempts: getEnvInt("TRANSFER_RETRY_MAX", 3),
TransferRetryInterval: getEnvDuration("TRANSFER_RETRY_INTERVAL", 5*time.Minute),
// Log Files Configuration
// In development, use ./logs if /var/log is not accessible
LogDir: func() string {
logDir := getEnv("LOG_DIR", "/var/log/veza")
// In development, prefer a local directory if /var/log is not accessible
if env == EnvDevelopment || env == "dev" {
if logDir == "/var/log/veza" {
// Try to create the directory to check permissions
if err := os.MkdirAll("/var/log/veza", 0755); err != nil {
// On failure, use ./logs
return "./logs"
}
// Check that we can write to it by creating a test file
testFile := "/var/log/veza/.test_write"
if f, err := os.Create(testFile); err != nil {
// Cannot write, use ./logs
return "./logs"
} else {
f.Close()
os.Remove(testFile)
}
}
}
return logDir
}(),
// Log Files Configuration — centralized in config/logging.toml
// Resolved via logging.LoadConfig() with env var overrides (LOG_DIR, LOG_LEVEL)
LogDir: logging.LoadConfig().ResolveLogDir(env),
}
// Initialize the SecretsProvider (T0037)
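
ResolveLogDir is called here but its body is not included in this hunk. Reconstructed from the inline closure it replaces, a plausible version (an assumption, not code from this commit) would live in the new logging package shown further down, which already imports os and path/filepath:

// Hypothetical sketch, mirroring the removed logic: in production keep the
// configured directory; in development fall back to ./logs when the default
// /var/log/veza location cannot be created or written to.
func (c *LogConfig) ResolveLogDir(env string) string {
	dir := c.Global.Dir // "/var/log/veza" unless LOG_DIR overrides it
	if env != "development" && env != "dev" {
		return dir
	}
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return "./logs"
	}
	probe := filepath.Join(dir, ".test_write")
	f, err := os.Create(probe)
	if err != nil {
		return "./logs"
	}
	f.Close()
	os.Remove(probe)
	return dir
}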

View file

@@ -54,7 +54,7 @@ func devDefaultCORSOrigins(appDomain string) []string {
"http://" + appDomain,
"http://" + appDomain + ":3000",
"http://" + appDomain + ":5173",
"http://" + appDomain + ":8080",
"http://" + appDomain + ":18080",
}
}

View file

@@ -18,8 +18,8 @@ func getRabbitMQURL(env string, appDomain string) string {
return "" // Will be validated in ValidateForEnvironment
}
// In development: defaults to veza:password on port 15672 (aligned with docker-compose)
// Port 15672 = host mapping, 5672 = internal container port
// In development: defaults to veza:password on port 15672
// Port 15672 = host mapping of the AMQP port 5672 (docker-compose.yml/dev.yml)
port := getEnv("RABBITMQ_PORT", "15672")
user := getEnv("RABBITMQ_USER", "veza")
pass := getEnv("RABBITMQ_PASS", "password")

View file

@@ -45,7 +45,7 @@ func getAuthRateLimitLoginWindow(env string) int {
// getDefaultRateLimitIPPerHour returns default hourly limit for non-auth (TASK-SEC-003)
func getDefaultRateLimitIPPerHour(env string) int {
if env == EnvDevelopment || env == EnvTest {
return getEnvInt("RATE_LIMIT_IP_PER_HOUR", 500) // More relaxed in dev
return getEnvInt("RATE_LIMIT_IP_PER_HOUR", 5000) // Very relaxed in dev
}
return getEnvInt("RATE_LIMIT_IP_PER_HOUR", 100) // 100 req/h in prod
}

View file

@@ -149,7 +149,7 @@ func (h *DistributionHandler) GetTrackDistributions(c *gin.Context) {
return
}
trackID, err := uuid.Parse(c.Param("track_id"))
trackID, err := uuid.Parse(c.Param("id"))
if err != nil {
RespondWithAppError(c, apperrors.NewValidationError("Invalid track ID"))
return

View file

@@ -31,9 +31,10 @@ func NewSearchHandlers(searchService *services.SearchService) {
// NewSearchHandlersWithInterface creates new search handlers with an interface (for testing)
func NewSearchHandlersWithInterface(searchService SearchServiceInterface) *SearchHandlers {
return &SearchHandlers{
SearchHandlersInstance = &SearchHandlers{
searchService: searchService,
}
return SearchHandlersInstance
}
// Search performs a full-text search across tracks, users, and playlists

View file

@@ -0,0 +1,227 @@
package logging
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/BurntSushi/toml"
)
// LogConfig holds the centralized logging configuration loaded from config/logging.toml.
// Environment variables override file values (highest priority).
type LogConfig struct {
Global GlobalConfig `toml:"global"`
Rotation RotationConfig `toml:"rotation"`
Backend BackendConfig `toml:"backend"`
Stream StreamConfig `toml:"stream"`
Frontend FrontendConfig `toml:"frontend"`
Aggregation LogAggregationConfig `toml:"aggregation"`
Permissions PermissionsConfig `toml:"permissions"`
}
type GlobalConfig struct {
Level string `toml:"level"`
Dir string `toml:"dir"`
Format string `toml:"format"`
}
type RotationConfig struct {
MaxSizeMB int `toml:"max_size_mb"`
MaxBackups int `toml:"max_backups"`
MaxAgeDays int `toml:"max_age_days"`
Compress bool `toml:"compress"`
RustRotation string `toml:"rust_rotation"`
RustMaxFiles int `toml:"rust_max_files"`
}
type BackendConfig struct {
Module string `toml:"module"`
Modules []string `toml:"modules"`
SlowRequestThresholdMs int `toml:"slow_request_threshold_ms"`
SamplingInitial int `toml:"sampling_initial"`
SamplingThereafter int `toml:"sampling_thereafter"`
BufferSizeKB int `toml:"buffer_size_kb"`
FlushIntervalMs int `toml:"flush_interval_ms"`
}
type StreamConfig struct {
Module string `toml:"module"`
IncludeSource bool `toml:"include_source"`
IncludeThreadIDs bool `toml:"include_thread_ids"`
}
type FrontendConfig struct {
Level string `toml:"level"`
Endpoint string `toml:"endpoint"`
SentryEnabled bool `toml:"sentry_enabled"`
}
type LogAggregationConfig struct {
Enabled bool `toml:"enabled"`
Endpoint string `toml:"endpoint"`
BatchSize int `toml:"batch_size"`
FlushInterval int `toml:"flush_interval_s"`
Timeout int `toml:"timeout_s"`
Labels string `toml:"labels"`
}
type PermissionsConfig struct {
DirMode string `toml:"dir_mode"`
FileMode string `toml:"file_mode"`
}
// LoadConfig loads logging configuration from config/logging.toml.
// Environment variables override file values.
// Returns sensible defaults if the file is missing.
func LoadConfig() *LogConfig {
cfg := defaultConfig()
// Try to find and load the TOML file
paths := []string{
"config/logging.toml",
"../config/logging.toml",
filepath.Join(os.Getenv("VEZA_ROOT"), "config/logging.toml"),
}
for _, p := range paths {
if _, err := os.Stat(p); err == nil {
if _, err := toml.DecodeFile(p, cfg); err != nil {
fmt.Fprintf(os.Stderr, "warning: failed to parse %s: %v (using defaults)\n", p, err)
}
break
}
}
// Environment variable overrides (highest priority)
applyEnvOverrides(cfg)
return cfg
}
func defaultConfig() *LogConfig {
return &LogConfig{
Global: GlobalConfig{
Level: "INFO",
Dir: "/var/log/veza",
Format: "auto",
},
Rotation: RotationConfig{
MaxSizeMB: 100,
MaxBackups: 10,
MaxAgeDays: 30,
Compress: true,
RustRotation: "hourly",
RustMaxFiles: 5,
},
Backend: BackendConfig{
Module: "backend-api",
Modules: []string{"db", "rabbitmq"},
SlowRequestThresholdMs: 1000,
SamplingInitial: 100,
SamplingThereafter: 100,
BufferSizeKB: 256,
FlushIntervalMs: 100,
},
Stream: StreamConfig{
Module: "stream",
IncludeSource: true,
IncludeThreadIDs: true,
},
Frontend: FrontendConfig{
Level: "auto",
Endpoint: "/api/v1/logs/frontend",
},
Aggregation: LogAggregationConfig{
BatchSize: 100,
FlushInterval: 5,
Timeout: 10,
},
Permissions: PermissionsConfig{
DirMode: "0755",
FileMode: "0640",
},
}
}
func applyEnvOverrides(cfg *LogConfig) {
if v := os.Getenv("LOG_LEVEL"); v != "" {
cfg.Global.Level = v
}
if v := os.Getenv("LOG_DIR"); v != "" {
cfg.Global.Dir = v
}
if v := os.Getenv("LOG_FORMAT"); v != "" {
cfg.Global.Format = v
}
if v := os.Getenv("SLOW_REQUEST_THRESHOLD_MS"); v != "" {
if n, err := strconv.Atoi(v); err == nil {
cfg.Backend.SlowRequestThresholdMs = n
}
}
if v := os.Getenv("LOG_AGGREGATION_ENABLED"); v != "" {
cfg.Aggregation.Enabled = strings.EqualFold(v, "true") || v == "1"
}
if v := os.Getenv("LOG_AGGREGATION_ENDPOINT"); v != "" {
cfg.Aggregation.Endpoint = v
}
if v := os.Getenv("LOG_AGGREGATION_BATCH_SIZE"); v != "" {
if n, err := strconv.Atoi(v); err == nil {
cfg.Aggregation.BatchSize = n
}
}
if v := os.Getenv("LOG_AGGREGATION_FLUSH_INTERVAL"); v != "" {
if n, err := strconv.Atoi(v); err == nil {
cfg.Aggregation.FlushInterval = n
}
}
if v := os.Getenv("LOG_AGGREGATION_TIMEOUT"); v != "" {
if n, err := strconv.Atoi(v); err == nil {
cfg.Aggregation.Timeout = n
}
}
if v := os.Getenv("LOG_AGGREGATION_LABELS"); v != "" {
cfg.Aggregation.Labels = v
}
// Resolve "auto" format based on APP_ENV
if cfg.Global.Format == "auto" {
env := strings.ToLower(os.Getenv("APP_ENV"))
if env == "production" || env == "staging" {
cfg.Global.Format = "json"
} else {
cfg.Global.Format = "text"
}
}
}
// ResolveLogDir ensures the log directory exists and is writable.
// Falls back to ./logs if the configured directory is not accessible.
func (c *LogConfig) ResolveLogDir(env string) string {
dir := c.Global.Dir
// Try to create the directory
if err := os.MkdirAll(dir, 0755); err != nil {
// Fallback to local ./logs in development
if env == "development" || env == "dev" || env == "test" {
fallback := "./logs"
_ = os.MkdirAll(fallback, 0755)
return fallback
}
}
// Verify writable
testFile := filepath.Join(dir, ".write_test")
if f, err := os.Create(testFile); err != nil {
fallback := "./logs"
_ = os.MkdirAll(fallback, 0755)
return fallback
} else {
f.Close()
os.Remove(testFile)
}
return dir
}
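The resolution order for the whole package is therefore: built-in defaults, then config/logging.toml, then environment variables, with ResolveLogDir degrading to ./logs when the target directory is missing or unwritable. A minimal usage sketch — the import path and env values are illustrative:

package main

import (
	"fmt"
	"os"

	"veza/internal/logging" // import path is an assumption
)

func main() {
	// Env overrides beat logging.toml (highest priority).
	os.Setenv("LOG_LEVEL", "DEBUG")
	os.Setenv("LOG_DIR", "/tmp/veza-logs")

	cfg := logging.LoadConfig()
	fmt.Println(cfg.Global.Level)                 // "DEBUG"
	fmt.Println(cfg.ResolveLogDir("development")) // "/tmp/veza-logs", or "./logs" on failure
}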

View file

@ -201,10 +201,11 @@ func (el *EndpointLimiter) createEndpointLimit(
errorMessage string,
) gin.HandlerFunc {
return func(c *gin.Context) {
// SEC-011: Never bypass rate limiting in production
// SEC-011: Never bypass rate limiting in production.
// E2E: Completely disable in test environment (APP_ENV=test) to prevent flaky tests.
if os.Getenv("APP_ENV") == "production" {
// Continue to rate limit
} else if os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
// Continue to rate limit — NEVER bypass in production
} else if os.Getenv("APP_ENV") == "test" || os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
c.Next()
return
}

View file

@ -122,7 +122,7 @@ func DDoSRateLimitMiddleware(redisClient *redis.Client) gin.HandlerFunc {
c.Next()
return
}
if os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
if os.Getenv("APP_ENV") == "test" || os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
c.Next()
return
}
@ -229,9 +229,8 @@ func (rl *RateLimiter) RateLimitMiddleware() gin.HandlerFunc {
return
}
// P1.6: Use explicit DISABLE_RATE_LIMIT_FOR_TESTS flag instead of env-based bypass.
// Only test runners should set this. Never use in production.
if os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
// P1.6: Bypass rate limiting in test environments. Never bypass in production.
if os.Getenv("APP_ENV") == "test" || os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
c.Next()
return
}

View file

@ -73,10 +73,11 @@ func (rl *SimpleRateLimiter) Middleware() gin.HandlerFunc {
return
}
// SEC-011: Never bypass rate limiting in production
// SEC-011: Never bypass rate limiting in production.
// E2E: Completely disable in test environment (APP_ENV=test) to prevent flaky tests.
if os.Getenv("APP_ENV") == "production" {
// Continue to rate limit
} else if os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
// Continue to rate limit — NEVER bypass in production
} else if os.Getenv("APP_ENV") == "test" || os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
c.Next()
return
}
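The same three-way decision (production never bypasses; APP_ENV=test or the explicit flag does) now appears in the endpoint limiter, the DDoS middleware, the Redis-backed limiter, and this simple limiter. One way to avoid the duplication — a sketch, not code from this commit — is a shared helper:

// shouldBypassRateLimit consolidates the bypass logic duplicated across
// the four middlewares touched above (hypothetical helper).
func shouldBypassRateLimit() bool {
	if os.Getenv("APP_ENV") == "production" {
		return false // SEC-011: never bypass in production
	}
	return os.Getenv("APP_ENV") == "test" ||
		os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true"
}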

View file

@ -72,6 +72,18 @@ func ResponseCache(cfg ResponseCacheConfig) gin.HandlerFunc {
return
}
// Skip caching for cookie-authenticated requests (httpOnly auth cookies)
if _, err := c.Cookie("access_token"); err == nil {
c.Next()
return
}
// Skip caching for auth endpoints (must never serve cached user data)
if strings.Contains(c.Request.URL.Path, "/auth/") {
c.Next()
return
}
// Generate cache key from URL + query params
cacheKey := generateCacheKey(cfg.KeyPrefix, c.Request.URL.RequestURI())
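generateCacheKey itself is not shown in this hunk; since it is fed the full RequestURI, the query string participates in the key. A plausible shape, assuming a hashed key (the real implementation lives elsewhere in this package):

// Plausible sketch of generateCacheKey — an assumption, not this commit's
// code. Requires crypto/sha256 and encoding/hex.
func generateCacheKey(prefix, requestURI string) string {
	sum := sha256.Sum256([]byte(requestURI))
	return prefix + ":" + hex.EncodeToString(sum[:])
}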

View file

@ -86,7 +86,7 @@ func DefaultUploadConfig() *UploadConfig {
ClamAVEnabled: true,
ClamAVRequired: true, // MOD-P1-002: ClamAV is required by default (fail-secure)
ClamAVAddress: "localhost:3310",
ClamAVAddress: "localhost:13310",
ClamAVClamdPath: "clamdscan",
QuarantineDir: "/quarantine",
}

View file

@ -15,6 +15,7 @@ import (
"go.uber.org/zap"
"gorm.io/gorm"
"gorm.io/gorm/clause"
gormlogger "gorm.io/gorm/logger"
)
// JobWorker handles background tasks via a DB-persisted queue
@ -183,16 +184,15 @@ func (w *JobWorker) fetchAndProcessJob(ctx context.Context, workerID int) {
var job Job
// Transaction to lock the job (SELECT ... FOR UPDATE SKIP LOCKED)
// Compatible with Postgres (and MySQL 8+). SQLite locking differs, but GORM handles the basics.
err := w.db.Transaction(func(tx *gorm.DB) error {
// Find a 'pending' or 'failed' job (auto-retry could be handled here, but we prefer 'pending' with RunAt <= Now)
// We look for status='pending' AND run_at <= NOW()
// Order by Priority ASC (1 first), then CreatedAt
// Silence GORM logger for this query — "record not found" is the normal case
// when no pending jobs exist, and it spams logs every second otherwise.
silentDB := w.db.Session(&gorm.Session{Logger: gormlogger.Discard})
err := silentDB.Transaction(func(tx *gorm.DB) error {
if err := tx.Clauses(clause.Locking{Strength: "UPDATE", Options: "SKIP LOCKED"}).
Where("status = ? AND run_at <= ?", "pending", time.Now()).
Order("priority ASC, created_at ASC").
First(&job).Error; err != nil {
return err // RecordNotFound is the typical case here
return err // ErrRecordNotFound is the normal idle case
}
// Update status to 'processing'
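For context, this poller only ever sees rows enqueued as "pending" with a due run_at. A minimal enqueue sketch using just the fields visible in the query above (field names beyond those are assumptions):

// Hypothetical enqueue helper — Job fields inferred from the WHERE/ORDER BY.
func enqueueJob(db *gorm.DB, priority int) error {
	job := Job{
		Status:   "pending",
		RunAt:    time.Now(), // due immediately; workers poll run_at <= NOW()
		Priority: priority,   // 1 runs first, per "priority ASC"
	}
	return db.Create(&job).Error
}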

View file

@ -0,0 +1,24 @@
-- Create products table (marketplace foundation)
-- Required before migrations 095-099, which run ADD COLUMN IF NOT EXISTS on products
CREATE TABLE IF NOT EXISTS products (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
seller_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
title VARCHAR(255) NOT NULL,
description TEXT,
price DECIMAL(10,2) NOT NULL,
currency VARCHAR(3) DEFAULT 'EUR',
status VARCHAR(50) DEFAULT 'draft',
product_type VARCHAR(50) NOT NULL,
track_id UUID REFERENCES tracks(id) ON DELETE SET NULL,
license_type VARCHAR(50),
created_at TIMESTAMPTZ DEFAULT NOW(),
updated_at TIMESTAMPTZ DEFAULT NOW(),
deleted_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS idx_products_seller_id ON products(seller_id);
CREATE INDEX IF NOT EXISTS idx_products_status ON products(status);
CREATE INDEX IF NOT EXISTS idx_products_product_type ON products(product_type);
CREATE INDEX IF NOT EXISTS idx_products_track_id ON products(track_id) WHERE track_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_products_deleted_at ON products(deleted_at);
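On the Go side, a model mapped to this table might look like the sketch below — a hypothetical mapping, not code from this commit; GORM naming conventions are assumed, and float64 for price is a simplification (a decimal type is safer for money):

// Hypothetical GORM model for the products table above.
type Product struct {
	ID          uuid.UUID      `gorm:"type:uuid;primaryKey;default:gen_random_uuid()"`
	SellerID    uuid.UUID      `gorm:"type:uuid;not null"`
	Title       string         `gorm:"size:255;not null"`
	Description string
	Price       float64        `gorm:"type:decimal(10,2);not null"`
	Currency    string         `gorm:"size:3;default:EUR"`
	Status      string         `gorm:"size:50;default:draft"`
	ProductType string         `gorm:"size:50;not null"`
	TrackID     *uuid.UUID     `gorm:"type:uuid"`
	LicenseType *string        `gorm:"size:50"`
	CreatedAt   time.Time
	UpdatedAt   time.Time
	DeletedAt   gorm.DeletedAt `gorm:"index"`
}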

View file

@ -0,0 +1,46 @@
-- Create orders and order_items tables (missing from prior migrations)
-- Required before 100_orders_discount.sql, which ALTERs orders
CREATE TABLE IF NOT EXISTS orders (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
buyer_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
total_amount DECIMAL(10,2) NOT NULL,
currency VARCHAR(3) DEFAULT 'EUR',
status VARCHAR(50) DEFAULT 'pending',
payment_intent TEXT,
created_at TIMESTAMPTZ DEFAULT NOW(),
updated_at TIMESTAMPTZ DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_orders_buyer_id ON orders(buyer_id);
CREATE INDEX IF NOT EXISTS idx_orders_status ON orders(status);
CREATE INDEX IF NOT EXISTS idx_orders_created_at ON orders(created_at DESC);
CREATE TABLE IF NOT EXISTS order_items (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
order_id UUID NOT NULL REFERENCES orders(id) ON DELETE CASCADE,
product_id UUID NOT NULL REFERENCES products(id) ON DELETE CASCADE,
price DECIMAL(10,2) NOT NULL,
created_at TIMESTAMPTZ DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_order_items_order_id ON order_items(order_id);
CREATE INDEX IF NOT EXISTS idx_order_items_product_id ON order_items(product_id);
-- Licenses table (for purchased licenses)
CREATE TABLE IF NOT EXISTS licenses (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
buyer_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
track_id UUID NOT NULL REFERENCES tracks(id) ON DELETE CASCADE,
product_id UUID NOT NULL REFERENCES products(id) ON DELETE CASCADE,
order_id UUID NOT NULL REFERENCES orders(id) ON DELETE CASCADE,
type VARCHAR(50) NOT NULL,
rights JSONB,
downloads_left INT DEFAULT 3,
created_at TIMESTAMPTZ DEFAULT NOW(),
expires_at TIMESTAMPTZ,
revoked_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS idx_licenses_buyer_id ON licenses(buyer_id);
CREATE INDEX IF NOT EXISTS idx_licenses_order_id ON licenses(order_id);

View file

@ -11,6 +11,6 @@ CREATE TABLE IF NOT EXISTS co_listening_sessions (
CREATE INDEX IF NOT EXISTS idx_co_listening_sessions_host_id ON co_listening_sessions(host_id);
CREATE INDEX IF NOT EXISTS idx_co_listening_sessions_track_id ON co_listening_sessions(track_id);
CREATE INDEX IF NOT EXISTS idx_co_listening_sessions_expires_at ON co_listening_sessions(expires_at) WHERE expires_at > NOW();
CREATE INDEX IF NOT EXISTS idx_co_listening_sessions_expires_at ON co_listening_sessions(expires_at);
COMMENT ON TABLE co_listening_sessions IS 'v0.10.7 F481: Sessions for synchronized co-listening playback';

View file

@ -1,96 +1,132 @@
-- v0.12.4: Performance & Scalability — Critical indexes for query optimization
-- Reference: ORIGIN_PERFORMANCE_TARGETS.md §8.4
-- v0.12.4: Performance & Scalability -- Critical indexes for query optimization
-- Reference: ORIGIN_PERFORMANCE_TARGETS.md S8.4
-- NOTE: All indexes use IF NOT EXISTS and DO $$ guards for missing tables
-- ============================================================================
-- USERS — Frequently queried by email, username, creation date
-- USERS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
CREATE INDEX IF NOT EXISTS idx_users_username ON users(username);
CREATE INDEX IF NOT EXISTS idx_users_created_at ON users(created_at DESC);
-- ============================================================================
-- TRACKS — Core entity, heavy read traffic
-- TRACKS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_tracks_creator_id ON tracks(creator_id);
CREATE INDEX IF NOT EXISTS idx_tracks_genre ON tracks(genre);
CREATE INDEX IF NOT EXISTS idx_tracks_created_at ON tracks(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_tracks_status_created ON tracks(status, created_at DESC)
WHERE deleted_at IS NULL;
-- ============================================================================
-- MESSAGES — Chat performance (room+time range queries)
-- MESSAGES
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_messages_room_created ON messages(room_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_messages_sender_id ON messages(sender_id);
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='messages') THEN
CREATE INDEX IF NOT EXISTS idx_messages_room_created ON messages(room_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_messages_sender_id ON messages(sender_id);
END IF;
END $$;
-- ============================================================================
-- PLAYLISTS — Discovery and user playlists
-- PLAYLISTS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_playlists_creator_id ON playlists(creator_id);
CREATE INDEX IF NOT EXISTS idx_playlists_public_created ON playlists(is_public, created_at DESC)
WHERE deleted_at IS NULL;
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name='playlists' AND column_name='creator_id') THEN
CREATE INDEX IF NOT EXISTS idx_playlists_creator_id ON playlists(creator_id);
END IF;
IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name='playlists' AND column_name='is_public') THEN
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_playlists_public_created ON playlists(is_public, created_at DESC)';
END IF;
END $$;
-- ============================================================================
-- PLAYLIST TRACKS — Join table, ordering
-- PLAYLIST TRACKS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_playlist_tracks_playlist ON playlist_tracks(playlist_id, position);
CREATE INDEX IF NOT EXISTS idx_playlist_tracks_track ON playlist_tracks(track_id);
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='playlist_tracks') THEN
CREATE INDEX IF NOT EXISTS idx_playlist_tracks_playlist ON playlist_tracks(playlist_id, position);
CREATE INDEX IF NOT EXISTS idx_playlist_tracks_track ON playlist_tracks(track_id);
END IF;
END $$;
-- ============================================================================
-- FEED / SOCIAL — Follows, feed generation
-- FOLLOWS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_follows_follower ON follows(follower_id);
CREATE INDEX IF NOT EXISTS idx_follows_following ON follows(following_id);
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='follows') THEN
CREATE INDEX IF NOT EXISTS idx_follows_follower ON follows(follower_id);
CREATE INDEX IF NOT EXISTS idx_follows_following ON follows(followed_id);
END IF;
END $$;
-- ============================================================================
-- COMMENTS — Track comments and replies
-- COMMENTS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_comments_track_created ON comments(track_id, created_at DESC)
WHERE deleted_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_comments_user ON comments(user_id);
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='comments') THEN
CREATE INDEX IF NOT EXISTS idx_comments_user ON comments(user_id);
END IF;
END $$;
-- ============================================================================
-- NOTIFICATIONS — User notification inbox
-- NOTIFICATIONS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_notifications_user_created ON notifications(user_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_notifications_user_unread ON notifications(user_id, is_read)
WHERE is_read = false;
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='notifications') THEN
CREATE INDEX IF NOT EXISTS idx_notifications_user_created ON notifications(user_id, created_at DESC);
END IF;
END $$;
-- ============================================================================
-- ANALYTICS — Creator dashboard queries
-- ANALYTICS
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_track_plays_track_date ON track_plays(track_id, played_at DESC);
CREATE INDEX IF NOT EXISTS idx_track_plays_user ON track_plays(user_id, played_at DESC);
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='track_plays') THEN
CREATE INDEX IF NOT EXISTS idx_track_plays_track_date ON track_plays(track_id, played_at DESC);
CREATE INDEX IF NOT EXISTS idx_track_plays_user ON track_plays(user_id, played_at DESC);
END IF;
END $$;
-- ============================================================================
-- MARKETPLACE — Transactions and listings
-- MARKETPLACE (tables may not exist yet)
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_marketplace_listings_status ON marketplace_listings(status, created_at DESC)
WHERE deleted_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_marketplace_transactions_buyer ON marketplace_transactions(buyer_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_marketplace_transactions_seller ON marketplace_transactions(seller_id, created_at DESC);
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='marketplace_listings') THEN
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_marketplace_listings_status ON marketplace_listings(status, created_at DESC)';
END IF;
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='marketplace_transactions') THEN
CREATE INDEX IF NOT EXISTS idx_marketplace_transactions_buyer ON marketplace_transactions(buyer_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_marketplace_transactions_seller ON marketplace_transactions(seller_id, created_at DESC);
END IF;
END $$;
-- ============================================================================
-- EDUCATION — Course discovery and enrollment
-- EDUCATION (tables may not exist yet)
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_courses_status_created ON courses(status, created_at DESC)
WHERE deleted_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_course_enrollments_user ON course_enrollments(user_id, created_at DESC);
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='courses') THEN
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_courses_status_created ON courses(status, created_at DESC)';
END IF;
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='course_enrollments') THEN
CREATE INDEX IF NOT EXISTS idx_course_enrollments_user ON course_enrollments(user_id, created_at DESC);
END IF;
END $$;
-- ============================================================================
-- FULL-TEXT SEARCH (GIN indexes for PostgreSQL text search)
-- FULL-TEXT SEARCH
-- ============================================================================
CREATE INDEX IF NOT EXISTS idx_tracks_search_gin ON tracks
USING GIN(to_tsvector('english', COALESCE(title, '') || ' ' || COALESCE(description, '')));
CREATE INDEX IF NOT EXISTS idx_users_search_gin ON users
USING GIN(to_tsvector('english', COALESCE(username, '') || ' ' || COALESCE(display_name, '')));
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name='tracks' AND column_name='title') THEN
CREATE INDEX IF NOT EXISTS idx_tracks_search_gin ON tracks
USING GIN(to_tsvector('english', COALESCE(title, '') || ' ' || COALESCE(description, '')));
END IF;
IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name='users' AND column_name='display_name') THEN
CREATE INDEX IF NOT EXISTS idx_users_search_gin ON users
USING GIN(to_tsvector('english', COALESCE(username, '') || ' ' || COALESCE(display_name, '')));
END IF;
END $$;
-- ============================================================================
-- STATISTICS — Update planner statistics for optimal query plans
-- STATISTICS
-- ============================================================================
-- users, tracks and playlists are assumed to exist (indexed unguarded above);
-- the remaining tables may be missing, so ANALYZE is guarded like their indexes
ANALYZE users;
ANALYZE tracks;
ANALYZE playlists;
DO $$ BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='messages') THEN EXECUTE 'ANALYZE messages'; END IF;
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='follows') THEN EXECUTE 'ANALYZE follows'; END IF;
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema='public' AND table_name='notifications') THEN EXECUTE 'ANALYZE notifications'; END IF;
END $$;

View file

@ -22,4 +22,4 @@ ALLOWED_ORIGINS=https://veza.fr,https://app.veza.fr
# Storage
AUDIO_DIR=audio
STREAM_PORT=3002
STREAM_PORT=18082

View file

@ -37,7 +37,8 @@ RUN apk --no-cache add ca-certificates tzdata && \
# Create non-root user for security
RUN addgroup -g 1001 -S app && \
adduser -S app -u 1001 -G app -h /app -s /bin/sh
adduser -S app -u 1001 -G app -h /app -s /bin/sh && \
mkdir -p /var/log/veza && chown app:app /var/log/veza
# Set working directory
WORKDIR /app
@ -51,12 +52,12 @@ COPY --from=builder --chown=app:app /build/veza-stream-server/target/x86_64-unkn
# Switch to app user
USER app
# Expose port
EXPOSE 8082
# Expose port (matches VITE_STREAM_PORT and docker-compose mapping)
EXPOSE 18082
# Health check
HEALTHCHECK --interval=30s --timeout=15s --start-period=60s --retries=5 \
CMD wget --no-verbose --tries=1 --spider http://localhost:8082/health || exit 1
CMD wget --no-verbose --tries=1 --spider http://localhost:${PORT:-18082}/health || exit 1
# Run the application
CMD ["./stream_server"]

View file

@ -283,8 +283,8 @@ impl Default for AudioProcessor {
// Minimal default values if from_env fails
Config {
secret_key: "default_secret_key_for_testing_only".to_string(),
port: 8082,
backend_url: "http://backend-api:8080".to_string(),
port: 18082,
backend_url: "http://backend-api:18080".to_string(),
audio_dir: "./audio".to_string(),
allowed_origins: vec!["*".to_string()],
max_file_size: 100 * 1024 * 1024,

View file

@ -14,7 +14,7 @@ use uuid::Uuid;
#[command(author, version, about, long_about = None)]
struct Args {
/// WebSocket URL to connect to
#[arg(short, long, default_value = "ws://localhost:8082/ws")]
#[arg(short, long, default_value = "ws://localhost:18082/ws")]
url: String,
/// Number of simulated clients

View file

@ -220,8 +220,8 @@ impl Default for Config {
{
Self {
secret_key: "test_secret_key_minimum_32_characters_long".to_string(),
port: 3002,
backend_url: "http://localhost:8080".to_string(),
port: 18082,
backend_url: "http://localhost:18080".to_string(),
audio_dir: "./audio".to_string(),
allowed_origins: vec!["*".to_string()],
max_file_size: 104857600,
@ -331,14 +331,14 @@ impl Config {
let config = Self {
secret_key,
// UNIFIED PORT CONFIGURATION - Port 3002 per the deployment guide
// UNIFIED PORT CONFIGURATION - Port 18082, aligned with VITE_STREAM_PORT and docker-compose
port: env::var("STREAM_PORT")
.or_else(|_| env::var("PORT"))
.unwrap_or_else(|_| "3002".to_string())
.unwrap_or_else(|_| "18082".to_string())
.parse()
.map_err(|_| ConfigError::InvalidPort)?,
backend_url: env::var("BACKEND_URL")
.unwrap_or_else(|_| "http://backend-api:8080".to_string()),
.unwrap_or_else(|_| "http://backend-api:18080".to_string()),
audio_dir: env::var("AUDIO_DIR").unwrap_or_else(|_| "./audio".to_string()),
allowed_origins: env::var("ALLOWED_ORIGINS")
.unwrap_or_else(|_| "*".to_string())

View file

@ -115,17 +115,17 @@ pub fn create_routes(
}
}),
)
.route("/stream/:filename", get(stream_audio))
.route("/stream/{filename}", get(stream_audio))
.route("/internal/jobs/transcode", post(internal_transcode_handler))
// HLS transcoding routes
.route("/v1/stream/transcode", post(transcode_routes::transcode_handler))
.route("/v1/stream/job/:id", get(transcode_routes::get_job_status))
.route("/api/streams/jobs/:id/status", get(transcode_routes::get_job_status_detailed))
.route("/v1/stream/job/{id}", get(transcode_routes::get_job_status))
.route("/api/streams/jobs/{id}/status", get(transcode_routes::get_job_status_detailed))
// HLS transcode routes protected by JWT (A01 - security audit)
.merge(
Router::new()
.route("/v1/stream/hls/:job_id/index.m3u8", get(transcode_routes::serve_hls_manifest))
.route("/v1/stream/hls/:job_id/:segment", get(transcode_routes::serve_hls_segment))
.route("/v1/stream/hls/{job_id}/index.m3u8", get(transcode_routes::serve_hls_manifest))
.route("/v1/stream/hls/{job_id}/{segment}", get(transcode_routes::serve_hls_segment))
.route_layer(axum::middleware::from_fn_with_state(
state.clone(),
auth::hls_auth_middleware,
@ -137,15 +137,15 @@ pub fn create_routes(
.merge(
Router::new()
.route(
"/hls/:track_id/master.m3u8",
"/hls/{track_id}/master.m3u8",
get(hls_master_playlist_wrapper),
)
.route(
"/hls/:track_id/:quality/playlist.m3u8",
"/hls/{track_id}/{quality}/playlist.m3u8",
get(hls_quality_playlist_wrapper),
)
.route(
"/hls/:track_id/:quality/:segment",
"/hls/{track_id}/{quality}/{segment}",
get(hls_segment_wrapper),
)
.route_layer(axum::middleware::from_fn_with_state(
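Note: the `:param` → `{param}` rewrites here (and in the simple stream server below) follow axum 0.8's path-parameter syntax change, where the old colon form is rejected when the router is built.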

View file

@ -86,8 +86,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let app = Router::new()
.route("/", get(root))
.route("/health", get(health_check))
.route("/stream/:filename", get(stream_audio))
.route("/info/:filename", get(audio_info))
.route("/stream/{filename}", get(stream_audio))
.route("/info/{filename}", get(audio_info))
.route("/list", get(list_audio_files))
.layer(CompressionLayer::new())
.layer(cors)