veza/veza-backend-api/tests/load/upload_load_test.go
senke a1000ce7fb style(backend): gofmt -w on 85 files (whitespace only)
backend-ci.yml's `test -z "$(gofmt -l .)"` strict gate (added in
13c21ac11) failed on a backlog of unformatted files. None of the
85 files in this commit had been edited since the gate was added
because no push touched veza-backend-api/** in between, so the
gate never fired until today's CI fixes triggered it.

The diff is exclusively whitespace alignment in struct literals
and trailing-space comments. `go build ./...` and the full test
suite (with VEZA_SKIP_INTEGRATION=1 -short) pass identically.
2026-04-14 12:22:14 +02:00

563 lines
15 KiB
Go

//go:build load
// +build load
package load
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"sync"
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"veza-backend-api/internal/core/track"
"veza-backend-api/internal/models"
"veza-backend-api/internal/services"
)
// setupLoadTestRouter builds a gin router wired to an in-memory SQLite
// database and (when reachable) a Redis client on DB 15, with the track
// upload routes registered behind a mock auth middleware that reads the
// X-User-ID header. It returns the router, the DB handle, the Redis client
// (nil when Redis is unavailable) and a cleanup func kept for API
// compatibility (actual cleanup is handled by t.TempDir and t.Cleanup).
func setupLoadTestRouter(t *testing.T) (*gin.Engine, *gorm.DB, *redis.Client, func()) {
	gin.SetMode(gin.TestMode)
	logger := zaptest.NewLogger(t)
	// Setup in-memory SQLite database
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	require.NoError(t, err)
	// SQLite disables FK enforcement by default; fail fast if the pragma
	// cannot be applied instead of silently ignoring the error.
	require.NoError(t, db.Exec("PRAGMA foreign_keys = ON").Error)
	// Auto-migrate models
	err = db.AutoMigrate(
		&models.User{},
		&models.Track{},
		&models.Role{},
		&models.UserRole{},
		&models.Permission{},
	)
	require.NoError(t, err)
	// Setup Redis (use REDIS_ADDR or REDIS_TEST_URL; default localhost:6379)
	redisAddr := os.Getenv("REDIS_ADDR")
	if redisAddr == "" {
		if u := os.Getenv("REDIS_TEST_URL"); u != "" {
			if opts, err := redis.ParseURL(u); err == nil {
				redisAddr = opts.Addr
			}
		}
		if redisAddr == "" {
			redisAddr = "localhost:6379"
		}
	}
	redisClient := redis.NewClient(&redis.Options{
		Addr: redisAddr,
		DB:   15, // Use DB 15 for tests
	})
	ctx := context.Background()
	_, err = redisClient.Ping(ctx).Result()
	if err != nil {
		// Redis is optional here: chunked-upload tests skip themselves when
		// the returned client is nil.
		t.Logf("Redis not available, skipping Redis-dependent tests: %v", err)
		redisClient = nil
	} else {
		// Cleanup Redis
		redisClient.FlushDB(ctx)
		t.Cleanup(func() {
			if redisClient != nil {
				redisClient.FlushDB(ctx)
				redisClient.Close()
			}
		})
	}
	// Setup services
	uploadDir := t.TempDir()
	chunksDir := uploadDir + "/chunks"
	// Previously the MkdirAll error was discarded; a failure here would
	// otherwise surface later as confusing chunk-write errors.
	require.NoError(t, os.MkdirAll(chunksDir, 0755))
	trackService := track.NewTrackService(db, logger, uploadDir)
	trackUploadService := services.NewTrackUploadService(db, logger)
	chunkService := services.NewTrackChunkService(chunksDir, redisClient, logger)
	likeService := services.NewTrackLikeService(db, logger)
	streamService := services.NewStreamService("http://localhost:8082", logger)
	trackHandler := track.NewTrackHandler(trackService, trackUploadService, chunkService, likeService, streamService)
	// Setup UploadValidator (disable ClamAV for load tests)
	uploadConfig := &services.UploadConfig{
		ClamAVEnabled:     false,
		ClamAVRequired:    false,
		MaxAudioSize:      100 * 1024 * 1024, // 100MB
		AllowedAudioTypes: []string{"audio/mpeg", "audio/flac", "audio/wav", "audio/ogg"},
	}
	uploadValidator, err := services.NewUploadValidator(uploadConfig, logger)
	require.NoError(t, err)
	trackHandler.SetUploadValidator(uploadValidator)
	// Create router
	router := gin.New()
	// Mock authentication middleware: trusts X-User-ID and stores the
	// parsed UUID in the gin context under "user_id".
	router.Use(func(c *gin.Context) {
		userIDStr := c.GetHeader("X-User-ID")
		if userIDStr != "" {
			uid, err := uuid.Parse(userIDStr)
			if err == nil {
				c.Set("user_id", uid)
			}
		}
		c.Next()
	})
	// Upload routes
	tracksGroup := router.Group("/api/v1/tracks")
	{
		tracksGroup.POST("", trackHandler.UploadTrack)
		tracksGroup.POST("/initiate", trackHandler.InitiateChunkedUpload)
		tracksGroup.POST("/chunk", trackHandler.UploadChunk)
		tracksGroup.POST("/complete", trackHandler.CompleteChunkedUpload)
		tracksGroup.GET("/:id/status", trackHandler.GetUploadStatus)
	}
	cleanup := func() {
		// Cleanup handled by t.TempDir() and t.Cleanup
	}
	return router, db, redisClient, cleanup
}
// createTestFile returns a buffer of the requested size filled with a
// deterministic repeating 0..255 byte pattern, suitable as fake audio
// payload for upload requests.
func createTestFile(t *testing.T, size int) *bytes.Buffer {
	var payload bytes.Buffer
	payload.Grow(size)
	for offset := 0; offset < size; offset++ {
		payload.WriteByte(byte(offset % 256))
	}
	return &payload
}
// createMultipartFormData builds a multipart/form-data body containing a
// single file part named fieldName/filename, and returns the encoded bytes
// plus the matching Content-Type header value (including the boundary).
func createMultipartFormData(t *testing.T, fieldName, filename string, fileData *bytes.Buffer) ([]byte, string) {
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, err := writer.CreateFormFile(fieldName, filename)
	require.NoError(t, err)
	_, err = io.Copy(part, fileData)
	require.NoError(t, err)
	// Close writes the terminating boundary; its error was previously
	// discarded, which could silently yield a truncated body that the
	// server rejects for unrelated-looking reasons.
	require.NoError(t, writer.Close())
	return buf.Bytes(), writer.FormDataContentType()
}
// TestLoad_ConcurrentSimpleUploads exercises the single-request upload
// endpoint under concurrency: 20 goroutines each POST a 100KB multipart
// file as one of 10 pre-created users, then the test aggregates success
// counts and latency and asserts an 80% success floor and a latency ceiling.
func TestLoad_ConcurrentSimpleUploads(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping load test in short mode")
	}
	router, db, _, cleanup := setupLoadTestRouter(t)
	defer cleanup()
	// Create test users
	numUsers := 10
	users := make([]uuid.UUID, numUsers)
	for i := 0; i < numUsers; i++ {
		userID := uuid.New()
		users[i] = userID
		user := &models.User{
			ID:       userID,
			Email:    fmt.Sprintf("user%d@example.com", i),
			Username: fmt.Sprintf("user%d", i),
		}
		db.Create(user)
	}
	// Test parameters
	concurrentUploads := 20
	fileSize := 1024 * 100 // 100KB per file
	var wg sync.WaitGroup
	// mu guards successCount, errorCount and durations, all written from
	// every upload goroutine.
	var mu sync.Mutex
	successCount := 0
	errorCount := 0
	var durations []time.Duration
	startTime := time.Now()
	// Launch concurrent uploads
	for i := 0; i < concurrentUploads; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			// Spread requests across the user pool round-robin.
			userID := users[index%numUsers]
			fileData := createTestFile(t, fileSize)
			body, contentType := createMultipartFormData(t, "file", fmt.Sprintf("test%d.mp3", index), fileData)
			req := httptest.NewRequest("POST", "/api/v1/tracks", bytes.NewBuffer(body))
			req.Header.Set("Content-Type", contentType)
			req.Header.Set("X-User-ID", userID.String())
			req.Header.Set("X-Content-Creator-Role", "true") // Mock role
			// Add form fields
			// NOTE(review): ParseMultipartForm consumes the request body and
			// caches the parsed form on the request; the extra fields below
			// are injected via req.Form rather than the multipart payload.
			// This relies on the handler reading the cached form — confirm
			// against the handler implementation.
			req.ParseMultipartForm(10 << 20)
			req.Form.Set("title", fmt.Sprintf("Test Track %d", index))
			req.Form.Set("artist", "Test Artist")
			req.Form.Set("is_public", "true")
			reqStart := time.Now()
			w := httptest.NewRecorder()
			router.ServeHTTP(w, req)
			duration := time.Since(reqStart)
			mu.Lock()
			// Both 202 (async processing) and 201 count as success.
			if w.Code == http.StatusAccepted || w.Code == http.StatusCreated {
				successCount++
			} else {
				errorCount++
				t.Logf("Upload %d failed with status %d: %s", index, w.Code, w.Body.String())
			}
			durations = append(durations, duration)
			mu.Unlock()
		}(i)
	}
	wg.Wait()
	totalTime := time.Since(startTime)
	// Calculate statistics
	avgDuration := time.Duration(0)
	for _, d := range durations {
		avgDuration += d
	}
	if len(durations) > 0 {
		avgDuration /= time.Duration(len(durations))
	}
	t.Logf("Concurrent Uploads Test Results:")
	t.Logf(" Total uploads: %d", concurrentUploads)
	t.Logf(" Successful: %d", successCount)
	t.Logf(" Failed: %d", errorCount)
	t.Logf(" Total time: %v", totalTime)
	t.Logf(" Average response time: %v", avgDuration)
	t.Logf(" Throughput: %.2f uploads/second", float64(concurrentUploads)/totalTime.Seconds())
	assert.Greater(t, successCount, concurrentUploads*8/10, "At least 80% of uploads should succeed")
	assert.Less(t, avgDuration, 5*time.Second, "Average response time should be less than 5 seconds")
}
// TestLoad_ConcurrentChunkedUploads exercises the chunked upload flow under
// concurrency: phase 1 initiates 5 uploads in parallel, phase 2 uploads all
// chunks (10 x 50KB per upload) in parallel, then the test asserts an 80%
// success floor on both initiations and chunk uploads.
func TestLoad_ConcurrentChunkedUploads(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping load test in short mode")
	}
	router, db, redisClient, cleanup := setupLoadTestRouter(t)
	defer cleanup()
	// Chunk session state depends on Redis; skip when it is unreachable.
	if redisClient == nil {
		t.Skip("Redis not available, skipping chunked upload load test")
	}
	// Create test user
	userID := uuid.New()
	user := &models.User{
		ID:       userID,
		Email:    "test@example.com",
		Username: "testuser",
	}
	db.Create(user)
	// Test parameters
	concurrentUploads := 5
	chunksPerUpload := 10
	chunkSize := 1024 * 50 // 50KB per chunk
	var wg sync.WaitGroup
	// mu guards successCount, errorCount, uploadIDs and (in phase 2) the
	// chunk counters.
	var mu sync.Mutex
	successCount := 0
	errorCount := 0
	uploadIDs := make([]string, concurrentUploads)
	startTime := time.Now()
	// Phase 1: Initiate all uploads
	for i := 0; i < concurrentUploads; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			reqBody := map[string]interface{}{
				"total_chunks": chunksPerUpload,
				"total_size":   int64(chunksPerUpload * chunkSize),
				"filename":     fmt.Sprintf("test%d.mp3", index),
			}
			body, _ := json.Marshal(reqBody)
			req := httptest.NewRequest("POST", "/api/v1/tracks/initiate", bytes.NewBuffer(body))
			req.Header.Set("Content-Type", "application/json")
			req.Header.Set("X-User-ID", userID.String())
			w := httptest.NewRecorder()
			router.ServeHTTP(w, req)
			mu.Lock()
			if w.Code == http.StatusOK {
				var response map[string]interface{}
				json.Unmarshal(w.Body.Bytes(), &response)
				// Only count the initiation as successful if the response
				// envelope actually contains a string upload_id.
				if data, ok := response["data"].(map[string]interface{}); ok {
					if uploadID, ok := data["upload_id"].(string); ok {
						uploadIDs[index] = uploadID
						successCount++
					}
				}
			} else {
				errorCount++
			}
			mu.Unlock()
		}(i)
	}
	wg.Wait()
	// Phase 2: Upload chunks concurrently
	var chunkWg sync.WaitGroup
	chunkSuccessCount := 0
	chunkErrorCount := 0
	for uploadIndex, uploadID := range uploadIDs {
		// Skip uploads whose initiation failed in phase 1.
		if uploadID == "" {
			continue
		}
		for chunkNum := 1; chunkNum <= chunksPerUpload; chunkNum++ {
			chunkWg.Add(1)
			go func(uploadIdx int, uploadID string, chunkNum int) {
				defer chunkWg.Done()
				chunkData := createTestFile(t, chunkSize)
				body, contentType := createMultipartFormData(t, "chunk", fmt.Sprintf("chunk%d", chunkNum), chunkData)
				req := httptest.NewRequest("POST", "/api/v1/tracks/chunk", bytes.NewBuffer(body))
				req.Header.Set("Content-Type", contentType)
				req.Header.Set("X-User-ID", userID.String())
				// ParseMultipartForm caches the multipart body; the chunk
				// metadata fields are then injected via req.Form.
				req.ParseMultipartForm(10 << 20)
				req.Form.Set("upload_id", uploadID)
				req.Form.Set("chunk_number", fmt.Sprintf("%d", chunkNum))
				req.Form.Set("total_chunks", fmt.Sprintf("%d", chunksPerUpload))
				req.Form.Set("total_size", fmt.Sprintf("%d", chunksPerUpload*chunkSize))
				req.Form.Set("filename", fmt.Sprintf("test%d.mp3", uploadIdx))
				w := httptest.NewRecorder()
				router.ServeHTTP(w, req)
				mu.Lock()
				if w.Code == http.StatusOK {
					chunkSuccessCount++
				} else {
					chunkErrorCount++
				}
				mu.Unlock()
			}(uploadIndex, uploadID, chunkNum)
		}
	}
	chunkWg.Wait()
	totalTime := time.Since(startTime)
	t.Logf("Concurrent Chunked Uploads Test Results:")
	t.Logf(" Total uploads initiated: %d", concurrentUploads)
	t.Logf(" Successful initiations: %d", successCount)
	t.Logf(" Total chunks uploaded: %d", chunkSuccessCount)
	t.Logf(" Failed chunks: %d", chunkErrorCount)
	t.Logf(" Total time: %v", totalTime)
	t.Logf(" Throughput: %.2f chunks/second", float64(chunkSuccessCount)/totalTime.Seconds())
	assert.Greater(t, successCount, concurrentUploads*8/10, "At least 80% of upload initiations should succeed")
	assert.Greater(t, chunkSuccessCount, concurrentUploads*chunksPerUpload*8/10, "At least 80% of chunks should upload successfully")
}
// TestLoad_ChunkedUploadStress drives the chunked-upload endpoints with a
// heavier profile: 10 uploads are initiated sequentially, then all 200
// chunk requests (20 x 100KB per upload) are fired concurrently. The only
// assertion is on total wall-clock time; per-chunk results are not tallied.
func TestLoad_ChunkedUploadStress(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping load test in short mode")
	}
	router, db, redisClient, cleanup := setupLoadTestRouter(t)
	defer cleanup()
	if redisClient == nil {
		t.Skip("Redis not available, skipping chunked upload stress test")
	}
	// Create test user
	userID := uuid.New()
	user := &models.User{
		ID:       userID,
		Email:    "test@example.com",
		Username: "testuser",
	}
	db.Create(user)
	// Test parameters - more aggressive
	numUploads := 10
	chunksPerUpload := 20
	chunkSize := 1024 * 100 // 100KB per chunk
	var wg sync.WaitGroup
	uploadIDs := make([]string, numUploads)
	startTime := time.Now()
	// Initiate uploads sequentially; uploadIDs is fully populated before
	// any chunk goroutine starts, so no locking is needed for it.
	for i := 0; i < numUploads; i++ {
		reqBody := map[string]interface{}{
			"total_chunks": chunksPerUpload,
			"total_size":   int64(chunksPerUpload * chunkSize),
			"filename":     fmt.Sprintf("stress_test_%d.mp3", i),
		}
		body, _ := json.Marshal(reqBody)
		req := httptest.NewRequest("POST", "/api/v1/tracks/initiate", bytes.NewBuffer(body))
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("X-User-ID", userID.String())
		w := httptest.NewRecorder()
		router.ServeHTTP(w, req)
		if w.Code == http.StatusOK {
			var response map[string]interface{}
			json.Unmarshal(w.Body.Bytes(), &response)
			if data, ok := response["data"].(map[string]interface{}); ok {
				if uploadID, ok := data["upload_id"].(string); ok {
					uploadIDs[i] = uploadID
				}
			}
		}
	}
	// Upload all chunks concurrently (stress test). The previous version
	// held a mutex around an empty critical section here; that dead
	// lock/unlock pair (and the now-unused mutex) has been removed.
	for uploadIndex, uploadID := range uploadIDs {
		if uploadID == "" {
			continue // initiation failed; skip its chunks
		}
		for chunkNum := 1; chunkNum <= chunksPerUpload; chunkNum++ {
			wg.Add(1)
			go func(uploadIdx int, uploadID string, chunkNum int) {
				defer wg.Done()
				chunkData := createTestFile(t, chunkSize)
				body, contentType := createMultipartFormData(t, "chunk", fmt.Sprintf("chunk%d", chunkNum), chunkData)
				req := httptest.NewRequest("POST", "/api/v1/tracks/chunk", bytes.NewBuffer(body))
				req.Header.Set("Content-Type", contentType)
				req.Header.Set("X-User-ID", userID.String())
				// ParseMultipartForm caches the multipart body; the chunk
				// metadata fields are then injected via req.Form.
				req.ParseMultipartForm(10 << 20)
				req.Form.Set("upload_id", uploadID)
				req.Form.Set("chunk_number", fmt.Sprintf("%d", chunkNum))
				req.Form.Set("total_chunks", fmt.Sprintf("%d", chunksPerUpload))
				req.Form.Set("total_size", fmt.Sprintf("%d", chunksPerUpload*chunkSize))
				req.Form.Set("filename", fmt.Sprintf("stress_test_%d.mp3", uploadIdx))
				w := httptest.NewRecorder()
				router.ServeHTTP(w, req)
			}(uploadIndex, uploadID, chunkNum)
		}
	}
	wg.Wait()
	totalTime := time.Since(startTime)
	totalChunks := numUploads * chunksPerUpload
	t.Logf("Chunked Upload Stress Test Results:")
	t.Logf(" Total uploads: %d", numUploads)
	t.Logf(" Chunks per upload: %d", chunksPerUpload)
	t.Logf(" Total chunks: %d", totalChunks)
	t.Logf(" Total time: %v", totalTime)
	t.Logf(" Throughput: %.2f chunks/second", float64(totalChunks)/totalTime.Seconds())
	assert.Less(t, totalTime, 60*time.Second, "Stress test should complete within 60 seconds")
}
// TestLoad_UploadStatusPolling hammers GET /api/v1/tracks/:id/status with
// 50 concurrent polls against a single track in uploading state and asserts
// that every poll returns 200 and the whole run completes quickly.
func TestLoad_UploadStatusPolling(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping load test in short mode")
	}
	router, db, _, cleanup := setupLoadTestRouter(t)
	defer cleanup()
	// Create test user and track
	userID := uuid.New()
	user := &models.User{
		ID:       userID,
		Email:    "test@example.com",
		Username: "testuser",
	}
	db.Create(user)
	trackID := uuid.New()
	// Named testTrack (not "track") so the local does not shadow the
	// imported track package within this function.
	testTrack := &models.Track{
		ID:     trackID,
		UserID: userID,
		Title:  "Test Track",
		Status: models.TrackStatusUploading,
	}
	db.Create(testTrack)
	// Test parameters
	concurrentPolls := 50
	var wg sync.WaitGroup
	// mu guards successCount, incremented from every polling goroutine.
	var mu sync.Mutex
	successCount := 0
	startTime := time.Now()
	for i := 0; i < concurrentPolls; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			req := httptest.NewRequest("GET", fmt.Sprintf("/api/v1/tracks/%s/status", trackID), nil)
			req.Header.Set("X-User-ID", userID.String())
			w := httptest.NewRecorder()
			router.ServeHTTP(w, req)
			mu.Lock()
			if w.Code == http.StatusOK {
				successCount++
			}
			mu.Unlock()
		}()
	}
	wg.Wait()
	totalTime := time.Since(startTime)
	t.Logf("Upload Status Polling Test Results:")
	t.Logf(" Total polls: %d", concurrentPolls)
	t.Logf(" Successful: %d", successCount)
	t.Logf(" Total time: %v", totalTime)
	t.Logf(" Throughput: %.2f polls/second", float64(concurrentPolls)/totalTime.Seconds())
	assert.Equal(t, concurrentPolls, successCount, "All status polls should succeed")
	assert.Less(t, totalTime, 5*time.Second, "Status polling should complete quickly")
}