veza/veza-backend-api/internal/workers/playback_analytics_worker_test.go
2025-12-03 20:29:37 +01:00

451 lines
10 KiB
Go

package workers
import (
"context"
"github.com/google/uuid"
"testing"
"time"
"veza-backend-api/internal/models"
"veza-backend-api/internal/services"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// setupTestDBForWorker opens an in-memory SQLite database and migrates the
// models the worker tests depend on. Any setup error fails the test
// immediately via require.
func setupTestDBForWorker(t *testing.T) *gorm.DB {
	// Mark this as a helper so assertion failures are reported at the
	// caller's line, not inside this function.
	t.Helper()

	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	require.NoError(t, err)

	// Migrate only the models exercised by the worker tests.
	err = db.AutoMigrate(
		&models.Track{},
		&models.PlaybackAnalytics{},
	)
	require.NoError(t, err)

	return db
}
// TestNewPlaybackAnalyticsWorker verifies that the constructor wires every
// dependency and explicit configuration value into the returned worker.
func TestNewPlaybackAnalyticsWorker(t *testing.T) {
	db := setupTestDBForWorker(t)
	log := zap.NewNop()
	svc := services.NewPlaybackAnalyticsService(db, log)

	w := NewPlaybackAnalyticsWorker(db, svc, log, 1000, 3, 3, 100, 5*time.Second)

	assert.NotNil(t, w)
	assert.Equal(t, db, w.db)
	assert.Equal(t, svc, w.analyticsService)
	assert.Equal(t, 1000, cap(w.queue))
	assert.Equal(t, 3, w.processingWorkers)
	assert.Equal(t, 3, w.maxRetries)
	assert.Equal(t, 100, w.batchSize)
	assert.Equal(t, 5*time.Second, w.batchTimeout)
	assert.False(t, w.IsRunning())
}
// TestNewPlaybackAnalyticsWorker_DefaultValues verifies that passing zero for
// every tunable makes the constructor fall back to its documented defaults.
func TestNewPlaybackAnalyticsWorker_DefaultValues(t *testing.T) {
	db := setupTestDBForWorker(t)
	log := zap.NewNop()
	svc := services.NewPlaybackAnalyticsService(db, log)

	// All-zero configuration: every value should be replaced by a default.
	w := NewPlaybackAnalyticsWorker(db, svc, log, 0, 0, 0, 0, 0)

	assert.NotNil(t, w)
	assert.Equal(t, 1000, cap(w.queue))
	assert.Equal(t, 3, w.processingWorkers)
	assert.Equal(t, 3, w.maxRetries)
	assert.Equal(t, 100, w.batchSize)
	assert.Equal(t, 5*time.Second, w.batchTimeout)
}
// TestPlaybackAnalyticsWorker_Enqueue checks that a valid analytics record is
// accepted into the queue and that a nil record is rejected without growing it.
func TestPlaybackAnalyticsWorker_Enqueue(t *testing.T) {
	db := setupTestDBForWorker(t)
	logger := zap.NewNop()
	analyticsService := services.NewPlaybackAnalyticsService(db, logger)
	worker := NewPlaybackAnalyticsWorker(
		db,
		analyticsService,
		logger,
		10,
		1,
		3,
		5,
		1*time.Second,
	)

	analytics := &models.PlaybackAnalytics{
		TrackID:        uuid.New(),
		UserID:         uuid.New(),
		PlayTime:       180,
		PauseCount:     2,
		SeekCount:      1,
		CompletionRate: 75.0,
		StartedAt:      time.Now(),
	}

	// A valid record must be queued.
	err := worker.Enqueue(analytics, 1)
	require.NoError(t, err)
	assert.Equal(t, 1, worker.GetQueueSize())

	// Enqueuing nil must fail and must leave the queue size untouched.
	err = worker.Enqueue(nil, 1)
	assert.Error(t, err)
	assert.Equal(t, 1, worker.GetQueueSize())
}
// TestPlaybackAnalyticsWorker_EnqueueBatch checks that a batch of records is
// queued as individual jobs and that an empty batch is rejected without
// changing the queue.
func TestPlaybackAnalyticsWorker_EnqueueBatch(t *testing.T) {
	db := setupTestDBForWorker(t)
	logger := zap.NewNop()
	analyticsService := services.NewPlaybackAnalyticsService(db, logger)
	worker := NewPlaybackAnalyticsWorker(
		db,
		analyticsService,
		logger,
		100,
		1,
		3,
		10,
		1*time.Second,
	)

	// Build several analytics records.
	analyticsList := make([]*models.PlaybackAnalytics, 5)
	for i := 0; i < 5; i++ {
		analyticsList[i] = &models.PlaybackAnalytics{
			TrackID:        uuid.New(),
			UserID:         uuid.New(),
			PlayTime:       180,
			CompletionRate: 75.0,
			StartedAt:      time.Now(),
		}
	}

	// The whole batch must be queued.
	err := worker.EnqueueBatch(analyticsList, 1)
	require.NoError(t, err)
	assert.Equal(t, 5, worker.GetQueueSize())

	// An empty batch must fail and must leave the queue size untouched.
	err = worker.EnqueueBatch([]*models.PlaybackAnalytics{}, 1)
	assert.Error(t, err)
	assert.Equal(t, 5, worker.GetQueueSize())
}
// TestPlaybackAnalyticsWorker_StartStop checks the running-state transitions
// around Start and Stop. Polling with assert.Eventually replaces the original
// fixed sleeps, which were both slower and flaky under load.
func TestPlaybackAnalyticsWorker_StartStop(t *testing.T) {
	db := setupTestDBForWorker(t)
	logger := zap.NewNop()
	analyticsService := services.NewPlaybackAnalyticsService(db, logger)
	worker := NewPlaybackAnalyticsWorker(
		db,
		analyticsService,
		logger,
		10,
		2,
		3,
		5,
		100*time.Millisecond,
	)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start the worker and wait (by polling) for it to report running.
	worker.Start(ctx)
	assert.Eventually(t, worker.IsRunning, time.Second, 10*time.Millisecond,
		"worker should report running after Start")

	// Stop the worker and wait for it to report stopped.
	worker.Stop()
	assert.Eventually(t, func() bool { return !worker.IsRunning() },
		time.Second, 10*time.Millisecond,
		"worker should report stopped after Stop")
}
// TestPlaybackAnalyticsWorker_ProcessBatch enqueues jobs against a running
// worker and verifies they are persisted. The original fixed 500 ms sleep is
// replaced by a require.Eventually poll of the row count, which is faster on
// average and far less flaky on slow CI machines.
func TestPlaybackAnalyticsWorker_ProcessBatch(t *testing.T) {
	db := setupTestDBForWorker(t)
	logger := zap.NewNop()
	analyticsService := services.NewPlaybackAnalyticsService(db, logger)

	// Create a track the analytics rows can reference.
	trackID := uuid.New()
	track := &models.Track{
		ID:       trackID,
		Title:    "Test Track",
		Duration: 180,
	}
	require.NoError(t, db.Create(track).Error)

	worker := NewPlaybackAnalyticsWorker(
		db,
		analyticsService,
		logger,
		10,
		1,
		3,
		5,
		100*time.Millisecond,
	)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start the worker so enqueued jobs are actually processed.
	worker.Start(ctx)
	defer worker.Stop()

	// Create and enqueue analytics records.
	analyticsList := make([]*models.PlaybackAnalytics, 3)
	for i := 0; i < 3; i++ {
		analyticsList[i] = &models.PlaybackAnalytics{
			TrackID:        trackID,
			UserID:         uuid.New(),
			PlayTime:       180,
			CompletionRate: 100.0,
			StartedAt:      time.Now(),
		}
		require.NoError(t, worker.Enqueue(analyticsList[i], 1))
	}

	// Poll until all three analytics rows have been flushed to the database.
	require.Eventually(t, func() bool {
		var count int64
		if err := db.Model(&models.PlaybackAnalytics{}).Count(&count).Error; err != nil {
			return false
		}
		return count >= 3
	}, 2*time.Second, 50*time.Millisecond,
		"analytics batch should be persisted by the worker")
}
// TestPlaybackAnalyticsWorker_CollectBatch verifies that collectBatch drains
// queued jobs into a batch and stamps the batch creation time.
func TestPlaybackAnalyticsWorker_CollectBatch(t *testing.T) {
	db := setupTestDBForWorker(t)
	log := zap.NewNop()
	svc := services.NewPlaybackAnalyticsService(db, log)
	w := NewPlaybackAnalyticsWorker(db, svc, log, 10, 1, 3, 5, 200*time.Millisecond)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Queue a few jobs for the collector to pick up.
	for range [3]struct{}{} {
		rec := &models.PlaybackAnalytics{
			TrackID:        uuid.New(),
			UserID:         uuid.New(),
			PlayTime:       180,
			CompletionRate: 75.0,
			StartedAt:      time.Now(),
		}
		require.NoError(t, w.Enqueue(rec, 1))
	}

	// Collect one batch and check it holds the queued jobs.
	batch := w.collectBatch(ctx, 0)
	assert.GreaterOrEqual(t, len(batch.Jobs), 3)
	assert.NotZero(t, batch.CreatedAt)
}
// TestPlaybackAnalyticsWorker_CollectBatch_Timeout verifies that collectBatch
// returns a partial batch once the batch timeout elapses, rather than waiting
// indefinitely for a full batch.
func TestPlaybackAnalyticsWorker_CollectBatch_Timeout(t *testing.T) {
	db := setupTestDBForWorker(t)
	log := zap.NewNop()
	svc := services.NewPlaybackAnalyticsService(db, log)

	// Batch size (10) exceeds the number of queued jobs, and the timeout is
	// short, so the collector must give up waiting and return early.
	w := NewPlaybackAnalyticsWorker(db, svc, log, 10, 1, 3, 10, 100*time.Millisecond)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Queue exactly one job.
	rec := &models.PlaybackAnalytics{
		TrackID:        uuid.New(),
		UserID:         uuid.New(),
		PlayTime:       180,
		CompletionRate: 75.0,
		StartedAt:      time.Now(),
	}
	require.NoError(t, w.Enqueue(rec, 1))

	// The collector should time out and hand back the single queued job.
	batch := w.collectBatch(ctx, 0)
	assert.Equal(t, 1, len(batch.Jobs))
}
// TestPlaybackAnalyticsWorker_GetStats verifies that GetStats mirrors the
// worker's configuration and reflects the live queue size.
func TestPlaybackAnalyticsWorker_GetStats(t *testing.T) {
	db := setupTestDBForWorker(t)
	log := zap.NewNop()
	svc := services.NewPlaybackAnalyticsService(db, log)
	w := NewPlaybackAnalyticsWorker(db, svc, log, 100, 3, 5, 50, 10*time.Second)

	// Fresh worker: stopped, empty queue, config echoed back verbatim.
	stats := w.GetStats()
	assert.False(t, stats.Running)
	assert.Equal(t, 0, stats.QueueSize)
	assert.Equal(t, 3, stats.Workers)
	assert.Equal(t, 5, stats.MaxRetries)
	assert.Equal(t, 50, stats.BatchSize)
	assert.Equal(t, 10*time.Second, stats.BatchTimeout)

	// Queue a handful of jobs and confirm the reported size follows.
	for range [5]struct{}{} {
		rec := &models.PlaybackAnalytics{
			TrackID:        uuid.New(),
			UserID:         uuid.New(),
			PlayTime:       180,
			CompletionRate: 75.0,
			StartedAt:      time.Now(),
		}
		require.NoError(t, w.Enqueue(rec, 1))
	}

	stats = w.GetStats()
	assert.Equal(t, 5, stats.QueueSize)
}
// TestPlaybackAnalyticsWorker_RetryFailedJobs verifies that retryFailedJobs
// re-enqueues failed jobs that are still under the retry limit and drops those
// at or over it. The original fixed sleep is replaced by an assert.Eventually
// poll, which is less flaky and returns as soon as the condition holds.
func TestPlaybackAnalyticsWorker_RetryFailedJobs(t *testing.T) {
	db := setupTestDBForWorker(t)
	logger := zap.NewNop()
	analyticsService := services.NewPlaybackAnalyticsService(db, logger)
	worker := NewPlaybackAnalyticsWorker(
		db,
		analyticsService,
		logger,
		10,
		1,
		3,
		5,
		100*time.Millisecond,
	)

	// Build jobs with increasing retry counts: 0, 1 and 2.
	jobs := make([]AnalyticsJob, 3)
	for i := 0; i < 3; i++ {
		jobs[i] = AnalyticsJob{
			ID: uuid.New(),
			Analytics: &models.PlaybackAnalytics{
				TrackID:        uuid.New(),
				UserID:         uuid.New(),
				PlayTime:       180,
				CompletionRate: 75.0,
				StartedAt:      time.Now(),
			},
			Retries: i,
		}
	}

	ctx := context.Background()

	// Retry the jobs. The third job already has 2 retries; after incrementing
	// it reaches 3, which meets maxRetries (3), so it must be dropped. Only
	// the first two jobs should land back in the queue.
	worker.retryFailedJobs(ctx, jobs, assert.AnError, 0)

	// Poll until the surviving jobs are visible in the queue.
	assert.Eventually(t, func() bool { return worker.GetQueueSize() >= 2 },
		time.Second, 10*time.Millisecond,
		"jobs under the retry limit should be re-enqueued")
}
// TestPlaybackAnalyticsWorker_QueueFull verifies that Enqueue reports a
// "queue is full" error once the queue has reached its capacity.
func TestPlaybackAnalyticsWorker_QueueFull(t *testing.T) {
	db := setupTestDBForWorker(t)
	log := zap.NewNop()
	svc := services.NewPlaybackAnalyticsService(db, log)

	// A deliberately tiny queue (capacity 2) so it is easy to fill.
	w := NewPlaybackAnalyticsWorker(db, svc, log, 2, 1, 3, 5, 1*time.Second)

	newRecord := func() *models.PlaybackAnalytics {
		return &models.PlaybackAnalytics{
			TrackID:        uuid.New(),
			UserID:         uuid.New(),
			PlayTime:       180,
			CompletionRate: 75.0,
			StartedAt:      time.Now(),
		}
	}

	// Fill the queue to capacity.
	for range [2]struct{}{} {
		require.NoError(t, w.Enqueue(newRecord(), 1))
	}

	// One more enqueue must fail with a queue-full error.
	err := w.Enqueue(newRecord(), 1)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "queue is full")
}