package services
|
|
|
|
import (
	"context"
	"fmt"
	"sort"

	"veza-backend-api/internal/models"

	"github.com/google/uuid"
	"go.uber.org/zap"
	"gorm.io/gorm"
)
|
|
|
|
// BitrateAdaptationService handles adaptive bitrate selection for audio
// streaming: it combines bandwidth-based recommendations with the client's
// reported buffer level and persists every adaptation decision for analytics.
// T0348: Create Bitrate Adaptation Service
type BitrateAdaptationService struct {
	db *gorm.DB // persistence layer for adaptation log entries

	bandwidthService *BandwidthDetectionService // maps raw bandwidth (bps) to a recommended bitrate

	logger *zap.Logger // never nil; defaults to a no-op logger in the constructor
}
|
|
|
|
// NewBitrateAdaptationService crée un nouveau service d'adaptation de bitrate
|
|
func NewBitrateAdaptationService(db *gorm.DB, bandwidthService *BandwidthDetectionService, logger *zap.Logger) *BitrateAdaptationService {
|
|
if logger == nil {
|
|
logger = zap.NewNop()
|
|
}
|
|
return &BitrateAdaptationService{
|
|
db: db,
|
|
bandwidthService: bandwidthService,
|
|
logger: logger,
|
|
}
|
|
}
|
|
|
|
// AdaptBitrate adapte le bitrate en fonction de la bande passante et du niveau de buffer
|
|
// trackID: ID de la piste audio
|
|
// userID: ID de l'utilisateur
|
|
// currentBitrate: bitrate actuel en kbps
|
|
// bandwidth: bande passante disponible en bps
|
|
// bufferLevel: niveau de buffer (0.0 à 1.0)
|
|
// Retourne le nouveau bitrate recommandé en kbps
|
|
// MIGRATION UUID: userID est maintenant int64
|
|
// MIGRATION UUID: userID migré vers uuid.UUID
|
|
func (s *BitrateAdaptationService) AdaptBitrate(ctx context.Context, trackID uuid.UUID, userID uuid.UUID, currentBitrate int, bandwidth int64, bufferLevel float64) (int, error) {
|
|
// Valider les paramètres
|
|
// Valider les paramètres
|
|
if trackID == uuid.Nil {
|
|
return currentBitrate, fmt.Errorf("0: %w", ErrInvalidTrackID)
|
|
}
|
|
if userID == uuid.Nil {
|
|
return currentBitrate, fmt.Errorf("nil UUID: %w", ErrInvalidUserID)
|
|
}
|
|
if currentBitrate <= 0 {
|
|
return currentBitrate, fmt.Errorf("invalid current bitrate: %d", currentBitrate)
|
|
}
|
|
if bufferLevel < 0 || bufferLevel > 1 {
|
|
return currentBitrate, fmt.Errorf("%f (must be between 0.0 and 1.0): %w", bufferLevel, ErrInvalidBufferLevel)
|
|
}
|
|
|
|
// Obtenir la recommandation de bitrate basée sur la bande passante
|
|
recommendedBitrate := s.bandwidthService.RecommendBitrate(bandwidth)
|
|
|
|
// Ajuster en fonction du niveau de buffer
|
|
// Si le buffer est faible (< 20%), ne pas augmenter le bitrate
|
|
if bufferLevel < 0.2 && recommendedBitrate > currentBitrate {
|
|
recommendedBitrate = currentBitrate
|
|
s.logger.Debug("Bitrate increase prevented due to low buffer",
|
|
zap.String("track_id", trackID.String()),
|
|
zap.String("user_id", userID.String()),
|
|
zap.Int("current_bitrate", currentBitrate),
|
|
zap.Int("recommended_bitrate", recommendedBitrate),
|
|
zap.Float64("buffer_level", bufferLevel))
|
|
}
|
|
|
|
// Si le buffer est très faible (<= 10%), réduire le bitrate
|
|
if bufferLevel <= 0.1 && recommendedBitrate >= currentBitrate {
|
|
// Réduire d'un niveau
|
|
// Réduire d'un niveau
|
|
switch currentBitrate {
|
|
case 320:
|
|
recommendedBitrate = 192
|
|
case 192:
|
|
recommendedBitrate = 128
|
|
default:
|
|
recommendedBitrate = 128
|
|
}
|
|
s.logger.Debug("Bitrate reduced due to very low buffer",
|
|
zap.String("track_id", trackID.String()),
|
|
zap.String("user_id", userID.String()),
|
|
zap.Int("current_bitrate", currentBitrate),
|
|
zap.Int("new_bitrate", recommendedBitrate),
|
|
zap.Float64("buffer_level", bufferLevel))
|
|
}
|
|
|
|
// Si le bitrate a changé, logger l'adaptation
|
|
if recommendedBitrate != currentBitrate {
|
|
reason := s.determineReason(currentBitrate, recommendedBitrate, bufferLevel)
|
|
|
|
log := &models.BitrateAdaptationLog{
|
|
TrackID: trackID,
|
|
UserID: userID,
|
|
OldBitrate: currentBitrate,
|
|
NewBitrate: recommendedBitrate,
|
|
Reason: reason,
|
|
NetworkBandwidth: intPtr(int(bandwidth)),
|
|
}
|
|
|
|
if err := s.db.WithContext(ctx).Create(log).Error; err != nil {
|
|
s.logger.Error("Failed to create bitrate adaptation log",
|
|
zap.Error(err),
|
|
zap.String("track_id", trackID.String()),
|
|
zap.String("user_id", userID.String()))
|
|
// Ne pas retourner l'erreur, l'adaptation peut continuer même si le log échoue
|
|
} else {
|
|
s.logger.Info("Bitrate adaptation logged",
|
|
zap.String("track_id", trackID.String()),
|
|
zap.String("user_id", userID.String()),
|
|
zap.Int("old_bitrate", currentBitrate),
|
|
zap.Int("new_bitrate", recommendedBitrate),
|
|
zap.String("reason", string(reason)))
|
|
}
|
|
}
|
|
|
|
return recommendedBitrate, nil
|
|
}
|
|
|
|
// determineReason détermine la raison de l'adaptation de bitrate
|
|
func (s *BitrateAdaptationService) determineReason(old, new int, bufferLevel float64) models.BitrateAdaptationReason {
|
|
// Si le buffer est faible, c'est la raison principale
|
|
if bufferLevel < 0.2 {
|
|
return models.BitrateReasonBufferLow
|
|
}
|
|
|
|
// Sinon, déterminer selon si on augmente ou diminue
|
|
if new > old {
|
|
return models.BitrateReasonNetworkFast
|
|
} else if new < old {
|
|
return models.BitrateReasonNetworkSlow
|
|
}
|
|
|
|
// Par défaut (ne devrait pas arriver)
|
|
return models.BitrateReasonNetworkSlow
|
|
}
|
|
|
|
// BitrateAnalytics aggregates bitrate-adaptation statistics for one track.
// T0354: Create Bitrate Adaptation Analytics Endpoint
type BitrateAnalytics struct {
	// Total number of adaptation log entries recorded for the track.
	TotalAdaptations int64 `json:"total_adaptations"`
	// Count of adaptations keyed by reason string.
	Reasons map[string]int64 `json:"reasons"`
	// Per-day adaptation counts, sorted chronologically.
	AdaptationsOverTime []AdaptationTimePoint `json:"adaptations_over_time"`
	// Average reported network bandwidth; nil when no entry recorded one.
	AverageBandwidth *float64 `json:"average_bandwidth,omitempty"`
}
|
|
|
|
// AdaptationTimePoint is one point in the adaptation timeline: the number of
// adaptations that occurred on a given day.
// NOTE: field order must stay (Date, Count) — GetAnalytics converts a
// structurally identical local type to this one.
type AdaptationTimePoint struct {
	Date string `json:"date"` // day in YYYY-MM-DD format
	Count int64 `json:"count"` // adaptations recorded on that day
}
|
|
|
|
// GetAnalytics récupère les statistiques d'adaptation de bitrate pour un track
|
|
// T0354: Create Bitrate Adaptation Analytics Endpoint
|
|
func (s *BitrateAdaptationService) GetAnalytics(ctx context.Context, trackID uuid.UUID) (*BitrateAnalytics, error) {
|
|
if trackID == uuid.Nil {
|
|
return nil, fmt.Errorf("0: %w", ErrInvalidTrackID)
|
|
}
|
|
|
|
analytics := &BitrateAnalytics{
|
|
Reasons: make(map[string]int64),
|
|
AdaptationsOverTime: []AdaptationTimePoint{},
|
|
}
|
|
|
|
// Compter le nombre total d'adaptations
|
|
var totalCount int64
|
|
err := s.db.WithContext(ctx).Model(&models.BitrateAdaptationLog{}).
|
|
Where("track_id = ?", trackID). // uuid.UUID
|
|
Count(&totalCount).Error
|
|
if err != nil {
|
|
s.logger.Error("Failed to count adaptations", zap.Error(err), zap.String("track_id", trackID.String()))
|
|
return nil, fmt.Errorf("failed to get analytics: %w", err)
|
|
}
|
|
analytics.TotalAdaptations = totalCount
|
|
|
|
// Compter par raison
|
|
type ReasonCount struct {
|
|
Reason string
|
|
Count int64
|
|
}
|
|
var reasonCounts []ReasonCount
|
|
err = s.db.WithContext(ctx).Model(&models.BitrateAdaptationLog{}).
|
|
Select("reason, COUNT(*) as count").
|
|
Where("track_id = ?", trackID). // uuid.UUID
|
|
Group("reason").
|
|
Scan(&reasonCounts).Error
|
|
if err != nil {
|
|
s.logger.Error("Failed to get reason counts", zap.Error(err), zap.String("track_id", trackID.String()))
|
|
return nil, fmt.Errorf("failed to get analytics: %w", err)
|
|
}
|
|
|
|
for _, rc := range reasonCounts {
|
|
analytics.Reasons[rc.Reason] = rc.Count
|
|
}
|
|
|
|
// Calculer la moyenne de bande passante (si disponible)
|
|
var avgBandwidth *float64
|
|
var avgResult struct {
|
|
Avg float64
|
|
}
|
|
err = s.db.WithContext(ctx).Model(&models.BitrateAdaptationLog{}).
|
|
Select("AVG(network_bandwidth) as avg").
|
|
Where("track_id = ? AND network_bandwidth IS NOT NULL", trackID). // uuid.UUID
|
|
Scan(&avgResult).Error
|
|
if err == nil && avgResult.Avg > 0 {
|
|
avgBandwidth = &avgResult.Avg
|
|
analytics.AverageBandwidth = avgBandwidth
|
|
}
|
|
|
|
// Évolution dans le temps (groupé par jour)
|
|
// Récupérer tous les logs et grouper par jour en Go pour compatibilité SQLite/PostgreSQL
|
|
var logs []models.BitrateAdaptationLog
|
|
err = s.db.WithContext(ctx).Model(&models.BitrateAdaptationLog{}).
|
|
Where("track_id = ?", trackID). // uuid.UUID
|
|
Order("created_at ASC").
|
|
Find(&logs).Error
|
|
if err == nil && len(logs) > 0 {
|
|
// Grouper par jour
|
|
dayCounts := make(map[string]int64)
|
|
for _, log := range logs {
|
|
// Extraire la date (YYYY-MM-DD)
|
|
dateStr := log.CreatedAt.Format("2006-01-02")
|
|
dayCounts[dateStr]++
|
|
}
|
|
|
|
// Convertir en slice triée
|
|
type DayCount struct {
|
|
Date string
|
|
Count int64
|
|
}
|
|
var sortedDays []DayCount
|
|
for date, count := range dayCounts {
|
|
sortedDays = append(sortedDays, DayCount{Date: date, Count: count})
|
|
}
|
|
|
|
// Trier par date (tri simple)
|
|
for i := 0; i < len(sortedDays)-1; i++ {
|
|
for j := i + 1; j < len(sortedDays); j++ {
|
|
if sortedDays[i].Date > sortedDays[j].Date {
|
|
sortedDays[i], sortedDays[j] = sortedDays[j], sortedDays[i]
|
|
}
|
|
}
|
|
}
|
|
|
|
// Ajouter aux analytics
|
|
for _, dc := range sortedDays {
|
|
analytics.AdaptationsOverTime = append(analytics.AdaptationsOverTime, AdaptationTimePoint(dc))
|
|
}
|
|
} else if err != nil {
|
|
s.logger.Warn("Failed to get adaptations over time", zap.Error(err))
|
|
// Continuer sans les données temporelles
|
|
}
|
|
|
|
return analytics, nil
|
|
}
|
|
|
|
// intPtr returns a pointer to a fresh copy of i.
func intPtr(i int) *int {
	v := i
	return &v
}
|