veza/veza-backend-api/internal/core/track/service.go
senke b528050afa refactor(backend): extract upload + collaborators into sibling files
Two more cohesive blocks lifted out of monolithic files following the
same recipe as the marketplace refund split (commit 36ee3da1).

internal/core/track/service.go : 1639 → 1026 LOC
  Extracted to service_upload.go (640 LOC) :
    UploadTrack                       (multipart entry point)
    copyFileAsync                     (local/s3 dispatcher)
    copyFileAsyncLocal                (FS write path)
    copyFileAsyncS3                   (direct S3 stream path, v1.0.8)
    chunkStreamer interface           (helper for chunked → S3)
    CreateTrackFromChunkedUploadToS3  (v1.0.9 1.5 fast path)
    extFromContentType                (helper)
    MigrateLocalToS3IfConfigured      (post-assembly migration)
    mimeTypeForAudioExt               (helper)
    updateTrackStatus                 (status updater)
    cleanupFailedUpload               (rollback helper)
    CreateTrackFromPath               (no-multipart constructor)
  Removed `internal/monitoring` import from service.go (the only user
  was the upload path).

internal/handlers/playlist_handler.go : 1397 → 1107 LOC
  Extracted to playlist_handler_collaborators.go (309 LOC) :
    AddCollaboratorRequest, UpdateCollaboratorPermissionRequest DTOs
    AddCollaborator, RemoveCollaborator,
    UpdateCollaboratorPermission, GetCollaborators handlers
  All four handlers were a self-contained surface (one route group,
  one DTO pair, no shared helpers with the rest of the file).

Tests run after each split :
  go test ./internal/core/marketplace -short  →  PASS
  go test ./internal/core/track       -short  →  PASS
  go test ./internal/handlers          -short →  PASS

The tech-debt split target was three files at 1.7k+ / 1.6k+ / 1.4k+
LOC. After this commit + 36ee3da1 :
  marketplace/service.go            : 1737 → 1340  (-397)
  track/service.go                  : 1639 → 1026  (-613)
  handlers/playlist_handler.go      : 1397 → 1107  (-290)
  total reduction  : 4773 → 3473    (-1300, -27%)

Each receiver still has a clear "main" file ; the extracted siblings
encapsulate one concern apiece. Future splits should follow the same
naming pattern (service_<concern>.go,
playlist_handler_<concern>.go) so a quick `ls` shows the file
organisation matches the feature surface.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-01 04:10:43 +02:00

1025 lines
34 KiB
Go

package track
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"mime/multipart"
"os"
"path/filepath"
"strconv"
"strings"
"time" // MOD-P2-008: Ajouté pour timeout asynchrone
"veza-backend-api/internal/config"
"veza-backend-api/internal/core/discover"
"veza-backend-api/internal/database"
"veza-backend-api/internal/models"
"veza-backend-api/internal/services"
"veza-backend-api/internal/types"
"github.com/google/uuid"
"go.uber.org/zap"
"gorm.io/gorm"
)
// Per-user quota limits, enforced by CheckUserQuota and reported by GetUserQuota.
const (
	MaxTracksPerUser  = 1000                        // maximum number of tracks a single user may own
	MaxStoragePerUser = 100 * 1024 * 1024 * 1024 // per-user storage budget in bytes (100GB)
)
// Sentinel error values for track operations. The service wraps them with
// context via fmt.Errorf("%w: ..."); callers match them with errors.Is.
var (
	// ErrInvalidTrackFormat is returned when the file format is invalid.
	ErrInvalidTrackFormat = errors.New("invalid track format")
	// ErrTrackTooLarge is returned when the file exceeds the maximum allowed size.
	ErrTrackTooLarge = errors.New("track file too large")
	// ErrTrackQuotaExceeded is returned when the user has reached their track-count quota.
	ErrTrackQuotaExceeded = errors.New("track quota exceeded")
	// ErrStorageQuotaExceeded is returned when the user has reached their storage quota.
	ErrStorageQuotaExceeded = errors.New("storage quota exceeded")
	// ErrTrackNotFound is returned when a track cannot be found.
	ErrTrackNotFound = errors.New("track not found")
	// ErrNetworkError is returned on network failures (timeout, connection).
	ErrNetworkError = errors.New("network error")
	// ErrStorageError is returned on storage failures.
	ErrStorageError = errors.New("storage error")
	// ErrForbidden is returned when the user lacks permission for the action.
	ErrForbidden = errors.New("forbidden")
)
// StreamServiceInterface defines the minimal interface for triggering HLS
// transcoding on the stream server.
// INT-02: used to call the stream server's /internal/jobs/transcode endpoint
// after a track upload completes.
type StreamServiceInterface interface {
	// StartProcessing asks the stream server to transcode the given track
	// from its on-disk filePath.
	StartProcessing(ctx context.Context, trackID uuid.UUID, filePath string) error
}
// S3StorageInterface defines the minimal S3 surface used by TrackService.
// v1.0.8 Phase 1 — a narrow interface keeps the service testable without
// requiring real AWS credentials or a MinIO container in unit tests.
// *services.S3StorageService satisfies this interface.
type S3StorageInterface interface {
	// UploadStream writes r to the bucket under key and returns the stored object's URL/location.
	UploadStream(ctx context.Context, r io.Reader, key, contentType string, size int64) (string, error)
	// GetSignedURL presigns a time-limited GET URL for key.
	GetSignedURL(ctx context.Context, key string, ttl time.Duration) (string, error)
	// DeleteFile removes the object stored under key.
	DeleteFile(ctx context.Context, key string) error
}
// TrackService handles track operations (CRUD, quotas, listing, stats).
// BE-SVC-001: optional cache service for track metadata.
// v0.943: batch operations delegated to TrackBatchService.
// v1.0.8: optional S3 storage backend (TRACK_STORAGE_BACKEND=s3).
type TrackService struct {
	db              *gorm.DB // Write operations (and read fallback when readDB is nil)
	readDB          *gorm.DB // Optional read replica for read-only operations
	logger          *zap.Logger
	uploadDir       string // Local filesystem root for uploaded audio files
	maxFileSize     int64  // Upper bound for a single upload, from config.AudioLimit
	cacheService    *services.CacheService
	streamService   StreamServiceInterface // INT-02: optional, triggers HLS transcoding after upload
	batchService    *TrackBatchService     // v0.943: batch operations
	discoverService *discover.Service      // v0.10.1: tags/genres sync
	// v1.0.8 Phase 1 — storage backend (local vs S3/MinIO).
	// storageBackend is "local" (default) or "s3". When "s3", copyFileAsync
	// writes to s3Service instead of the local filesystem.
	// Both remain nil/zero-value when running without S3.
	s3Service      S3StorageInterface
	storageBackend string
	s3Bucket       string // for logging/metrics only
	// v1.0.9 W3 Day 13 — optional CDN edge in front of S3/MinIO. When
	// set + IsEnabled, GetStorageURL routes browsers to a CDN signed URL
	// (origin-pull from MinIO) instead of presigning MinIO directly.
	// nil ⇒ keep the existing s3Service.GetSignedURL fallback.
	cdnService CDNURLSigner
}
// CDNURLSigner is the slice of services.CDNService that TrackService
// needs. Defined as an interface so tests can stub the CDN without
// pulling the full services package in. The shape mirrors
// services.CDNService.GenerateSignedURL + IsEnabled.
type CDNURLSigner interface {
	// GenerateSignedURL produces a time-limited signed URL for path on the CDN edge.
	GenerateSignedURL(path string, expiration time.Duration) (string, error)
	// IsEnabled reports whether the CDN should be used at all.
	IsEnabled() bool
}
// forRead selects the database handle for read-only queries: the read
// replica when one is configured, otherwise the primary connection.
func (s *TrackService) forRead() *gorm.DB {
	db := s.readDB
	if db == nil {
		db = s.db
	}
	return db
}
// NewTrackService builds a TrackService backed by a single primary DB
// connection (no read replica). An empty uploadDir falls back to the
// default "uploads/tracks".
func NewTrackService(db *gorm.DB, logger *zap.Logger, uploadDir string) *TrackService {
	dir := uploadDir
	if dir == "" {
		dir = "uploads/tracks"
	}
	return &TrackService{
		db:           db,
		readDB:       nil,
		logger:       logger,
		uploadDir:    dir,
		maxFileSize:  config.AudioLimit.Bytes(),
		batchService: NewTrackBatchService(db, logger),
	}
}
// NewTrackServiceWithDB builds a TrackService with read-replica support:
// writes go to db.GormDB, reads go to db.ForRead(). An empty uploadDir
// falls back to the default "uploads/tracks".
func NewTrackServiceWithDB(db *database.Database, logger *zap.Logger, uploadDir string) *TrackService {
	dir := uploadDir
	if dir == "" {
		dir = "uploads/tracks"
	}
	primary := db.GormDB
	return &TrackService{
		db:           primary,
		readDB:       db.ForRead(),
		logger:       logger,
		uploadDir:    dir,
		maxFileSize:  config.AudioLimit.Bytes(),
		batchService: NewTrackBatchService(primary, logger),
	}
}
// SetCacheService injects the optional metadata cache used by GetTrackByID
// and invalidated by UpdateTrack (BE-SVC-001). Pass nil to disable caching.
func (s *TrackService) SetCacheService(cacheService *services.CacheService) {
	s.cacheService = cacheService
}
// SetStreamService injects the streaming service used to trigger HLS
// transcoding after an upload succeeds.
// INT-02: enables the HLS pipeline — the stream server transcodes the track
// after a successful upload.
func (s *TrackService) SetStreamService(streamService StreamServiceInterface) {
	s.streamService = streamService
}
// SetDiscoverService injects the discover service used to sync a track's
// tags and genres on update (v0.10.1). Pass nil to fall back to the legacy
// single-column genre/tags updates.
func (s *TrackService) SetDiscoverService(d *discover.Service) {
	s.discoverService = d
}
// SetS3Storage wires the S3 storage backend (v1.0.8 Phase 1).
// backend is expected to be "local" or "s3" (validated by Config.ValidateForEnvironment).
// bucket is recorded for logging/metrics only.
// Passing svc=nil silently keeps the service in local-only mode regardless of backend.
func (s *TrackService) SetS3Storage(svc S3StorageInterface, backend, bucket string) {
	s.s3Service = svc
	s.storageBackend = backend
	s.s3Bucket = bucket
}
// SetCDNService wires an optional CDN edge in front of S3/MinIO. When
// set, GetStorageURL prefers CDN signed URLs over direct S3 presigns
// for s3-backed tracks. Pass nil to disable. (v1.0.9 W3 Day 13.)
func (s *TrackService) SetCDNService(cdn CDNURLSigner) {
	s.cdnService = cdn
}
// IsS3Backend reports whether the service writes new tracks to S3 (both the
// backend flag and a wired S3 service are required). Exposed for handlers
// that must branch after uploads, e.g. to skip the local-path-based stream
// server trigger. v1.0.8 Phase 1.
func (s *TrackService) IsS3Backend() bool {
	if s.s3Service == nil {
		return false
	}
	return s.storageBackend == "s3"
}
// GetStorageURL resolves a signed URL for a track whose row carries
// storage_backend='s3'. It returns ("", false, nil) for local-backed tracks,
// in which case the caller must fall back to filesystem serving.
//
// v1.0.8 Phase 2 — handlers (StreamTrack, DownloadTrack) use this to emit a
// 302 redirect to MinIO/S3 for tracks uploaded under s3 mode. TTL is
// caller-provided: 15min for streaming, 30min for downloads, 1h for the
// transcoder.
//
// v1.0.9 W3 Day 13 — when a CDN is wired and enabled, the CDN signed URL is
// preferred (origin-pull keeps the path equal to the storage key); a CDN
// signing failure only logs and falls back to a direct S3 presign so a CDN
// partial outage cannot block playback.
func (s *TrackService) GetStorageURL(ctx context.Context, track *models.Track, ttl time.Duration) (string, bool, error) {
	switch {
	case track == nil:
		return "", false, fmt.Errorf("track is nil")
	case track.StorageBackend != "s3" || track.StorageKey == nil || *track.StorageKey == "":
		// Local-backed track: no URL to sign.
		return "", false, nil
	case s.s3Service == nil:
		// Row says s3 but no S3 service wired. Should be prevented by
		// Config.ValidateForEnvironment rule 11, but guard here anyway.
		return "", false, fmt.Errorf("track %s is s3-backed but TrackService has no S3 service configured", track.ID)
	}
	key := *track.StorageKey
	if cdn := s.cdnService; cdn != nil && cdn.IsEnabled() {
		cdnURL, cdnErr := cdn.GenerateSignedURL(key, ttl)
		if cdnErr == nil && cdnURL != "" {
			return cdnURL, true, nil
		}
		// Log but keep going — the direct presign still works.
		s.logger.Warn("CDN signing failed, falling back to direct S3 presign",
			zap.String("track_id", track.ID.String()),
			zap.Error(cdnErr))
	}
	signed, err := s.s3Service.GetSignedURL(ctx, key, ttl)
	if err != nil {
		return "", false, fmt.Errorf("generate signed URL for track %s: %w", track.ID, err)
	}
	return signed, true, nil
}
// ValidateTrackFile validates an uploaded audio file's size, extension and
// leading magic bytes before anything is persisted.
//
// Client-facing failures are wrapped around the ErrTrackTooLarge /
// ErrInvalidTrackFormat sentinels (matchable with errors.Is); I/O failures
// are returned as plain wrapped errors.
//
// Fixes: the "Allowed formats" message previously listed only
// MP3/FLAC/WAV/OGG while .m4a and .aac are in fact accepted — the message
// now matches the extension whitelist. The "file too small" case is now
// wrapped in ErrInvalidTrackFormat for consistent sentinel handling.
func (s *TrackService) ValidateTrackFile(fileHeader *multipart.FileHeader) error {
	// 1. Size bounds.
	if fileHeader.Size > s.maxFileSize {
		return fmt.Errorf("%w: file size exceeds maximum allowed size of %s", ErrTrackTooLarge, config.AudioLimit.HumanReadable())
	}
	if fileHeader.Size == 0 {
		return fmt.Errorf("%w: file is empty", ErrInvalidTrackFormat)
	}
	// 2. Extension whitelist.
	ext := strings.ToLower(filepath.Ext(fileHeader.Filename))
	switch ext {
	case ".mp3", ".flac", ".wav", ".ogg", ".m4a", ".aac":
		// accepted
	default:
		return fmt.Errorf("%w: invalid file format. Allowed formats: MP3, FLAC, WAV, OGG, M4A, AAC", ErrInvalidTrackFormat)
	}
	// 3. Content sniffing: read the first bytes and check magic numbers so a
	// renamed non-audio file is rejected.
	file, err := fileHeader.Open()
	if err != nil {
		// FIX #10: log the error with context before bubbling it up.
		s.logger.Error("Failed to open file for validation",
			zap.String("filename", fileHeader.Filename),
			zap.Int64("size", fileHeader.Size),
			zap.Error(err),
		)
		return fmt.Errorf("failed to open file: %w", err)
	}
	defer file.Close()
	header := make([]byte, 12)
	n, err := file.Read(header)
	if err != nil && err != io.EOF {
		// FIX #10: log the error with context before bubbling it up.
		s.logger.Error("Failed to read file header for validation",
			zap.String("filename", fileHeader.Filename),
			zap.Error(err),
		)
		return fmt.Errorf("failed to read file header: %w", err)
	}
	if n < 4 {
		// Too short to carry any recognizable magic number.
		return fmt.Errorf("%w: file too small to validate", ErrInvalidTrackFormat)
	}
	isValidFormat := false
	headerStr := string(header[:n])
	// MP3: ID3v2 tag ("ID3") or an MPEG frame sync (0xFF then 0b111x_xxxx).
	if strings.HasPrefix(headerStr, "ID3") || (header[0] == 0xFF && (header[1]&0xE0) == 0xE0) {
		isValidFormat = true
	}
	// FLAC stream marker "fLaC".
	if strings.HasPrefix(headerStr, "fLaC") {
		isValidFormat = true
	}
	// WAV: "RIFF" container with the "WAVE" form type at offset 8.
	if strings.HasPrefix(headerStr, "RIFF") && len(headerStr) >= 12 && string(header[8:12]) == "WAVE" {
		isValidFormat = true
	}
	// OGG page marker "OggS".
	if strings.HasPrefix(headerStr, "OggS") {
		isValidFormat = true
	}
	// M4A/AAC in an MP4 container: "ftyp" box with an M4A/mp4 brand.
	// NOTE(review): raw ADTS .aac streams carry no "ftyp" box and would be
	// rejected here even though the extension is allowed — confirm intent.
	if strings.Contains(headerStr, "ftyp") && (strings.Contains(headerStr, "M4A") || strings.Contains(headerStr, "mp4")) {
		isValidFormat = true
	}
	if !isValidFormat {
		return fmt.Errorf("%w: invalid audio file format", ErrInvalidTrackFormat)
	}
	return nil
}
// TrackMetadata carries the optional, user-supplied metadata accompanying an upload.
type TrackMetadata struct {
	Title    string
	Artist   string
	Album    string
	Genre    string
	Year     int
	IsPublic bool
}
// UserQuota describes a user's current usage against the hard quota limits.
type UserQuota struct {
	TracksCount  int64 `json:"tracks_count"`  // tracks currently owned
	TracksLimit  int64 `json:"tracks_limit"`  // MaxTracksPerUser
	StorageUsed  int64 `json:"storage_used"`  // bytes
	StorageLimit int64 `json:"storage_limit"` // bytes (MaxStoragePerUser)
}
// CheckUserQuota verifies that the user may upload a file of fileSize bytes:
// the track count must stay below MaxTracksPerUser and total storage
// (including the new file) below MaxStoragePerUser. Returns
// ErrTrackQuotaExceeded / ErrStorageQuotaExceeded on violation.
func (s *TrackService) CheckUserQuota(ctx context.Context, userID uuid.UUID, fileSize int64) error {
	// MOD-P2-008: creator_id is the real column name (not user_id).
	var ownedTracks int64
	countErr := s.db.WithContext(ctx).Model(&models.Track{}).Where("creator_id = ?", userID).Count(&ownedTracks).Error
	if countErr != nil {
		// FIX #10: log the error with context.
		s.logger.Error("Failed to check track count for quota",
			zap.String("user_id", userID.String()),
			zap.Error(countErr),
		)
		return fmt.Errorf("failed to check track count: %w", countErr)
	}
	if ownedTracks >= MaxTracksPerUser {
		s.logger.Warn("Track quota exceeded",
			zap.String("user_id", userID.String()),
			zap.Int64("track_count", ownedTracks),
			zap.Int64("max_tracks", MaxTracksPerUser),
		)
		return ErrTrackQuotaExceeded
	}
	var usedBytes int64
	sumErr := s.db.WithContext(ctx).Model(&models.Track{}).
		Where("creator_id = ?", userID).
		Select("COALESCE(SUM(file_size), 0)").
		Scan(&usedBytes).Error
	if sumErr != nil {
		// FIX #10: log the error with context.
		s.logger.Error("Failed to check storage usage for quota",
			zap.String("user_id", userID.String()),
			zap.Error(sumErr),
		)
		return fmt.Errorf("failed to check storage usage: %w", sumErr)
	}
	if usedBytes+fileSize > MaxStoragePerUser {
		s.logger.Warn("Storage quota exceeded",
			zap.String("user_id", userID.String()),
			zap.Int64("total_size", usedBytes),
			zap.Int64("file_size", fileSize),
			zap.Int64("max_storage", MaxStoragePerUser),
		)
		return ErrStorageQuotaExceeded
	}
	return nil
}
// GetUserQuota reports the user's current track count and summed storage
// usage alongside the hard limits (MaxTracksPerUser / MaxStoragePerUser).
func (s *TrackService) GetUserQuota(ctx context.Context, userID uuid.UUID) (*UserQuota, error) {
	var owned int64
	if err := s.db.WithContext(ctx).Model(&models.Track{}).Where("creator_id = ?", userID).Count(&owned).Error; err != nil {
		return nil, fmt.Errorf("failed to get track count: %w", err)
	}
	var usedBytes int64
	sumErr := s.db.WithContext(ctx).Model(&models.Track{}).
		Where("creator_id = ?", userID).
		Select("COALESCE(SUM(file_size), 0)").
		Scan(&usedBytes).Error
	if sumErr != nil {
		return nil, fmt.Errorf("failed to get storage usage: %w", sumErr)
	}
	quota := &UserQuota{
		TracksCount:  owned,
		TracksLimit:  MaxTracksPerUser,
		StorageUsed:  usedBytes,
		StorageLimit: MaxStoragePerUser,
	}
	return quota, nil
}
// TrackListParams bundles the filter, sort and pagination options for
// listing tracks. Page/Limit drive offset pagination (ListTracks); Cursor
// drives keyset pagination (ListTracksWithCursor).
type TrackListParams struct {
	Page      int
	Limit     int
	Cursor    string     // v0.931: opaque cursor for keyset pagination (base64)
	UserID    *uuid.UUID // filter: creator
	Genre     *string    // filter: exact genre match
	Format    *string    // filter: exact format match
	SortBy    string     // "created_at", "title", "popularity"
	SortOrder string     // "asc", "desc"
}
// TrackListResult holds a page of tracks with the (possibly approximate)
// total and, for cursor mode, the opaque cursor for the next page ("" when
// there are no more results).
type TrackListResult struct {
	Tracks     []*models.Track
	Total      int64
	NextCursor string
}
// ListTracks returns one page of completed tracks plus the total row count,
// applying optional creator/genre/format filters and a whitelisted sort.
// Page size defaults to 20 and is capped at 100; page defaults to 1.
func (s *TrackService) ListTracks(ctx context.Context, params TrackListParams) ([]*models.Track, int64, error) {
	// Only completed tracks are listable; reads go to the replica when wired.
	q := s.forRead().WithContext(ctx).Model(&models.Track{}).Where("status = ?", models.TrackStatusCompleted)
	if params.UserID != nil {
		q = q.Where("creator_id = ?", *params.UserID)
	}
	if params.Genre != nil && *params.Genre != "" {
		q = q.Where("genre = ?", *params.Genre)
	}
	if params.Format != nil && *params.Format != "" {
		q = q.Where("format = ?", *params.Format)
	}
	// Count the total before sort/pagination are applied.
	var total int64
	if err := q.Count(&total).Error; err != nil {
		return nil, 0, fmt.Errorf("failed to count tracks: %w", err)
	}
	direction := "DESC"
	if params.SortOrder == "asc" {
		direction = "ASC"
	}
	// Whitelist the sort column (guards against SQL injection via SortBy).
	column := params.SortBy
	switch column {
	case "created_at", "title", "popularity":
		// allowed as-is
	default:
		column = "created_at"
	}
	if column == "popularity" {
		// Popularity is a derived metric: plays + likes.
		q = q.Order(fmt.Sprintf("(play_count + like_count) %s", direction))
	} else {
		q = q.Order(fmt.Sprintf("%s %s", column, direction))
	}
	// Clamp pagination: limit in [1..100] (default 20), page >= 1.
	limit := params.Limit
	if limit <= 0 {
		limit = 20
	}
	if limit > 100 {
		limit = 100
	}
	page := params.Page
	if page <= 0 {
		page = 1
	}
	q = q.Offset((page - 1) * limit).Limit(limit)
	var tracks []*models.Track
	if err := q.Preload("User").Find(&tracks).Error; err != nil {
		return nil, 0, fmt.Errorf("failed to list tracks: %w", err)
	}
	return tracks, total, nil
}
// ListTracksWithCursor uses keyset pagination on (created_at, id) for consistent performance.
// When params.Cursor is set, decodes it and fetches records after that point.
// Returns NextCursor for the next page when more results exist.
// v0.931: Cursor-based pagination for GET /tracks.
//
// Only sort_by=created_at (or empty) supports the keyset path; any other
// sort falls back to offset pagination via ListTracks.
func (s *TrackService) ListTracksWithCursor(ctx context.Context, params TrackListParams) (*TrackListResult, error) {
	// Cursor-based only supported for sort_by=created_at (default).
	if params.SortBy != "created_at" && params.SortBy != "" {
		// Fallback to offset-based listing.
		tracks, total, err := s.ListTracks(ctx, params)
		if err != nil {
			return nil, err
		}
		return &TrackListResult{Tracks: tracks, Total: total}, nil
	}
	// Clamp page size: default 20, maximum 100.
	if params.Limit <= 0 {
		params.Limit = 20
	}
	if params.Limit > 100 {
		params.Limit = 100
	}
	query := s.forRead().WithContext(ctx).Model(&models.Track{}).Where("status = ?", models.TrackStatusCompleted)
	if params.UserID != nil {
		query = query.Where("creator_id = ?", *params.UserID)
	}
	if params.Genre != nil && *params.Genre != "" {
		query = query.Where("genre = ?", *params.Genre)
	}
	if params.Format != nil && *params.Format != "" {
		query = query.Where("format = ?", *params.Format)
	}
	// Decode cursor: base64(created_at_unix_nano|uuid).
	// A malformed cursor is deliberately ignored: the zero values below leave
	// the keyset predicate off, so the caller gets the first page rather than
	// an error.
	var cursorCreatedAt int64
	var cursorID uuid.UUID
	if params.Cursor != "" {
		decoded, err := base64.RawURLEncoding.DecodeString(params.Cursor)
		if err == nil {
			parts := strings.SplitN(string(decoded), "|", 2)
			if len(parts) == 2 {
				if ts, err := strconv.ParseInt(parts[0], 10, 64); err == nil {
					cursorCreatedAt = ts
				}
				if uid, err := uuid.Parse(parts[1]); err == nil {
					cursorID = uid
				}
			}
		}
	}
	sortOrder := "DESC"
	if params.SortOrder == "asc" {
		sortOrder = "ASC"
	}
	// Keyset predicate: a tuple comparison on (created_at, id) matching the
	// sort direction, applied only when the cursor decoded to something.
	if sortOrder == "DESC" {
		if params.Cursor != "" && (cursorCreatedAt != 0 || cursorID != uuid.Nil) {
			query = query.Where("(created_at, id) < (?, ?)", time.Unix(0, cursorCreatedAt), cursorID)
		}
		query = query.Order("created_at DESC, id DESC")
	} else {
		if params.Cursor != "" && (cursorCreatedAt != 0 || cursorID != uuid.Nil) {
			query = query.Where("(created_at, id) > (?, ?)", time.Unix(0, cursorCreatedAt), cursorID)
		}
		query = query.Order("created_at ASC, id ASC")
	}
	// Fetch limit+1 to know if there's a next page.
	query = query.Limit(params.Limit + 1)
	var tracks []*models.Track
	if err := query.Preload("User").Find(&tracks).Error; err != nil {
		return nil, fmt.Errorf("failed to list tracks: %w", err)
	}
	var nextCursor string
	var total int64
	if len(tracks) > params.Limit {
		// Has more — the last *returned* row (index limit-1) becomes the
		// cursor for the next page; the over-fetched extra row is dropped.
		last := tracks[params.Limit-1]
		nextCursor = base64.RawURLEncoding.EncodeToString([]byte(
			fmt.Sprintf("%d|%s", last.CreatedAt.UnixNano(), last.ID.String())))
		tracks = tracks[:params.Limit]
	}
	// Total not computed for cursor mode (an exact COUNT is expensive):
	// len(tracks), or limit+1 as a "there is more" hint.
	total = int64(len(tracks))
	if nextCursor != "" {
		total = int64(params.Limit) + 1
	}
	return &TrackListResult{
		Tracks:     tracks,
		Total:      total,
		NextCursor: nextCursor,
	}, nil
}
// GetTrackByID loads a single track, consulting the metadata cache first
// (BE-SVC-001) and falling back to the database (read replica when wired).
// MOD-P1-003: User is preloaded to avoid N+1 queries if accessed later.
// Returns ErrTrackNotFound when no row matches.
func (s *TrackService) GetTrackByID(ctx context.Context, trackID uuid.UUID) (*models.Track, error) {
	cacheConfig := services.DefaultCacheConfig()
	// Cache lookup first.
	if s.cacheService != nil {
		var hit models.Track
		if err := s.cacheService.GetTrack(ctx, trackID, &hit); err == nil {
			return &hit, nil
		}
	}
	// Cache miss — load from the database.
	var track models.Track
	dbErr := s.forRead().WithContext(ctx).
		Preload("User").
		First(&track, "id = ?", trackID).Error
	if dbErr != nil {
		if dbErr == gorm.ErrRecordNotFound {
			return nil, ErrTrackNotFound
		}
		return nil, fmt.Errorf("failed to get track: %w", dbErr)
	}
	// Best-effort write-through; a cache failure only logs.
	if s.cacheService != nil {
		if cacheErr := s.cacheService.SetTrack(ctx, trackID, track, cacheConfig); cacheErr != nil {
			s.logger.Warn("Failed to cache track", zap.Error(cacheErr), zap.String("track_id", trackID.String()))
		}
	}
	return &track, nil
}
// UpdateTrackParams holds the optional fields for a track metadata update;
// nil pointers / nil slices mean "leave unchanged".
// v0.10.1: Genres supports multi-genre (max 3 slugs); Genre is the legacy
// single-genre field.
type UpdateTrackParams struct {
	Title      *string  `json:"title"`
	Artist     *string  `json:"artist"`
	Album      *string  `json:"album"`
	Genre      *string  `json:"genre"`  // legacy single
	Genres     []string `json:"genres"` // v0.10.1: max 3 slugs
	Tags       []string `json:"tags"`
	Year       *int     `json:"year"`
	BPM        *int     `json:"bpm"`
	MusicalKey *string  `json:"musical_key"`
	IsPublic   *bool    `json:"is_public"`
}
// UpdateTrack updates a track's metadata. Only the owner or an admin (flag
// carried in the context under "is_admin") may update; otherwise
// ErrForbidden. Tags/genres are synced through the discover service when
// wired (v0.10.1), else written to the legacy columns. The cached copy is
// invalidated before the write (BE-SVC-001) and the track is reloaded so
// the returned value reflects the new state.
func (s *TrackService) UpdateTrack(ctx context.Context, trackID uuid.UUID, userID uuid.UUID, params UpdateTrackParams) (*models.Track, error) {
	// Load the existing track (also yields ErrTrackNotFound early).
	track, err := s.GetTrackByID(ctx, trackID)
	if err != nil {
		return nil, err
	}
	// MOD-P1-003: owner-or-admin authorization.
	// NOTE(review): ctx.Value uses a plain string key ("is_admin"); this must
	// match the key type used by the middleware that sets it — confirm.
	isAdmin := false
	if adminVal := ctx.Value("is_admin"); adminVal != nil {
		if admin, ok := adminVal.(bool); ok {
			isAdmin = admin
		}
	}
	if track.UserID != userID && !isAdmin {
		return nil, ErrForbidden
	}
	// Build the column-level updates map from the non-nil params.
	updates := make(map[string]interface{})
	if params.Title != nil {
		if *params.Title == "" {
			return nil, fmt.Errorf("title cannot be empty")
		}
		updates["title"] = *params.Title
	}
	if params.Artist != nil {
		updates["artist"] = *params.Artist
	}
	if params.Album != nil {
		updates["album"] = *params.Album
	}
	// v0.10.1: Tags and Genres go through the discover service (track_tags,
	// track_genres join tables) when it is wired.
	if s.discoverService != nil {
		if params.Tags != nil {
			if err := s.discoverService.SyncTrackTags(ctx, trackID, params.Tags); err != nil {
				return nil, fmt.Errorf("sync tags: %w", err)
			}
		}
		if params.Genre != nil || len(params.Genres) > 0 {
			// Prefer the multi-genre slice; fall back to the legacy single genre.
			genres := params.Genres
			if len(genres) == 0 && params.Genre != nil {
				genres = []string{*params.Genre}
			}
			if err := s.discoverService.SyncTrackGenres(ctx, trackID, genres); err != nil {
				return nil, fmt.Errorf("sync genres: %w", err)
			}
		}
	} else {
		// Fallback when the discover service is not configured: legacy columns.
		if params.Genre != nil {
			updates["genre"] = *params.Genre
		}
		if params.Tags != nil {
			updates["tags"] = params.Tags
		}
	}
	if params.Year != nil {
		if *params.Year < 0 {
			return nil, fmt.Errorf("year cannot be negative")
		}
		updates["year"] = *params.Year
	}
	if params.BPM != nil {
		if *params.BPM < 0 || *params.BPM > 300 {
			return nil, fmt.Errorf("bpm must be between 0 and 300")
		}
		updates["bpm"] = *params.BPM
	}
	if params.MusicalKey != nil {
		updates["musical_key"] = *params.MusicalKey
	}
	if params.IsPublic != nil {
		updates["is_public"] = *params.IsPublic
	}
	// Invalidate the cache before the write so a concurrent read cannot
	// re-serve the stale entry after the update lands. Failure only logs.
	if s.cacheService != nil {
		if err := s.cacheService.InvalidateTrackCache(ctx, trackID); err != nil {
			s.logger.Warn("Failed to invalidate track cache", zap.Error(err), zap.String("track_id", trackID.String()))
		}
	}
	// v0.10.1: if only tags/genres were updated via discover, there are no
	// column updates — reload and return the fresh row.
	discoverUpdated := s.discoverService != nil && (params.Tags != nil || params.Genre != nil || len(params.Genres) > 0)
	if len(updates) == 0 {
		if discoverUpdated {
			updatedTrack, err := s.GetTrackByID(ctx, trackID)
			if err != nil {
				return nil, err
			}
			return updatedTrack, nil
		}
		return track, nil
	}
	// Apply the column updates.
	if err := s.db.WithContext(ctx).Model(track).Updates(updates).Error; err != nil {
		return nil, fmt.Errorf("failed to update track: %w", err)
	}
	// Reload so the caller sees the updated values (also re-primes the cache).
	updatedTrack, err := s.GetTrackByID(ctx, trackID)
	if err != nil {
		return nil, err
	}
	s.logger.Info("Track updated",
		zap.Any("track_id", trackID),
		zap.String("user_id", userID.String()),
		zap.Any("updates", updates),
	)
	return updatedTrack, nil
}
// DeleteTrack removes a track: its physical files (audio, waveform, cover
// art), its database row (related rows cascade via OnDelete:CASCADE
// constraints) and its cache entry. Only the owner or an admin (context
// value "is_admin", MOD-P1-003) may delete; otherwise ErrForbidden.
// File-removal failures are logged but do not abort the deletion.
//
// Fix: the cached copy written by GetTrackByID was previously left behind,
// so reads could keep returning the deleted track until the cache TTL
// expired. The cache entry is now invalidated after the DB delete, using
// the same pattern as UpdateTrack (BE-SVC-001).
func (s *TrackService) DeleteTrack(ctx context.Context, trackID uuid.UUID, userID uuid.UUID) error {
	// Load the existing track (yields ErrTrackNotFound early).
	track, err := s.GetTrackByID(ctx, trackID)
	if err != nil {
		return err
	}
	// MOD-P1-003: owner-or-admin authorization; the admin flag is carried
	// in the context.
	isAdmin := false
	if adminVal := ctx.Value("is_admin"); adminVal != nil {
		if admin, ok := adminVal.(bool); ok {
			isAdmin = admin
		}
	}
	if track.UserID != userID && !isAdmin {
		return ErrForbidden
	}
	// Physical files: best-effort removal; deletion proceeds on failure.
	if track.FilePath != "" {
		if err := os.Remove(track.FilePath); err != nil && !os.IsNotExist(err) {
			s.logger.Warn("Failed to delete track file",
				zap.Any("track_id", trackID),
				zap.String("file_path", track.FilePath),
				zap.Error(err),
			)
		}
	}
	if track.WaveformPath != "" {
		if err := os.Remove(track.WaveformPath); err != nil && !os.IsNotExist(err) {
			s.logger.Warn("Failed to delete waveform file",
				zap.Any("track_id", trackID),
				zap.String("waveform_path", track.WaveformPath),
				zap.Error(err),
			)
		}
	}
	if track.CoverArtPath != "" {
		if err := os.Remove(track.CoverArtPath); err != nil && !os.IsNotExist(err) {
			s.logger.Warn("Failed to delete cover art file",
				zap.Any("track_id", trackID),
				zap.String("cover_art_path", track.CoverArtPath),
				zap.Error(err),
			)
		}
	}
	// Database row; GORM cascades related rows via OnDelete:CASCADE.
	if err := s.db.WithContext(ctx).Delete(track).Error; err != nil {
		return fmt.Errorf("failed to delete track: %w", err)
	}
	// Drop the cached copy so GetTrackByID cannot serve the deleted track.
	if s.cacheService != nil {
		if err := s.cacheService.InvalidateTrackCache(ctx, trackID); err != nil {
			s.logger.Warn("Failed to invalidate track cache", zap.Error(err), zap.String("track_id", trackID.String()))
		}
	}
	s.logger.Info("Track deleted",
		zap.Any("track_id", trackID),
		zap.String("user_id", userID.String()),
		zap.String("file_path", track.FilePath),
	)
	return nil
}
// UpdateStreamStatus updates the stream status and manifest URL of a track.
// A "ready" status also marks the track completed; "error" marks it failed
// with a matching status message.
//
// Fix: the track cache is now invalidated after the update — GetTrackByID
// caches rows (BE-SVC-001), and without invalidation it kept serving the
// pre-transcode status until the cache TTL expired.
func (s *TrackService) UpdateStreamStatus(ctx context.Context, trackID uuid.UUID, status string, manifestURL string) error {
	updates := map[string]interface{}{
		"stream_status": status,
	}
	if manifestURL != "" {
		updates["stream_manifest_url"] = manifestURL
	}
	// Terminal transcoding states also flip the track's overall status.
	switch status {
	case "ready":
		updates["status"] = models.TrackStatusCompleted
		updates["status_message"] = "Ready for streaming"
	case "error":
		updates["status"] = models.TrackStatusFailed
		updates["status_message"] = "Transcoding failed"
	}
	if err := s.db.WithContext(ctx).Model(&models.Track{}).Where("id = ?", trackID).Updates(updates).Error; err != nil {
		return fmt.Errorf("failed to update stream status: %w", err)
	}
	// Drop the cached copy so readers see the new status immediately.
	if s.cacheService != nil {
		if err := s.cacheService.InvalidateTrackCache(ctx, trackID); err != nil {
			s.logger.Warn("Failed to invalidate track cache", zap.Error(err), zap.String("track_id", trackID.String()))
		}
	}
	s.logger.Info("Track stream status updated",
		zap.Any("track_id", trackID),
		zap.String("status", status),
		zap.String("manifest_url", manifestURL),
	)
	return nil
}
// TrackStats represents aggregate statistics for a track.
// NOTE(review): GetTrackStats actually returns types.TrackStats, not this
// local type — confirm whether this struct is still referenced elsewhere
// or is dead after that migration.
type TrackStats struct {
	Views         int64 `json:"views"`
	Likes         int64 `json:"likes"`
	Comments      int64 `json:"comments"`
	TotalPlayTime int64 `json:"total_play_time"` // seconds
	Downloads     int64 `json:"downloads"`
}
// GetTrackStats aggregates a track's statistics — likes, comments, plays
// (views), total play time and share-link downloads — into types.TrackStats.
// Returns ErrTrackNotFound when the track does not exist.
//
// Consistency fix: all read-only queries now go through forRead() (the read
// replica when configured), matching the other read paths (ListTracks,
// GetTrackByID, GetLyrics); previously they always hit the primary.
func (s *TrackService) GetTrackStats(ctx context.Context, trackID uuid.UUID) (*types.TrackStats, error) {
	// Existence check first so callers get ErrTrackNotFound, not zero stats.
	var track models.Track
	if err := s.forRead().WithContext(ctx).First(&track, "id = ?", trackID).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, ErrTrackNotFound
		}
		return nil, fmt.Errorf("failed to get track: %w", err)
	}
	var stats types.TrackStats
	// Count likes.
	if err := s.forRead().WithContext(ctx).Model(&models.TrackLike{}).
		Where("track_id = ?", trackID).
		Count(&stats.Likes).Error; err != nil {
		return nil, fmt.Errorf("failed to count likes: %w", err)
	}
	// Count comments.
	// NOTE(review): the original comment claimed soft-deleted comments are
	// excluded; that only holds if models.TrackComment carries a
	// gorm.DeletedAt field — confirm against the model.
	if err := s.forRead().WithContext(ctx).Model(&models.TrackComment{}).
		Where("track_id = ?", trackID).
		Count(&stats.Comments).Error; err != nil {
		return nil, fmt.Errorf("failed to count comments: %w", err)
	}
	// Views = total play rows; play time = summed duration.
	type PlayStats struct {
		Views         int64
		TotalPlayTime int64
	}
	var playStats PlayStats
	if err := s.forRead().WithContext(ctx).Model(&models.TrackPlay{}).
		Where("track_id = ?", trackID).
		Select("COUNT(*) as views, COALESCE(SUM(duration), 0) as total_play_time").
		Scan(&playStats).Error; err != nil {
		return nil, fmt.Errorf("failed to get play statistics: %w", err)
	}
	stats.Views = playStats.Views
	stats.TotalPlayTime = playStats.TotalPlayTime
	// Downloads: sum of access_count on share links whose permissions include
	// 'download' (access_count is incremented when such a link is accessed).
	if err := s.forRead().WithContext(ctx).Model(&models.TrackShare{}).
		Where("track_id = ? AND permissions LIKE ?", trackID, "%download%").
		Select("COALESCE(SUM(access_count), 0)").
		Scan(&stats.Downloads).Error; err != nil {
		return nil, fmt.Errorf("failed to count downloads: %w", err)
	}
	s.logger.Info("Track stats retrieved",
		zap.Any("track_id", trackID),
		zap.Int64("views", stats.Views),
		zap.Int64("likes", stats.Likes),
		zap.Int64("comments", stats.Comments),
		zap.Int64("total_play_time", stats.TotalPlayTime),
		zap.Int64("downloads", stats.Downloads),
	)
	return &stats, nil
}
// GetLyrics returns the lyrics row for a track, or (nil, nil) when the
// track has no lyrics yet (E3).
func (s *TrackService) GetLyrics(ctx context.Context, trackID uuid.UUID) (*models.TrackLyrics, error) {
	var row models.TrackLyrics
	err := s.forRead().WithContext(ctx).Where("track_id = ?", trackID).First(&row).Error
	switch {
	case err == nil:
		return &row, nil
	case errors.Is(err, gorm.ErrRecordNotFound):
		// No lyrics yet — not an error.
		return nil, nil
	default:
		return nil, fmt.Errorf("failed to get lyrics: %w", err)
	}
}
// CreateOrUpdateLyrics upserts the lyrics for a track (E3). Only the track
// owner may write lyrics; returns ErrForbidden otherwise, and propagates
// ErrTrackNotFound for a missing track.
func (s *TrackService) CreateOrUpdateLyrics(ctx context.Context, trackID uuid.UUID, userID uuid.UUID, content string) (*models.TrackLyrics, error) {
	// Ownership check (also verifies the track exists).
	track, err := s.GetTrackByID(ctx, trackID)
	if err != nil {
		return nil, err
	}
	if track.UserID != userID {
		return nil, ErrForbidden
	}
	// Load the existing row if any; ErrRecordNotFound means "create".
	var lyrics models.TrackLyrics
	lookupErr := s.db.WithContext(ctx).Where("track_id = ?", trackID).First(&lyrics).Error
	if lookupErr != nil && !errors.Is(lookupErr, gorm.ErrRecordNotFound) {
		return nil, fmt.Errorf("failed to get lyrics: %w", lookupErr)
	}
	lyrics.TrackID = trackID
	lyrics.Content = content
	if lyrics.ID == uuid.Nil {
		if err := s.db.WithContext(ctx).Create(&lyrics).Error; err != nil {
			return nil, fmt.Errorf("failed to create lyrics: %w", err)
		}
		return &lyrics, nil
	}
	if err := s.db.WithContext(ctx).Save(&lyrics).Error; err != nil {
		return nil, fmt.Errorf("failed to update lyrics: %w", err)
	}
	return &lyrics, nil
}
// BatchDeleteTracks delegates bulk deletion to TrackBatchService (v0.943).
func (s *TrackService) BatchDeleteTracks(ctx context.Context, trackIDs []uuid.UUID, userID uuid.UUID) (*BatchDeleteResult, error) {
	return s.batchService.BatchDeleteTracks(ctx, trackIDs, userID)
}
// deleteTrackFiles removes a track's physical files — main audio, waveform
// and cover art (logic extracted from DeleteTrack). Missing files are
// ignored; all three paths are attempted and the first removal error is
// returned.
//
// Fix: the local error slice was named `errors`, shadowing the stdlib
// errors package inside this function — renamed to errs.
//
// NOTE(review): ctx is currently unused (os.Remove takes no context); kept
// for signature stability with existing callers.
func (s *TrackService) deleteTrackFiles(ctx context.Context, track *models.Track) error {
	var errs []error
	// remove deletes one file, collecting any real error (not-exist is fine).
	remove := func(path, what string) {
		if path == "" {
			return
		}
		if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
			errs = append(errs, fmt.Errorf("failed to delete %s %s: %w", what, path, err))
		}
	}
	remove(track.FilePath, "track file")
	remove(track.WaveformPath, "waveform file")
	remove(track.CoverArtPath, "cover art file")
	// Return the first error if any, else nil.
	if len(errs) > 0 {
		return errs[0]
	}
	return nil
}
// BatchUpdateTracks delegates bulk metadata updates to TrackBatchService (v0.943).
func (s *TrackService) BatchUpdateTracks(ctx context.Context, trackIDs []uuid.UUID, userID uuid.UUID, updates map[string]interface{}) (*BatchUpdateResult, error) {
	return s.batchService.BatchUpdateTracks(ctx, trackIDs, userID, updates)
}
// UpdateStreamStatus updates the stream status and manifest URL of a track