veza/veza-backend-api/internal/services/track_chunk_service.go

package services

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"mime/multipart"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/google/uuid"
	"go.uber.org/zap"
)

// ChunkUploadInfo holds the state of a chunked upload.
// UUID MIGRATION: UserID migrated to uuid.UUID.
type ChunkUploadInfo struct {
	UploadID    string            `json:"upload_id"`
	UserID      uuid.UUID         `json:"user_id"`
	TotalChunks int               `json:"total_chunks"`
	TotalSize   int64             `json:"total_size"`
	Filename    string            `json:"filename"`
	Chunks      map[int]ChunkInfo `json:"chunks"`                 // chunk_number -> ChunkInfo
	ReceivedMD5 string            `json:"received_md5,omitempty"` // MD5 of the final file
	CreatedAt   time.Time         `json:"created_at"`
	UpdatedAt   time.Time         `json:"updated_at"`
	mu          sync.RWMutex      `json:"-"`
}

// ChunkInfo holds the metadata of a single chunk.
type ChunkInfo struct {
	ChunkNumber int    `json:"chunk_number"`
	Size        int64  `json:"size"`
	MD5         string `json:"md5"`
	FilePath    string `json:"file_path"`
	Received    bool   `json:"received"`
}

// TrackChunkService manages chunked uploads of audio files.
type TrackChunkService struct {
	chunksDir       string
	uploads         map[string]*ChunkUploadInfo // upload_id -> ChunkUploadInfo
	logger          *zap.Logger
	mu              sync.RWMutex
	cleanupInterval time.Duration
	maxUploadAge    time.Duration
}

// NewTrackChunkService creates a new chunked-upload service.
func NewTrackChunkService(chunksDir string, logger *zap.Logger) *TrackChunkService {
	if chunksDir == "" {
		chunksDir = "uploads/tracks/chunks"
	}
	if logger == nil {
		logger = zap.NewNop()
	}

	service := &TrackChunkService{
		chunksDir:       chunksDir,
		uploads:         make(map[string]*ChunkUploadInfo),
		logger:          logger,
		cleanupInterval: time.Hour,
		maxUploadAge:    24 * time.Hour, // remove incomplete uploads after 24h
	}

	// Create the chunks directory.
	if err := os.MkdirAll(chunksDir, 0755); err != nil {
		logger.Warn("Failed to create chunks directory", zap.Error(err))
	}

	// Start the periodic cleanup.
	go service.startCleanup()

	return service
}
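
// Illustrative wiring sketch (an assumption, not taken from this service):
// the caller typically constructs one TrackChunkService at startup and shares
// it across handlers, e.g.
//
//	logger, _ := zap.NewProduction()
//	svc := NewTrackChunkService("", logger) // "" falls back to uploads/tracks/chunks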

// InitiateChunkedUpload initializes a new chunked upload and returns its upload ID.
func (s *TrackChunkService) InitiateChunkedUpload(userID uuid.UUID, totalChunks int, totalSize int64, filename string) (string, error) {
	uploadID := uuid.New().String()

	uploadInfo := &ChunkUploadInfo{
		UploadID:    uploadID,
		UserID:      userID,
		TotalChunks: totalChunks,
		TotalSize:   totalSize,
		Filename:    filename,
		Chunks:      make(map[int]ChunkInfo),
		CreatedAt:   time.Now(),
		UpdatedAt:   time.Now(),
	}

	s.mu.Lock()
	s.uploads[uploadID] = uploadInfo
	s.mu.Unlock()

	s.logger.Info("Chunked upload initiated",
		zap.String("upload_id", uploadID),
		zap.String("user_id", userID.String()),
		zap.Int("total_chunks", totalChunks),
		zap.Int64("total_size", totalSize),
	)

	return uploadID, nil
}

// SaveChunk stores a received chunk on disk and records its metadata.
func (s *TrackChunkService) SaveChunk(ctx context.Context, uploadID string, chunkNumber int, totalChunks int, fileHeader *multipart.FileHeader) error {
	s.mu.RLock()
	uploadInfo, exists := s.uploads[uploadID]
	s.mu.RUnlock()
	if !exists {
		return fmt.Errorf("upload not found")
	}

	uploadInfo.mu.Lock()
	defer uploadInfo.mu.Unlock()

	// Make sure this chunk has not already been received.
	if chunk, exists := uploadInfo.Chunks[chunkNumber]; exists && chunk.Received {
		return fmt.Errorf("chunk %d already received", chunkNumber)
	}

	// Validate the parameters.
	if uploadInfo.TotalChunks != totalChunks {
		return fmt.Errorf("total chunks mismatch: expected %d, got %d", uploadInfo.TotalChunks, totalChunks)
	}

	// Create the directory for this upload.
	uploadDir := filepath.Join(s.chunksDir, uploadID)
	if err := os.MkdirAll(uploadDir, 0755); err != nil {
		return fmt.Errorf("failed to create upload directory: %w", err)
	}

	// Save the chunk.
	chunkPath := filepath.Join(uploadDir, fmt.Sprintf("chunk_%d", chunkNumber))

	file, err := fileHeader.Open()
	if err != nil {
		return fmt.Errorf("failed to open chunk file: %w", err)
	}
	defer file.Close()

	// Create the destination file.
	destFile, err := os.Create(chunkPath)
	if err != nil {
		return fmt.Errorf("failed to create chunk file: %w", err)
	}
	defer destFile.Close()

	// Compute the MD5 while copying.
	hash := md5.New()
	multiWriter := io.MultiWriter(destFile, hash)

	if _, err := io.Copy(multiWriter, file); err != nil {
		os.Remove(chunkPath)
		return fmt.Errorf("failed to save chunk: %w", err)
	}

	chunkMD5 := hex.EncodeToString(hash.Sum(nil))

	// Record the chunk metadata.
	uploadInfo.Chunks[chunkNumber] = ChunkInfo{
		ChunkNumber: chunkNumber,
		Size:        fileHeader.Size,
		MD5:         chunkMD5,
		FilePath:    chunkPath,
		Received:    true,
	}
	uploadInfo.UpdatedAt = time.Now()

	s.logger.Info("Chunk saved",
		zap.String("upload_id", uploadID),
		zap.Int("chunk_number", chunkNumber),
		zap.Int64("size", fileHeader.Size),
		zap.String("md5", chunkMD5),
	)

	return nil
}

// GetUploadInfo returns the information for an upload.
func (s *TrackChunkService) GetUploadInfo(uploadID string) (*ChunkUploadInfo, error) {
	s.mu.RLock()
	uploadInfo, exists := s.uploads[uploadID]
	s.mu.RUnlock()
	if !exists {
		return nil, fmt.Errorf("upload not found")
	}
	return uploadInfo, nil
}

// CompleteChunkedUpload assembles all chunks into the final file and returns
// the original filename, the total size and the MD5 of the assembled file.
func (s *TrackChunkService) CompleteChunkedUpload(ctx context.Context, uploadID string, finalPath string) (string, int64, string, error) {
	s.mu.RLock()
	uploadInfo, exists := s.uploads[uploadID]
	s.mu.RUnlock()
	if !exists {
		return "", 0, "", fmt.Errorf("upload not found")
	}

	uploadInfo.mu.Lock()
	defer uploadInfo.mu.Unlock()

	// Make sure every chunk has been received.
	if len(uploadInfo.Chunks) != uploadInfo.TotalChunks {
		return "", 0, "", fmt.Errorf("missing chunks: received %d/%d", len(uploadInfo.Chunks), uploadInfo.TotalChunks)
	}

	// Check that chunks 1..totalChunks are all present.
	for i := 1; i <= uploadInfo.TotalChunks; i++ {
		chunk, exists := uploadInfo.Chunks[i]
		if !exists || !chunk.Received {
			return "", 0, "", fmt.Errorf("chunk %d is missing", i)
		}
	}

	// Create the destination directory.
	if err := os.MkdirAll(filepath.Dir(finalPath), 0755); err != nil {
		return "", 0, "", fmt.Errorf("failed to create destination directory: %w", err)
	}

	// Assemble the chunks.
	finalFile, err := os.Create(finalPath)
	if err != nil {
		return "", 0, "", fmt.Errorf("failed to create final file: %w", err)
	}
	defer finalFile.Close()

	hash := md5.New()
	multiWriter := io.MultiWriter(finalFile, hash)
	var totalSize int64

	// Append the chunks in order.
	for i := 1; i <= uploadInfo.TotalChunks; i++ {
		chunk := uploadInfo.Chunks[i]

		chunkFile, err := os.Open(chunk.FilePath)
		if err != nil {
			finalFile.Close()
			os.Remove(finalPath)
			return "", 0, "", fmt.Errorf("failed to open chunk %d: %w", i, err)
		}

		size, err := io.Copy(multiWriter, chunkFile)
		chunkFile.Close()
		if err != nil {
			finalFile.Close()
			os.Remove(finalPath)
			return "", 0, "", fmt.Errorf("failed to write chunk %d: %w", i, err)
		}
		totalSize += size
	}

	finalMD5 := hex.EncodeToString(hash.Sum(nil))

	// Verify the total size.
	if totalSize != uploadInfo.TotalSize {
		finalFile.Close()
		os.Remove(finalPath)
		return "", 0, "", fmt.Errorf("size mismatch: expected %d, got %d", uploadInfo.TotalSize, totalSize)
	}

	// Clean up the temporary chunks.
	uploadDir := filepath.Join(s.chunksDir, uploadID)
	if err := os.RemoveAll(uploadDir); err != nil {
		s.logger.Warn("Failed to cleanup chunks", zap.String("upload_id", uploadID), zap.Error(err))
	}

	// Drop the upload from memory.
	s.mu.Lock()
	delete(s.uploads, uploadID)
	s.mu.Unlock()

	s.logger.Info("Chunked upload completed",
		zap.String("upload_id", uploadID),
		zap.String("final_path", finalPath),
		zap.Int64("total_size", totalSize),
		zap.String("md5", finalMD5),
	)

	return uploadInfo.Filename, totalSize, finalMD5, nil
}
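
// exampleChunkedUploadFlow is an illustrative sketch only, not part of the
// original service: it shows the expected call order (initiate, save each
// chunk, complete), assuming the handler layer has already parsed one
// *multipart.FileHeader per chunk. The destination path built below is a
// hypothetical example.
func exampleChunkedUploadFlow(ctx context.Context, svc *TrackChunkService, userID uuid.UUID, filename string, totalSize int64, parts []*multipart.FileHeader) (string, error) {
	// 1. Initiate the upload and keep the returned upload_id for later calls.
	uploadID, err := svc.InitiateChunkedUpload(userID, len(parts), totalSize, filename)
	if err != nil {
		return "", err
	}

	// 2. Save each chunk; chunk numbers are 1-based, matching CompleteChunkedUpload.
	for i, part := range parts {
		if err := svc.SaveChunk(ctx, uploadID, i+1, len(parts), part); err != nil {
			return "", err
		}
	}

	// 3. Assemble the chunks into the final file; the size and MD5 are also returned.
	finalPath := filepath.Join("uploads", "tracks", uploadID+"_"+filename)
	if _, _, _, err := svc.CompleteChunkedUpload(ctx, uploadID, finalPath); err != nil {
		return "", err
	}
	return finalPath, nil
}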

// UploadState describes the state of an upload, used to resume it.
// UUID MIGRATION: UserID migrated to uuid.UUID.
type UploadState struct {
	UploadID       string    `json:"upload_id"`
	UserID         uuid.UUID `json:"user_id"`
	TotalChunks    int       `json:"total_chunks"`
	TotalSize      int64     `json:"total_size"`
	Filename       string    `json:"filename"`
	ChunksReceived []int     `json:"chunks_received"` // numbers of the chunks received so far
	LastChunk      int       `json:"last_chunk"`      // highest chunk number received (0 if none)
	ReceivedCount  int       `json:"received_count"`  // number of chunks received
	Progress       int       `json:"progress"`        // progress percentage (0-100)
	CreatedAt      time.Time `json:"created_at"`
	UpdatedAt      time.Time `json:"updated_at"`
}

// GetUploadState returns the state of an upload so that it can be resumed.
func (s *TrackChunkService) GetUploadState(uploadID string) (*UploadState, error) {
	s.mu.RLock()
	uploadInfo, exists := s.uploads[uploadID]
	s.mu.RUnlock()
	if !exists {
		return nil, fmt.Errorf("upload not found")
	}

	uploadInfo.mu.RLock()
	defer uploadInfo.mu.RUnlock()

	// Count the received chunks and find the highest one.
	chunksReceived := make([]int, 0, len(uploadInfo.Chunks))
	lastChunk := 0
	receivedCount := 0
	for chunkNum, chunk := range uploadInfo.Chunks {
		if chunk.Received {
			chunksReceived = append(chunksReceived, chunkNum)
			if chunkNum > lastChunk {
				lastChunk = chunkNum
			}
			receivedCount++
		}
	}

	progress := 0
	if uploadInfo.TotalChunks > 0 {
		progress = (receivedCount * 100) / uploadInfo.TotalChunks
	}

	return &UploadState{
		UploadID:       uploadInfo.UploadID,
		UserID:         uploadInfo.UserID,
		TotalChunks:    uploadInfo.TotalChunks,
		TotalSize:      uploadInfo.TotalSize,
		Filename:       uploadInfo.Filename,
		ChunksReceived: chunksReceived,
		LastChunk:      lastChunk,
		ReceivedCount:  receivedCount,
		Progress:       progress,
		CreatedAt:      uploadInfo.CreatedAt,
		UpdatedAt:      uploadInfo.UpdatedAt,
	}, nil
}
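
// exampleMissingChunks is an illustrative sketch only, not part of the
// original service: given the UploadState returned by GetUploadState, it
// derives the 1-based chunk numbers a client still has to send when resuming
// an interrupted upload.
func exampleMissingChunks(state *UploadState) []int {
	received := make(map[int]bool, len(state.ChunksReceived))
	for _, n := range state.ChunksReceived {
		received[n] = true
	}
	missing := make([]int, 0, state.TotalChunks)
	for i := 1; i <= state.TotalChunks; i++ {
		if !received[i] {
			missing = append(missing, i)
		}
	}
	return missing
}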

// GetUploadProgress returns the number of received chunks and the progress percentage.
func (s *TrackChunkService) GetUploadProgress(uploadID string) (int, int, error) {
	s.mu.RLock()
	uploadInfo, exists := s.uploads[uploadID]
	s.mu.RUnlock()
	if !exists {
		return 0, 0, fmt.Errorf("upload not found")
	}

	uploadInfo.mu.RLock()
	defer uploadInfo.mu.RUnlock()

	receivedChunks := 0
	for _, chunk := range uploadInfo.Chunks {
		if chunk.Received {
			receivedChunks++
		}
	}

	// Guard against a zero TotalChunks to avoid a division by zero,
	// mirroring the check in GetUploadState.
	progress := 0
	if uploadInfo.TotalChunks > 0 {
		progress = (receivedChunks * 100) / uploadInfo.TotalChunks
	}
	return receivedChunks, progress, nil
}

// CleanupUpload removes an upload and its chunks.
func (s *TrackChunkService) CleanupUpload(uploadID string) error {
	s.mu.Lock()
	_, exists := s.uploads[uploadID]
	if exists {
		delete(s.uploads, uploadID)
	}
	s.mu.Unlock()
	if !exists {
		return fmt.Errorf("upload not found")
	}

	// Remove the chunks on disk.
	uploadDir := filepath.Join(s.chunksDir, uploadID)
	if err := os.RemoveAll(uploadDir); err != nil {
		return fmt.Errorf("failed to cleanup chunks: %w", err)
	}

	s.logger.Info("Upload cleaned up", zap.String("upload_id", uploadID))
	return nil
}

// startCleanup runs the periodic cleanup of expired uploads.
func (s *TrackChunkService) startCleanup() {
	ticker := time.NewTicker(s.cleanupInterval)
	defer ticker.Stop()
	for range ticker.C {
		s.cleanupExpiredUploads()
	}
}

// cleanupExpiredUploads removes uploads that have exceeded the maximum age.
func (s *TrackChunkService) cleanupExpiredUploads() {
	now := time.Now()
	var expiredUploads []string

	s.mu.RLock()
	for uploadID, uploadInfo := range s.uploads {
		if now.Sub(uploadInfo.UpdatedAt) > s.maxUploadAge {
			expiredUploads = append(expiredUploads, uploadID)
		}
	}
	s.mu.RUnlock()

	for _, uploadID := range expiredUploads {
		if err := s.CleanupUpload(uploadID); err != nil {
			s.logger.Warn("Failed to cleanup expired upload", zap.String("upload_id", uploadID), zap.Error(err))
		}
	}

	if len(expiredUploads) > 0 {
		s.logger.Info("Cleaned up expired uploads", zap.Int("count", len(expiredUploads)))
	}
}