Security fixes implemented:
CRITICAL:
- CRIT-001: IDOR on chat rooms — added IsRoomMember check before
returning room data or message history (returns 404, not 403)
- CRIT-002: play_count/like_count exposed publicly — changed JSON
tags to "-" so they are never serialized in API responses
HIGH:
- HIGH-001: TOCTOU race on marketplace downloads — transaction +
SELECT FOR UPDATE on GetDownloadURL
- HIGH-002: HS256 in production docker-compose — replaced JWT_SECRET
with JWT_PRIVATE_KEY_PATH / JWT_PUBLIC_KEY_PATH (RS256)
- HIGH-003: context.Background() bypass in user repository — full
context propagation from handlers → services → repository (29 files)
- HIGH-004: Race condition on promo codes — SELECT FOR UPDATE
- HIGH-005: Race condition on exclusive licenses — SELECT FOR UPDATE
- HIGH-006: Rate limiter IP spoofing — SetTrustedProxies(nil) default
- HIGH-007: RGPD hard delete incomplete — added cleanup for sessions,
settings, follows, notifications, audit_logs anonymization
- HIGH-008: RTMP callback auth weak — fail-closed when unconfigured,
header-only (no query param), constant-time compare
- HIGH-009: Co-listening host hijack — UpdateHostState now takes *Conn
and verifies IsHost before processing
- HIGH-010: Moderator self-strike — added issuedBy != userID check
MEDIUM:
- MEDIUM-001: Recovery codes used math/rand — replaced with crypto/rand
- MEDIUM-005: Stream token forgeable — resolved by HIGH-002 (RS256)
Updated REMEDIATION_MATRIX: 14 findings marked ✅ CORRIGÉ.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
319 lines · 11 KiB · Go
package main
|
||
|
||
import (
	"context"
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof" // MOD-P2-006: enable pprof for profiling
	"os"
	"os/signal"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/getsentry/sentry-go"
	"github.com/gin-gonic/gin"
	"github.com/joho/godotenv"
	"go.uber.org/zap"

	"veza-backend-api/internal/api"
	"veza-backend-api/internal/config"
	"veza-backend-api/internal/core/marketplace"
	"veza-backend-api/internal/metrics"
	"veza-backend-api/internal/services"
	"veza-backend-api/internal/shutdown"
	"veza-backend-api/internal/workers"

	_ "veza-backend-api/docs" // Import docs for swagger
)
|
||
|
||
// @title Veza Backend API
|
||
// @version 1.2.0
|
||
// @description Backend API for Veza platform.
|
||
// @termsOfService http://swagger.io/terms/
|
||
|
||
// @contact.name API Support
|
||
// @contact.url http://www.veza.app/support
|
||
// @contact.email support@veza.app
|
||
|
||
// @license.name Apache 2.0
|
||
// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
|
||
|
||
// @host localhost:8080
|
||
// @BasePath /api/v1
|
||
|
||
// @securityDefinitions.apikey BearerAuth
|
||
// @in header
|
||
// @name Authorization
|
||
|
||
func main() {
|
||
// Charger les variables d'environnement
|
||
// NOTE: Do not write to stderr to avoid broken pipe errors with systemd journald
|
||
// The message will be logged by the logger once it's initialized
|
||
_ = godotenv.Load()
|
||
|
||
// FIX #1: Supprimer l'initialisation dupliquée du logger
|
||
// Le logger sera initialisé dans config.NewConfig() avec le bon LOG_LEVEL
|
||
// Charger la configuration (qui initialise le logger)
|
||
cfg, err := config.NewConfig()
|
||
if err != nil {
|
||
// CRITICAL: Do not write to stderr or files to avoid broken pipe errors
|
||
// Just exit silently - systemd will capture the exit code
|
||
// The error details will be in the application logs if the logger was initialized
|
||
os.Exit(1)
|
||
}
|
||
|
||
// Utiliser le logger de la config
|
||
logger := cfg.Logger
|
||
if logger == nil {
|
||
log.Fatal("❌ Logger non initialisé dans la configuration")
|
||
}
|
||
|
||
logger.Info("🚀 Démarrage de Veza Backend API")
|
||
|
||
// Valider la configuration
|
||
if err := cfg.Validate(); err != nil {
|
||
logger.Fatal("❌ Configuration invalide", zap.Error(err))
|
||
}
|
||
|
||
// Initialiser Sentry si DSN configuré
|
||
if cfg.SentryDsn != "" {
|
||
err := sentry.Init(sentry.ClientOptions{
|
||
Dsn: cfg.SentryDsn,
|
||
Environment: cfg.SentryEnvironment,
|
||
TracesSampleRate: cfg.SentrySampleRateTransactions,
|
||
SampleRate: cfg.SentrySampleRateErrors,
|
||
// AttachStacktrace pour capturer les stack traces
|
||
AttachStacktrace: true,
|
||
})
|
||
if err != nil {
|
||
logger.Warn("❌ Impossible d'initialiser Sentry", zap.Error(err))
|
||
} else {
|
||
logger.Info("✅ Sentry initialisé", zap.String("environment", cfg.SentryEnvironment))
|
||
}
|
||
// Flush les événements Sentry avant shutdown
|
||
defer sentry.Flush(2 * time.Second)
|
||
} else {
|
||
logger.Info("ℹ️ Sentry non configuré (SENTRY_DSN non défini)")
|
||
}
|
||
|
||
// Initialisation de la base de données
|
||
db := cfg.Database
|
||
if db == nil {
|
||
logger.Fatal("❌ Base de données non initialisée")
|
||
}
|
||
defer db.Close()
|
||
|
||
if err := db.Initialize(); err != nil {
|
||
logger.Fatal("❌ Impossible d'initialiser la base de données", zap.Error(err))
|
||
}
|
||
|
||
// MOD-P2-004: Démarrer le collecteur de métriques DB pool
|
||
// Collecte les stats DB pool toutes les 10 secondes et les expose via Prometheus
|
||
metrics.StartDBPoolStatsCollector(db.DB, 10*time.Second)
|
||
logger.Info("✅ Collecteur de métriques DB pool démarré")
|
||
|
||
// Fail-Fast: Vérifier RabbitMQ si activé
|
||
if cfg.RabbitMQEnable {
|
||
if cfg.RabbitMQEventBus == nil {
|
||
logger.Fatal("❌ RabbitMQ activé (RABBITMQ_ENABLE=true) mais non initialisé (problème de connexion?)")
|
||
} else {
|
||
// Optionnel: Check connection status if RabbitMQEventBus exposes it
|
||
// For now, assume if initialized it's connected or retrying.
|
||
// If we want STRICT fail fast, we would need to verify connection is Open here.
|
||
logger.Info("✅ RabbitMQ actif")
|
||
}
|
||
} else {
|
||
logger.Info("ℹ️ RabbitMQ désactivé")
|
||
}
|
||
|
||
// BE-SVC-017: Créer le gestionnaire de shutdown gracieux
|
||
shutdownManager := shutdown.NewShutdownManager(logger)
|
||
|
||
// Démarrer le Job Worker avec contexte pour shutdown gracieux
|
||
var workerCtx context.Context
|
||
var workerCancel context.CancelFunc
|
||
if cfg.JobWorker != nil {
|
||
workerCtx, workerCancel = context.WithCancel(context.Background())
|
||
cfg.JobWorker.Start(workerCtx)
|
||
logger.Info("✅ Job Worker démarré")
|
||
|
||
// Enregistrer le Job Worker pour shutdown gracieux
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("job_worker", func(ctx context.Context) error {
|
||
if workerCancel != nil {
|
||
workerCancel()
|
||
// Attendre un peu pour que les workers se terminent
|
||
time.Sleep(2 * time.Second)
|
||
}
|
||
return nil
|
||
}))
|
||
} else {
|
||
logger.Warn("⚠️ Job Worker non initialisé")
|
||
}
|
||
|
||
// v0.701: Start Transfer Retry Worker
|
||
if cfg.TransferRetryEnabled && cfg.StripeConnectEnabled && cfg.StripeConnectSecretKey != "" {
|
||
stripeConnectSvc := services.NewStripeConnectService(db.GormDB, cfg.StripeConnectSecretKey, logger)
|
||
retryWorker := marketplace.NewTransferRetryWorker(
|
||
db.GormDB, stripeConnectSvc, logger, cfg.TransferRetryInterval, cfg.TransferRetryMaxAttempts,
|
||
)
|
||
retryCtx, retryCancel := context.WithCancel(context.Background())
|
||
go retryWorker.Start(retryCtx)
|
||
logger.Info("Transfer Retry Worker started",
|
||
zap.Duration("interval", cfg.TransferRetryInterval),
|
||
zap.Int("max_retries", cfg.TransferRetryMaxAttempts))
|
||
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("transfer_retry_worker", func(ctx context.Context) error {
|
||
retryCancel()
|
||
return nil
|
||
}))
|
||
} else if cfg.TransferRetryEnabled {
|
||
logger.Info("Transfer Retry Worker skipped — Stripe Connect not enabled")
|
||
}
|
||
|
||
// v0.802: Start Cloud Backup Worker (copies cloud files to backup prefix every 24h)
|
||
if cfg.S3StorageService != nil {
|
||
backupWorker := services.NewCloudBackupWorker(db.GormDB, cfg.S3StorageService, logger)
|
||
backupCtx, backupCancel := context.WithCancel(context.Background())
|
||
go backupWorker.Start(backupCtx)
|
||
logger.Info("Cloud Backup Worker started (24h interval)")
|
||
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("cloud_backup_worker", func(ctx context.Context) error {
|
||
backupCancel()
|
||
return nil
|
||
}))
|
||
}
|
||
|
||
// v0.802: Start Gear Warranty Notifier (sends notifications when warranty expires in 30 days)
|
||
notificationService := services.NewNotificationService(db, logger)
|
||
warrantyNotifier := services.NewGearWarrantyNotifier(db.GormDB, notificationService, logger)
|
||
warrantyCtx, warrantyCancel := context.WithCancel(context.Background())
|
||
go warrantyNotifier.Start(warrantyCtx)
|
||
logger.Info("Gear Warranty Notifier started (24h interval)")
|
||
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("gear_warranty_notifier", func(ctx context.Context) error {
|
||
warrantyCancel()
|
||
return nil
|
||
}))
|
||
|
||
// v0.10.5 F552: Weekly notification digest (runs on Sunday)
|
||
if cfg.JobWorker != nil {
|
||
digestWorker := services.NewNotificationDigestWorker(db.GormDB, cfg.JobWorker, logger)
|
||
digestCtx, digestCancel := context.WithCancel(context.Background())
|
||
go digestWorker.Start(digestCtx)
|
||
logger.Info("Notification digest worker started (weekly on Sunday)")
|
||
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("notification_digest_worker", func(ctx context.Context) error {
|
||
digestCancel()
|
||
return nil
|
||
}))
|
||
}
|
||
|
||
// v0.10.8 F065: Hard delete worker (GDPR - final anonymization after 30 days)
|
||
if os.Getenv("HARD_DELETE_CRON_ENABLED") != "false" {
|
||
hardDeleteWorker := workers.NewHardDeleteWorker(db.GormDB, logger, 24*time.Hour)
|
||
hardDeleteCtx, hardDeleteCancel := context.WithCancel(context.Background())
|
||
go hardDeleteWorker.Start(hardDeleteCtx)
|
||
logger.Info("Hard delete worker started (24h interval)")
|
||
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("hard_delete_worker", func(ctx context.Context) error {
|
||
hardDeleteWorker.Stop()
|
||
hardDeleteCancel()
|
||
return nil
|
||
}))
|
||
} else {
|
||
logger.Info("Hard delete worker disabled (HARD_DELETE_CRON_ENABLED=false)")
|
||
}
|
||
|
||
// Configuration du mode Gin
|
||
// Correction: Utilisation directe de la variable d'env car non exposée dans Config
|
||
appEnv := os.Getenv("APP_ENV")
|
||
if appEnv == "production" {
|
||
gin.SetMode(gin.ReleaseMode)
|
||
} else {
|
||
gin.SetMode(gin.DebugMode)
|
||
}
|
||
|
||
// Créer le router Gin
|
||
router := gin.New()
|
||
|
||
// SECURITY(HIGH-006): Restrict trusted proxies to prevent IP spoofing via X-Forwarded-For.
|
||
// Default: trust nothing (c.ClientIP() returns RemoteAddr only).
|
||
// Set TRUSTED_PROXIES="10.0.0.1,10.0.0.2" if behind a known reverse proxy/load balancer.
|
||
router.SetTrustedProxies(nil)
|
||
|
||
// Middleware globaux (Logger, Recovery) recommandés par ORIGIN
|
||
router.Use(gin.Logger(), gin.Recovery())
|
||
|
||
// Configuration des routes
|
||
apiRouter := api.NewAPIRouter(db, cfg) // Instantiate APIRouter
|
||
if err := apiRouter.Setup(router); err != nil {
|
||
logger.Error("Failed to setup API routes", zap.Error(err))
|
||
os.Exit(1)
|
||
}
|
||
|
||
// Configuration du serveur HTTP
|
||
port := fmt.Sprintf("%d", cfg.AppPort)
|
||
if cfg.AppPort == 0 {
|
||
port = "8080"
|
||
}
|
||
|
||
server := &http.Server{
|
||
Addr: fmt.Sprintf(":%s", port),
|
||
Handler: router,
|
||
ReadTimeout: 30 * time.Second, // Standards ORIGIN
|
||
WriteTimeout: 30 * time.Second,
|
||
}
|
||
|
||
// BE-SVC-017: Enregistrer tous les services pour shutdown gracieux
|
||
// Enregistrer le serveur HTTP
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("http_server", func(ctx context.Context) error {
|
||
return server.Shutdown(ctx)
|
||
}))
|
||
|
||
// Enregistrer la configuration (ferme DB, Redis, RabbitMQ, etc.)
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("config", func(ctx context.Context) error {
|
||
return cfg.Close()
|
||
}))
|
||
|
||
// Enregistrer le logger pour flush final
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("logger", func(ctx context.Context) error {
|
||
if logger != nil {
|
||
return logger.Sync()
|
||
}
|
||
return nil
|
||
}))
|
||
|
||
// Enregistrer Sentry pour flush final
|
||
if cfg.SentryDsn != "" {
|
||
shutdownManager.Register(shutdown.NewShutdownFunc("sentry", func(ctx context.Context) error {
|
||
sentry.Flush(2 * time.Second)
|
||
return nil
|
||
}))
|
||
}
|
||
|
||
// Gestion de l'arrêt gracieux
|
||
quit := make(chan os.Signal, 1)
|
||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||
|
||
go func() {
|
||
logger.Info("🌐 Serveur HTTP démarré", zap.String("port", port))
|
||
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||
logger.Fatal("❌ Erreur du serveur HTTP", zap.Error(err))
|
||
}
|
||
}()
|
||
|
||
// Attendre le signal d'arrêt
|
||
<-quit
|
||
logger.Info("🔄 Signal d'arrêt reçu, démarrage du shutdown gracieux...")
|
||
|
||
// BE-SVC-017: Arrêt gracieux coordonné de tous les services
|
||
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||
defer shutdownCancel()
|
||
|
||
if err := shutdownManager.Shutdown(shutdownCtx); err != nil {
|
||
logger.Error("❌ Erreur lors du shutdown gracieux", zap.Error(err))
|
||
} else {
|
||
logger.Info("✅ Shutdown gracieux terminé avec succès")
|
||
}
|
||
}
|