veza/veza-backend-api/internal/middleware/ratelimit.go
senke 73eca4f6ad feat: backend, stream server & infra improvements
Backend (Go):
- Config: CORS, RabbitMQ, rate limit, main config updates
- Routes: core, distribution, tracks routing changes
- Middleware: rate limiter, endpoint limiter, response cache hardening
- Handlers: distribution, search handler fixes
- Workers: job worker improvements
- Upload validator and logging config additions
- New migrations: products, orders, performance indexes
- Seed tooling and data

Stream Server (Rust):
- Audio processing, config, routes, simple stream server updates
- Dockerfile improvements

Infrastructure:
- docker-compose.yml updates
- nginx-rtmp config changes
- Makefile improvements (config, dev, high, infra)
- Root package.json and lock file updates
- .env.example updates

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 11:36:06 +01:00

188 lines
5 KiB
Go

package middleware
import (
	"fmt"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/gin-gonic/gin"
)
// SimpleRateLimiter is a simple in-memory rate limiter based on a sliding window.
// It is used for basic per-IP rate limiting without a Redis dependency.
type SimpleRateLimiter struct {
	requests map[string][]time.Time // per-client-IP request timestamps inside the current window
	limit    int                    // maximum number of requests allowed per window
	window   time.Duration          // length of the sliding window (e.g. 1 minute)
	mu       sync.Mutex             // guards requests, limit and window
	stop     chan struct{}          // closed to signal the cleanup goroutine to stop
}
// NewSimpleRateLimiter builds a rate limiter that allows at most limit
// requests per client within the sliding window (e.g. limit=100 with
// window=time.Minute for 100 req/min).
//
// A background goroutine is started to evict stale entries; call Stop
// to terminate it.
func NewSimpleRateLimiter(limit int, window time.Duration) *SimpleRateLimiter {
	limiter := &SimpleRateLimiter{
		requests: make(map[string][]time.Time),
		limit:    limit,
		window:   window,
		stop:     make(chan struct{}),
	}
	// Periodic eviction of expired per-IP entries.
	go limiter.cleanup()
	return limiter
}
// excludedRateLimitPaths lists routes exempt from rate limiting
// (critical infrastructure endpoints: health checks, CSRF bootstrap,
// e-mail verification, and API documentation).
var excludedRateLimitPaths = []string{
	"/health",
	"/healthz",
	"/readyz",
	"/api/v1/health",
	"/api/v1/healthz",
	"/api/v1/readyz",
	"/api/v1/csrf-token",
	// v0.903: login/register no longer excluded - subject to global rate limit (100 req/min) + endpoint-specific limiters
	// SEC-009, SEC-010: refresh and check-username have EndpointLimiter, not excluded
	"/api/v1/auth/verify-email",
	"/api/v1/auth/resend-verification",
	"/swagger",
	"/docs",
}

// isExcludedPath reports whether path is exempt from rate limiting.
// A path matches when it equals an excluded entry or starts with it,
// so e.g. "/swagger/index.html" matches the "/swagger" entry.
func isExcludedPath(path string) bool {
	for _, excluded := range excludedRateLimitPaths {
		// strings.HasPrefix covers both the exact-match and the
		// strict-prefix cases of the original hand-rolled slicing.
		if strings.HasPrefix(path, excluded) {
			return true
		}
	}
	return false
}
// Middleware returns the Gin middleware enforcing the per-IP sliding-window
// rate limit. On success it sets X-RateLimit-* headers; when the limit is
// exceeded it replies 429 with a standardized JSON body (INT-013) plus
// Retry-After, and aborts the chain.
func (rl *SimpleRateLimiter) Middleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Critical routes (health checks, docs, ...) are never rate limited.
		if isExcludedPath(c.Request.URL.Path) {
			c.Next()
			return
		}
		// SEC-011: never bypass rate limiting in production.
		// E2E: fully disabled when APP_ENV=test (or the explicit test flag)
		// to prevent flaky tests.
		if os.Getenv("APP_ENV") != "production" {
			if os.Getenv("APP_ENV") == "test" || os.Getenv("DISABLE_RATE_LIMIT_FOR_TESTS") == "true" {
				c.Next()
				return
			}
		}

		ip := c.ClientIP()

		rl.mu.Lock()
		// FIX: snapshot limit/window while holding the lock. The original
		// code read rl.limit and rl.window after Unlock, racing with
		// UpdateLimits which mutates them under the same mutex.
		limit := rl.limit
		window := rl.window
		now := time.Now()
		cutoff := now.Add(-window)

		// Drop timestamps that fell out of the sliding window.
		valid := make([]time.Time, 0, len(rl.requests[ip]))
		for _, t := range rl.requests[ip] {
			if t.After(cutoff) {
				valid = append(valid, t)
			}
		}

		// Limit reached: reject with 429.
		if len(valid) >= limit {
			// FIX: persist the pruned slice even on rejection so stale
			// timestamps do not accumulate until the next cleanup pass.
			rl.requests[ip] = valid
			rl.mu.Unlock()

			resetTime := now.Add(window).Unix()
			retryAfter := int(window.Seconds())
			// INT-013: standardized rate limit response format.
			c.Header("X-RateLimit-Limit", strconv.Itoa(limit))
			c.Header("X-RateLimit-Remaining", "0")
			c.Header("X-RateLimit-Reset", strconv.FormatInt(resetTime, 10))
			c.Header("Retry-After", strconv.Itoa(retryAfter))
			c.JSON(http.StatusTooManyRequests, gin.H{
				"success": false,
				"error": gin.H{
					"code":    429,
					"message": "Rate limit exceeded. Please try again later.",
					"details": []gin.H{
						{
							"field":   "rate_limit",
							"message": fmt.Sprintf("You have exceeded the rate limit of %d requests per %v", limit, window),
						},
					},
					"retry_after": retryAfter,
					"limit":       limit,
					"remaining":   0,
					"reset":       resetTime,
				},
			})
			c.Abort()
			return
		}

		// Record this request and release the lock before writing headers.
		valid = append(valid, now)
		rl.requests[ip] = valid
		remaining := limit - len(valid)
		rl.mu.Unlock()

		c.Header("X-RateLimit-Limit", strconv.Itoa(limit))
		c.Header("X-RateLimit-Remaining", strconv.Itoa(remaining))
		c.Header("X-RateLimit-Reset", strconv.FormatInt(now.Add(window).Unix(), 10))
		c.Next()
	}
}
// UpdateLimits hot-swaps the rate limiting parameters (T0034), allowing
// limits to be reloaded without restarting the application.
func (rl *SimpleRateLimiter) UpdateLimits(limit int, window time.Duration) {
	rl.mu.Lock()
	rl.limit = limit
	rl.window = window
	rl.mu.Unlock()
}
// cleanup periodically evicts request timestamps that have fallen out of
// the sliding window and removes map entries for idle IPs entirely.
// It runs until the stop channel is closed via Stop.
func (rl *SimpleRateLimiter) cleanup() {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-rl.stop:
			// Stop was called: terminate the goroutine.
			return
		case <-ticker.C:
			rl.mu.Lock()
			cutoff := time.Now().Add(-rl.window)
			for ip, stamps := range rl.requests {
				// Filter in place, reusing the backing array; the map is
				// only ever touched under rl.mu, so no aliasing hazard.
				kept := stamps[:0]
				for _, ts := range stamps {
					if ts.After(cutoff) {
						kept = append(kept, ts)
					}
				}
				if len(kept) == 0 {
					delete(rl.requests, ip)
				} else {
					rl.requests[ip] = kept
				}
			}
			rl.mu.Unlock()
		}
	}
}
// Stop signals the cleanup goroutine to exit by closing the stop channel.
// NOTE(review): must be called at most once — a second call would panic on
// closing an already-closed channel; confirm callers honor this.
func (rl *SimpleRateLimiter) Stop() {
close(rl.stop)
}