veza/veza-backend-api/internal/workers/webhook_worker.go

220 lines
5.5 KiB
Go

package workers
import (
"context"
"fmt"
"time"
"veza-backend-api/internal/models"
"veza-backend-api/internal/services"
"github.com/google/uuid"
"go.uber.org/zap"
"gorm.io/gorm"
)
// WebhookWorker delivers webhooks asynchronously through a bounded
// in-memory queue drained by a fixed pool of goroutines started by Start.
type WebhookWorker struct {
	db                *gorm.DB                 // persists delivery failures (logFailedDelivery, CleanupOldFailures)
	webhookService    *services.WebhookService // performs the actual delivery (DeliverWebhook)
	logger            *zap.Logger
	queue             chan WebhookJob // bounded; Enqueue drops jobs when it is full
	maxRetries        int             // maximum re-delivery attempts per job
	processingWorkers int             // number of consumer goroutines launched by Start
}
// WebhookJob is one webhook delivery task queued for background processing.
type WebhookJob struct {
	ID        uuid.UUID              // assigned by Enqueue
	Webhook   *models.Webhook        // target webhook configuration
	Event     string                 // event name delivered to the endpoint
	Data      map[string]interface{} // event payload
	Retries   int                    // delivery attempts made so far
	CreatedAt time.Time              // set by Enqueue
}
// NewWebhookWorker builds a webhook worker whose bounded queue holds up to
// queueSize jobs, drained by `workers` goroutines, with failed deliveries
// retried up to maxRetries times.
func NewWebhookWorker(
	db *gorm.DB,
	webhookService *services.WebhookService,
	logger *zap.Logger,
	queueSize int,
	workers int,
	maxRetries int,
) *WebhookWorker {
	worker := &WebhookWorker{
		db:                db,
		webhookService:    webhookService,
		logger:            logger,
		maxRetries:        maxRetries,
		processingWorkers: workers,
	}
	worker.queue = make(chan WebhookJob, queueSize)
	return worker
}
// Enqueue adds a job to the delivery queue without blocking.
//
// An ID and creation timestamp are assigned only when missing, so a job
// re-enqueued for retry keeps its original identifiers and remains
// correlatable across log lines (the previous version regenerated both on
// every call, giving each retry a brand-new job ID). When the queue is
// full the job is dropped with a warning rather than blocking the caller.
func (w *WebhookWorker) Enqueue(job WebhookJob) {
	if job.ID == uuid.Nil {
		job.ID = uuid.New()
	}
	if job.CreatedAt.IsZero() {
		job.CreatedAt = time.Now()
	}
	// Guard the log field: a nil Webhook must not panic the producer.
	url := ""
	if job.Webhook != nil {
		url = job.Webhook.URL
	}
	select {
	case w.queue <- job:
		w.logger.Debug("Webhook job enqueued",
			zap.String("job_id", job.ID.String()),
			zap.String("event", job.Event),
			zap.String("webhook_url", url))
	default:
		w.logger.Warn("Webhook queue full, dropping job",
			zap.String("job_id", job.ID.String()),
			zap.String("event", job.Event))
	}
}
// Start launches the pool of consumer goroutines; each runs until ctx is
// cancelled. NOTE(review): there is no WaitGroup, so callers cannot wait
// for in-flight jobs to drain after cancellation.
func (w *WebhookWorker) Start(ctx context.Context) {
	count := w.processingWorkers
	w.logger.Info("Starting webhook worker", zap.Int("workers", count))
	for id := 0; id < count; id++ {
		go w.processWorker(ctx, id)
	}
}
// processWorker is the consumer loop for one worker goroutine: it pulls
// jobs off the queue and processes them until ctx is cancelled.
func (w *WebhookWorker) processWorker(ctx context.Context, id int) {
	log := w.logger.With(zap.Int("worker_id", id))
	log.Info("Webhook worker started")
	for {
		select {
		case job := <-w.queue:
			w.processJob(ctx, job, id)
		case <-ctx.Done():
			log.Info("Webhook worker stopping")
			return
		}
	}
}
// processJob attempts a single webhook delivery with a 30-second timeout.
// On failure the attempt is persisted via logFailedDelivery and, until
// maxRetries is reached, the job is re-enqueued after an exponential
// backoff (1s, 2s, 4s, ...). The previous code claimed exponential backoff
// but computed a linear delay (retries * 1s).
func (w *WebhookWorker) processJob(ctx context.Context, job WebhookJob, workerID int) {
	logger := w.logger.With(
		zap.String("job_id", job.ID.String()),
		zap.String("event", job.Event),
		zap.Int("worker_id", workerID))
	logger.Info("Processing webhook job", zap.Int("retries", job.Retries))

	// Bound each attempt so one slow endpoint cannot stall this worker.
	deliveryCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	err := w.webhookService.DeliverWebhook(deliveryCtx, job.Webhook, job.Event, job.Data)
	if err == nil {
		logger.Info("Webhook delivered successfully")
		return
	}

	logger.Error("Webhook delivery failed", zap.Error(err))
	w.logFailedDelivery(ctx, job, err)

	if job.Retries >= w.maxRetries {
		logger.Error("Webhook delivery failed after max retries",
			zap.Int("max_retries", w.maxRetries))
		return
	}

	job.Retries++
	// True exponential backoff: attempt n waits 2^(n-1) seconds.
	// AfterFunc keeps this worker free to take other jobs while waiting.
	delay := time.Duration(1<<uint(job.Retries-1)) * time.Second
	time.AfterFunc(delay, func() {
		// Don't re-enqueue into a shut-down worker pool.
		if ctx.Err() != nil {
			return
		}
		logger.Info("Retrying webhook delivery (enqueued)",
			zap.Int("new_retries", job.Retries))
		w.Enqueue(job)
	})
}
// logFailedDelivery persists one failed delivery attempt to the
// webhook-failures table. Persistence errors are logged, never returned,
// so a broken audit trail cannot block the retry path.
func (w *WebhookWorker) logFailedDelivery(ctx context.Context, job WebhookJob, err error) {
	record := models.WebhookFailure{
		WebhookID: job.Webhook.ID,
		Event:     job.Event,
		Error:     err.Error(),
		Retries:   job.Retries,
		CreatedAt: time.Now(),
	}
	if dbErr := w.db.WithContext(ctx).Create(&record).Error; dbErr != nil {
		w.logger.Error("Failed to log webhook failure",
			zap.Error(dbErr),
			zap.String("job_id", job.ID.String()))
	}
}
// GetStats reports a point-in-time snapshot of the worker: current queue
// depth, worker-pool size, and configured retry limit.
func (w *WebhookWorker) GetStats() map[string]interface{} {
	stats := make(map[string]interface{}, 3)
	stats["queue_size"] = len(w.queue)
	stats["workers"] = w.processingWorkers
	stats["max_retries"] = w.maxRetries
	return stats
}
// CleanupOldFailures deletes webhook-failure records older than daysOld days.
//
// daysOld must be positive: a zero or negative value would place the cutoff
// at or after the current time and silently delete every record in the
// table, so it is rejected up front.
func (w *WebhookWorker) CleanupOldFailures(ctx context.Context, daysOld int) error {
	if daysOld <= 0 {
		return fmt.Errorf("cleanup old failures: daysOld must be positive, got %d", daysOld)
	}
	cutoffDate := time.Now().AddDate(0, 0, -daysOld)
	result := w.db.WithContext(ctx).
		Where("created_at < ?", cutoffDate).
		Delete(&models.WebhookFailure{})
	if result.Error != nil {
		return fmt.Errorf("failed to cleanup old failures: %w", result.Error)
	}
	// RowsAffected is int64; log it as int64 to avoid truncation where
	// int is 32 bits.
	w.logger.Info("Cleaned up old webhook failures",
		zap.Int64("rows_deleted", result.RowsAffected),
		zap.Int("days_old", daysOld))
	return nil
}
// RequeueFailed re-submits a previously failed webhook for delivery.
// The original event payload was not persisted with the failure record,
// so the retried delivery carries an empty data map. The stored retry
// count is carried over unchanged.
func (w *WebhookWorker) RequeueFailed(ctx context.Context, failure *models.WebhookFailure) error {
	// Load the webhook configuration referenced by the failure record.
	var webhook models.Webhook
	err := w.db.WithContext(ctx).First(&webhook, failure.WebhookID).Error
	if err != nil {
		return fmt.Errorf("failed to fetch webhook: %w", err)
	}

	w.Enqueue(WebhookJob{
		Webhook: &webhook,
		Event:   failure.Event,
		Data:    map[string]interface{}{}, // original payload was not stored
		Retries: failure.Retries,
	})

	w.logger.Info("Requeued failed webhook",
		zap.String("webhook_id", failure.WebhookID.String()),
		zap.Int("retries", failure.Retries))
	return nil
}