veza/veza-backend-api/internal/services/advanced_analytics_service.go
senke 41b5f6c455
Some checks failed
Veza CI / Backend (Go) (push) Waiting to run
Veza CI / Frontend (Web) (push) Waiting to run
Veza CI / Notify on failure (push) Blocked by required conditions
Security Scan / Secret Scanning (gitleaks) (push) Failing after 3m4s
Veza CI / Rust (Stream Server) (push) Has been cancelled
Backend API CI / test-integration (push) Failing after 11m59s
Backend API CI / test-unit (push) Failing after 12m1s
style(backend): gofmt -w on 85 files (whitespace only)
backend-ci.yml's `test -z "$(gofmt -l .)"` strict gate (added in
c96edd692) failed on a backlog of unformatted files. None of the
85 files in this commit had been edited since the gate was added
because no push touched veza-backend-api/** in between, so the
gate never fired until today's CI fixes triggered it.

The diff is exclusively whitespace alignment in struct literals
and trailing-space comments. `go build ./...` and the full test
suite (with VEZA_SKIP_INTEGRATION=1 -short) pass identically.
2026-04-14 12:22:14 +02:00

645 lines
22 KiB
Go

package services
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"gorm.io/gorm"
)
// AdvancedAnalyticsService provides advanced analytics for creators (F396-F399)
// All data is private to the creator — never exposed publicly.
//
// It covers listening heatmaps (F396), period comparison (F397), marketplace
// analytics (F398) and metric alerts (F399). All reads go through the
// injected *gorm.DB; the logger is used for best-effort queries whose
// failure should not abort an entire report.
type AdvancedAnalyticsService struct {
db *gorm.DB // datastore handle (tracks, analytics, marketplace, alert tables)
logger *zap.Logger // never nil: the constructor substitutes a no-op logger
}
// NewAdvancedAnalyticsService constructs the analytics service.
// A nil logger is replaced by a no-op logger so methods never have to
// nil-check before logging.
func NewAdvancedAnalyticsService(db *gorm.DB, logger *zap.Logger) *AdvancedAnalyticsService {
	log := logger
	if log == nil {
		log = zap.NewNop()
	}
	return &AdvancedAnalyticsService{
		db:     db,
		logger: log,
	}
}
// --- F396: Heatmap d'écoute ---
// TrackSegmentStat represents a segment of a track with aggregated listening data
//
// Counts are summed over the date range requested from GetTrackHeatmap.
type TrackSegmentStat struct {
SegmentIndex int `json:"segment_index"` // 0-based position of the segment within the track
SegmentStartMs int64 `json:"segment_start_ms"` // segment start offset in milliseconds
SegmentEndMs int64 `json:"segment_end_ms"` // segment end offset in milliseconds
ListenCount int64 `json:"listen_count"` // listens recorded for this segment
DropOffCount int64 `json:"drop_off_count"` // drop-off events recorded for this segment
ReplayCount int64 `json:"replay_count"` // replay events recorded for this segment
Intensity float64 `json:"intensity"` // normalized 0-1
}
// TrackHeatmap represents the full heatmap for a track
type TrackHeatmap struct {
TrackID string `json:"track_id"`
TotalSegments int `json:"total_segments"` // == len(Segments)
SegmentDurationMs int64 `json:"segment_duration_ms"` // derived from the first segment; 0 when there is no data
Segments []TrackSegmentStat `json:"segments"`
MaxListens int64 `json:"max_listens"` // busiest segment's listen count (normalization base for Intensity)
AvgDropOff float64 `json:"avg_drop_off"` // total drop-offs divided by number of segments
}
// GetTrackHeatmap returns aggregated heatmap data for a track (F396).
//
// Ownership is verified first: the track must belong to creatorID and not be
// soft-deleted, otherwise an error is returned and no stats are exposed.
// Segment stats are summed over [startDate, endDate] (dates compared as
// YYYY-MM-DD strings) and each segment's intensity is normalized to 0-1
// against the busiest segment.
func (s *AdvancedAnalyticsService) GetTrackHeatmap(ctx context.Context, creatorID uuid.UUID, trackID uuid.UUID, startDate, endDate time.Time) (*TrackHeatmap, error) {
	// Heatmaps are creator-private: refuse tracks the caller does not own.
	var owned int64
	err := s.db.WithContext(ctx).Table("tracks").
		Where("id = ? AND creator_id = ? AND deleted_at IS NULL", trackID, creatorID).
		Count(&owned).Error
	if err != nil {
		return nil, fmt.Errorf("failed to verify track ownership: %w", err)
	}
	if owned == 0 {
		return nil, fmt.Errorf("track not found or not owned by creator")
	}

	type segmentRow struct {
		SegmentIndex   int   `gorm:"column:segment_index"`
		SegmentStartMs int64 `gorm:"column:segment_start_ms"`
		SegmentEndMs   int64 `gorm:"column:segment_end_ms"`
		ListenCount    int64 `gorm:"column:listen_count"`
		DropOffCount   int64 `gorm:"column:drop_off_count"`
		ReplayCount    int64 `gorm:"column:replay_count"`
	}
	var segRows []segmentRow
	if err := s.db.WithContext(ctx).Raw(`
		SELECT segment_index, segment_start_ms, segment_end_ms,
		       SUM(listen_count) AS listen_count,
		       SUM(drop_off_count) AS drop_off_count,
		       SUM(replay_count) AS replay_count
		FROM track_segment_stats
		WHERE track_id = ? AND date >= ? AND date <= ?
		GROUP BY segment_index, segment_start_ms, segment_end_ms
		ORDER BY segment_index ASC
	`, trackID, startDate.Format("2006-01-02"), endDate.Format("2006-01-02")).Scan(&segRows).Error; err != nil {
		return nil, fmt.Errorf("failed to get heatmap data: %w", err)
	}

	// First pass: copy raw stats while tracking the busiest segment (the
	// normalization base) and the running drop-off total.
	var peakListens, dropOffTotal int64
	segments := make([]TrackSegmentStat, 0, len(segRows))
	for _, row := range segRows {
		segments = append(segments, TrackSegmentStat{
			SegmentIndex:   row.SegmentIndex,
			SegmentStartMs: row.SegmentStartMs,
			SegmentEndMs:   row.SegmentEndMs,
			ListenCount:    row.ListenCount,
			DropOffCount:   row.DropOffCount,
			ReplayCount:    row.ReplayCount,
		})
		if row.ListenCount > peakListens {
			peakListens = row.ListenCount
		}
		dropOffTotal += row.DropOffCount
	}
	// Second pass: intensity = listens relative to the busiest segment.
	// When every segment is zero, intensities stay 0 (no division by zero).
	if peakListens > 0 {
		for i := range segments {
			segments[i].Intensity = float64(segments[i].ListenCount) / float64(peakListens)
		}
	}

	heatmap := &TrackHeatmap{
		TrackID:       trackID.String(),
		TotalSegments: len(segments),
		Segments:      segments,
		MaxListens:    peakListens,
	}
	if len(segments) > 0 {
		heatmap.AvgDropOff = float64(dropOffTotal) / float64(len(segments))
		// All segments share one duration; derive it from the first row.
		heatmap.SegmentDurationMs = segRows[0].SegmentEndMs - segRows[0].SegmentStartMs
	}
	return heatmap, nil
}
// --- F397: Comparaison de périodes ---
// PeriodComparison holds the comparison between two time periods
type PeriodComparison struct {
CurrentPeriod PeriodStats `json:"current_period"`
PreviousPeriod PeriodStats `json:"previous_period"`
Changes PeriodChanges `json:"changes"` // current vs previous, expressed as percentages
}
// PeriodStats holds stats for a single period
type PeriodStats struct {
StartDate string `json:"start_date"` // RFC 3339, set by ComparePeriods
EndDate string `json:"end_date"` // RFC 3339, set by ComparePeriods
TotalPlays int64 `json:"total_plays"`
UniqueListeners int64 `json:"unique_listeners"` // distinct user_ids in playback_analytics
CompleteListens int64 `json:"complete_listens"` // plays with completion_rate >= 90
TotalPlayTime int64 `json:"total_play_time"` // sum of playback_analytics.play_time (units as stored — presumably seconds, confirm against writer)
AvgCompletion float64 `json:"avg_completion"` // average completion_rate over the period
TotalRevenue float64 `json:"total_revenue"` // sum of order_items.price for completed/paid orders
NewFollowers int64 `json:"new_followers"` // follows created within the period
}
// PeriodChanges holds the percentage changes between periods
//
// Each field is computed by calcPercentChange/calcFloatPercentChange: a zero
// baseline maps to +100% when the metric grew and 0% otherwise.
type PeriodChanges struct {
PlaysChange float64 `json:"plays_change"` // percentage
ListenersChange float64 `json:"listeners_change"` // percentage
CompletionChange float64 `json:"completion_change"` // percentage
RevenueChange float64 `json:"revenue_change"` // percentage
FollowersChange float64 `json:"followers_change"` // percentage
PlayTimeChange float64 `json:"play_time_change"` // percentage
}
// ComparePeriods compares analytics between two time periods (F397).
//
// Both periods are aggregated with identical query logic (getPeriodStats)
// and the relative differences are expressed as percentages so the caller
// can render "vs previous period" deltas directly.
func (s *AdvancedAnalyticsService) ComparePeriods(ctx context.Context, creatorID uuid.UUID, currentStart, currentEnd, previousStart, previousEnd time.Time) (*PeriodComparison, error) {
	cur, err := s.getPeriodStats(ctx, creatorID, currentStart, currentEnd)
	if err != nil {
		return nil, fmt.Errorf("failed to get current period: %w", err)
	}
	cur.StartDate = currentStart.Format(time.RFC3339)
	cur.EndDate = currentEnd.Format(time.RFC3339)

	prev, err := s.getPeriodStats(ctx, creatorID, previousStart, previousEnd)
	if err != nil {
		return nil, fmt.Errorf("failed to get previous period: %w", err)
	}
	prev.StartDate = previousStart.Format(time.RFC3339)
	prev.EndDate = previousEnd.Format(time.RFC3339)

	return &PeriodComparison{
		CurrentPeriod:  *cur,
		PreviousPeriod: *prev,
		Changes: PeriodChanges{
			PlaysChange:      calcPercentChange(prev.TotalPlays, cur.TotalPlays),
			ListenersChange:  calcPercentChange(prev.UniqueListeners, cur.UniqueListeners),
			CompletionChange: calcFloatPercentChange(prev.AvgCompletion, cur.AvgCompletion),
			RevenueChange:    calcFloatPercentChange(prev.TotalRevenue, cur.TotalRevenue),
			FollowersChange:  calcPercentChange(prev.NewFollowers, cur.NewFollowers),
			PlayTimeChange:   calcPercentChange(prev.TotalPlayTime, cur.TotalPlayTime),
		},
	}, nil
}
// getPeriodStats aggregates playback, revenue and follower stats for one
// creator over [startDate, endDate]. The playback aggregation is fatal on
// error; the revenue and follower queries are best-effort (logged, left at
// zero on failure) so one missing table does not break the comparison.
// StartDate/EndDate are left empty — the caller fills them in.
func (s *AdvancedAnalyticsService) getPeriodStats(ctx context.Context, creatorID uuid.UUID, startDate, endDate time.Time) (*PeriodStats, error) {
	var agg struct {
		TotalPlays      int64   `gorm:"column:total_plays"`
		UniqueListeners int64   `gorm:"column:unique_listeners"`
		CompleteListens int64   `gorm:"column:complete_listens"`
		TotalPlayTime   int64   `gorm:"column:total_play_time"`
		AvgCompletion   float64 `gorm:"column:avg_completion"`
	}
	err := s.db.WithContext(ctx).Raw(`
		SELECT
		COUNT(pa.id) AS total_plays,
		COUNT(DISTINCT pa.user_id) AS unique_listeners,
		COUNT(CASE WHEN pa.completion_rate >= 90 THEN 1 END) AS complete_listens,
		COALESCE(SUM(pa.play_time), 0) AS total_play_time,
		COALESCE(AVG(pa.completion_rate), 0) AS avg_completion
		FROM playback_analytics pa
		JOIN tracks t ON t.id = pa.track_id
		WHERE t.creator_id = ? AND pa.started_at >= ? AND pa.started_at <= ?
	`, creatorID, startDate, endDate).Scan(&agg).Error
	if err != nil {
		return nil, fmt.Errorf("failed to get playback stats: %w", err)
	}
	stats := &PeriodStats{
		TotalPlays:      agg.TotalPlays,
		UniqueListeners: agg.UniqueListeners,
		CompleteListens: agg.CompleteListens,
		TotalPlayTime:   agg.TotalPlayTime,
		AvgCompletion:   agg.AvgCompletion,
	}
	// Revenue: only completed/paid orders for products sold by this creator.
	if err := s.db.WithContext(ctx).Raw(`
		SELECT COALESCE(SUM(oi.price), 0)
		FROM order_items oi
		JOIN orders o ON o.id = oi.order_id
		JOIN products p ON p.id = oi.product_id
		WHERE p.seller_id = ? AND o.status IN ('completed', 'paid')
		AND o.created_at >= ? AND o.created_at <= ?
	`, creatorID, startDate, endDate).Scan(&stats.TotalRevenue).Error; err != nil {
		s.logger.Warn("Failed to get revenue", zap.Error(err))
	}
	// Followers gained within the period.
	if err := s.db.WithContext(ctx).Raw(`
		SELECT COUNT(*) FROM follows
		WHERE followed_id = ? AND created_at >= ? AND created_at <= ?
	`, creatorID, startDate, endDate).Scan(&stats.NewFollowers).Error; err != nil {
		s.logger.Warn("Failed to get new followers", zap.Error(err))
	}
	return stats, nil
}
// calcPercentChange returns the percentage change from previous to current
// for integer metrics. A zero baseline maps to +100% when the metric grew
// and to 0% otherwise, so callers never divide by zero.
func calcPercentChange(previous, current int64) float64 {
	switch {
	case previous != 0:
		return float64(current-previous) / float64(previous) * 100.0
	case current > 0:
		return 100.0
	default:
		return 0.0
	}
}
// calcFloatPercentChange is the float counterpart of calcPercentChange:
// percentage change from previous to current, with a zero baseline mapping
// to +100% on growth and 0% otherwise (never divides by zero).
func calcFloatPercentChange(previous, current float64) float64 {
	switch {
	case previous != 0:
		return (current - previous) / previous * 100.0
	case current > 0:
		return 100.0
	default:
		return 0.0
	}
}
// --- F398: Analytics Marketplace ---
// MarketplaceAnalytics holds marketplace analytics data for a creator
type MarketplaceAnalytics struct {
TotalViews int64 `json:"total_views"` // sum of per-product views in the period
TotalSales int64 `json:"total_sales"` // sum of per-product sales in the period
TotalRevenue float64 `json:"total_revenue"` // gross revenue before commission
OverallConversion float64 `json:"overall_conversion"` // percentage
PlatformCommission float64 `json:"platform_commission"` // 15% of TotalRevenue (Veza business model)
NetRevenue float64 `json:"net_revenue"` // TotalRevenue minus PlatformCommission
Products []ProductAnalytics `json:"products"` // ordered by revenue, descending
RevenueTimeline []MarketplaceRevenue `json:"revenue_timeline"` // per-day breakdown, ascending by date
}
// ProductAnalytics holds analytics for a single product
type ProductAnalytics struct {
ProductID string `json:"product_id"`
Name string `json:"name"`
ProductType string `json:"product_type"` // defaults to 'track' when NULL in the DB
Views int64 `json:"views"` // product_views rows in the period
Sales int64 `json:"sales"` // order_items on completed/paid orders in the period
Revenue float64 `json:"revenue"` // summed order_items.price
ConversionRate float64 `json:"conversion_rate"` // percentage
}
// MarketplaceRevenue holds revenue data for a time period
//
// One entry per day; days with views but no sales (or vice versa) still
// appear, with the missing side reported as zero.
type MarketplaceRevenue struct {
Date string `json:"date"` // calendar day (DATE() of the underlying timestamps)
Revenue float64 `json:"revenue"`
Sales int64 `json:"sales"`
Views int64 `json:"views"`
}
// GetMarketplaceAnalytics returns marketplace analytics for the creator (F398)
//
// Aggregates per-product views/sales/revenue over [startDate, endDate],
// derives overall conversion and the platform's 15% commission, and builds a
// daily revenue timeline. The per-product query is fatal on error; the
// timeline query is best-effort (logged, empty timeline on failure).
func (s *AdvancedAnalyticsService) GetMarketplaceAnalytics(ctx context.Context, creatorID uuid.UUID, startDate, endDate time.Time) (*MarketplaceAnalytics, error) {
// Initialized to empty (non-nil) slices so JSON encodes [] rather than null.
analytics := &MarketplaceAnalytics{
Products: []ProductAnalytics{},
RevenueTimeline: []MarketplaceRevenue{},
}
// Per-product views, sales, and conversion
var productRows []struct {
ProductID string `gorm:"column:product_id"`
Name string `gorm:"column:name"`
ProductType string `gorm:"column:product_type"`
Views int64 `gorm:"column:views"`
Sales int64 `gorm:"column:sales"`
Revenue float64 `gorm:"column:revenue"`
}
// NOTE: the date range is bound twice (views subquery, then sales subquery)
// before the trailing seller_id — keep the ? order in sync with the args.
if err := s.db.WithContext(ctx).Raw(`
SELECT CAST(p.id AS TEXT) AS product_id,
p.name,
COALESCE(p.product_type, 'track') AS product_type,
COALESCE(pv.view_count, 0) AS views,
COALESCE(oi.sale_count, 0) AS sales,
COALESCE(oi.revenue, 0) AS revenue
FROM products p
LEFT JOIN (
SELECT product_id, COUNT(*) AS view_count
FROM product_views
WHERE created_at >= ? AND created_at <= ?
GROUP BY product_id
) pv ON pv.product_id = p.id
LEFT JOIN (
SELECT oi.product_id, COUNT(oi.id) AS sale_count, SUM(oi.price) AS revenue
FROM order_items oi
JOIN orders o ON o.id = oi.order_id
WHERE o.status IN ('completed', 'paid')
AND o.created_at >= ? AND o.created_at <= ?
GROUP BY oi.product_id
) oi ON oi.product_id = p.id
WHERE p.seller_id = ?
ORDER BY COALESCE(oi.revenue, 0) DESC
`, startDate, endDate, startDate, endDate, creatorID).Scan(&productRows).Error; err != nil {
return nil, fmt.Errorf("failed to get product analytics: %w", err)
}
var totalViews, totalSales int64
var totalRevenue float64
for _, r := range productRows {
// Per-product conversion: sales/views as a percentage; 0 when no views.
convRate := float64(0)
if r.Views > 0 {
convRate = float64(r.Sales) / float64(r.Views) * 100.0
}
analytics.Products = append(analytics.Products, ProductAnalytics{
ProductID: r.ProductID,
Name: r.Name,
ProductType: r.ProductType,
Views: r.Views,
Sales: r.Sales,
Revenue: r.Revenue,
ConversionRate: convRate,
})
totalViews += r.Views
totalSales += r.Sales
totalRevenue += r.Revenue
}
analytics.TotalViews = totalViews
analytics.TotalSales = totalSales
analytics.TotalRevenue = totalRevenue
// Overall conversion across all products; left at 0 when there were no views.
if totalViews > 0 {
analytics.OverallConversion = float64(totalSales) / float64(totalViews) * 100.0
}
// Platform commission (15% as per Veza business model)
analytics.PlatformCommission = totalRevenue * 0.15
analytics.NetRevenue = totalRevenue - analytics.PlatformCommission
// Revenue timeline
var timelineRows []struct {
Date string `gorm:"column:date"`
Revenue float64 `gorm:"column:revenue"`
Sales int64 `gorm:"column:sales"`
Views int64 `gorm:"column:views"`
}
// Daily sales and daily views are aggregated independently and FULL OUTER
// JOINed so days with only one of the two still appear in the timeline.
if err := s.db.WithContext(ctx).Raw(`
WITH daily_sales AS (
SELECT DATE(o.created_at) AS date,
COALESCE(SUM(oi.price), 0) AS revenue,
COUNT(oi.id) AS sales
FROM order_items oi
JOIN orders o ON o.id = oi.order_id
JOIN products p ON p.id = oi.product_id
WHERE p.seller_id = ? AND o.status IN ('completed', 'paid')
AND o.created_at >= ? AND o.created_at <= ?
GROUP BY DATE(o.created_at)
),
daily_views AS (
SELECT DATE(pv.created_at) AS date, COUNT(*) AS views
FROM product_views pv
JOIN products p ON p.id = pv.product_id
WHERE p.seller_id = ? AND pv.created_at >= ? AND pv.created_at <= ?
GROUP BY DATE(pv.created_at)
)
SELECT COALESCE(ds.date, dv.date) AS date,
COALESCE(ds.revenue, 0) AS revenue,
COALESCE(ds.sales, 0) AS sales,
COALESCE(dv.views, 0) AS views
FROM daily_sales ds
FULL OUTER JOIN daily_views dv ON ds.date = dv.date
ORDER BY date ASC
`, creatorID, startDate, endDate, creatorID, startDate, endDate).Scan(&timelineRows).Error; err != nil {
// Best-effort: an empty timeline beats failing the whole report.
s.logger.Warn("Failed to get revenue timeline", zap.Error(err))
}
for _, r := range timelineRows {
analytics.RevenueTimeline = append(analytics.RevenueTimeline, MarketplaceRevenue{
Date: r.Date,
Revenue: r.Revenue,
Sales: r.Sales,
Views: r.Views,
})
}
return analytics, nil
}
// --- F399: Alertes métriques ---
// MetricAlert represents a metric alert configuration and status
type MetricAlert struct {
ID string `json:"id"`
MetricType string `json:"metric_type"` // one of: plays, followers, sales, listeners
Threshold int64 `json:"threshold"` // metric value at which the alert fires (> 0)
IsTriggered bool `json:"is_triggered"` // true once the metric reached Threshold
TriggeredAt *time.Time `json:"triggered_at,omitempty"` // set when triggered; nil otherwise
CreatedAt time.Time `json:"created_at"`
}
// MetricAlertPreference represents the user's preference for a metric type
type MetricAlertPreference struct {
MetricType string `json:"metric_type"` // one of: plays, followers, sales, listeners
Enabled bool `json:"enabled"` // whether alerts of this type may trigger
}
// MetricAlertSummary holds all alerts and preferences for a user
type MetricAlertSummary struct {
Preferences []MetricAlertPreference `json:"preferences"` // stored prefs, or enabled-by-default fallbacks
Alerts []MetricAlert `json:"alerts"` // every configured alert
Pending []MetricAlert `json:"pending"` // triggered but not yet seen
}
// GetMetricAlerts returns all metric alerts for a user (F399).
//
// The summary contains the user's per-metric notification preferences (with
// enabled-by-default fallbacks when none are stored), every configured
// alert, and the subset currently in the triggered state ("pending").
func (s *AdvancedAnalyticsService) GetMetricAlerts(ctx context.Context, userID uuid.UUID) (*MetricAlertSummary, error) {
	// Non-nil slices so JSON encodes [] rather than null.
	out := &MetricAlertSummary{
		Preferences: []MetricAlertPreference{},
		Alerts:      []MetricAlert{},
		Pending:     []MetricAlert{},
	}

	// Stored preferences; failure is non-fatal and falls through to defaults.
	var prefRecords []struct {
		MetricType string `gorm:"column:metric_type"`
		Enabled    bool   `gorm:"column:enabled"`
	}
	if err := s.db.WithContext(ctx).Raw(`
		SELECT metric_type, enabled FROM metric_alert_preferences
		WHERE user_id = ?
		ORDER BY metric_type
	`, userID).Scan(&prefRecords).Error; err != nil {
		s.logger.Warn("Failed to get alert preferences", zap.Error(err))
	}
	if len(prefRecords) == 0 {
		// No stored rows: every supported metric defaults to enabled,
		// leaving the user in control of disabling them later.
		for _, metric := range []string{"plays", "followers", "sales", "listeners"} {
			out.Preferences = append(out.Preferences, MetricAlertPreference{
				MetricType: metric,
				Enabled:    true,
			})
		}
	} else {
		for _, rec := range prefRecords {
			out.Preferences = append(out.Preferences, MetricAlertPreference{
				MetricType: rec.MetricType,
				Enabled:    rec.Enabled,
			})
		}
	}

	// Configured alerts; this query failing is fatal for the call.
	var alertRecords []struct {
		ID          uuid.UUID  `gorm:"column:id"`
		MetricType  string     `gorm:"column:metric_type"`
		Threshold   int64      `gorm:"column:threshold"`
		IsTriggered bool       `gorm:"column:is_triggered"`
		TriggeredAt *time.Time `gorm:"column:triggered_at"`
		CreatedAt   time.Time  `gorm:"column:created_at"`
	}
	if err := s.db.WithContext(ctx).Raw(`
		SELECT id, metric_type, threshold, is_triggered, triggered_at, created_at
		FROM metric_alerts
		WHERE user_id = ?
		ORDER BY metric_type, threshold
	`, userID).Scan(&alertRecords).Error; err != nil {
		return nil, fmt.Errorf("failed to get alerts: %w", err)
	}
	for _, rec := range alertRecords {
		entry := MetricAlert{
			ID:          rec.ID.String(),
			MetricType:  rec.MetricType,
			Threshold:   rec.Threshold,
			IsTriggered: rec.IsTriggered,
			TriggeredAt: rec.TriggeredAt,
			CreatedAt:   rec.CreatedAt,
		}
		out.Alerts = append(out.Alerts, entry)
		// Triggered alerts are surfaced separately as "pending".
		if entry.IsTriggered {
			out.Pending = append(out.Pending, entry)
		}
	}
	return out, nil
}
// UpdateAlertPreference updates the preference for a metric type (F399).
// The (user, metric) row is upserted, so the call works whether or not a
// preference already exists. Unknown metric types are rejected.
func (s *AdvancedAnalyticsService) UpdateAlertPreference(ctx context.Context, userID uuid.UUID, metricType string, enabled bool) error {
	switch metricType {
	case "plays", "followers", "sales", "listeners":
		// supported metric type
	default:
		return fmt.Errorf("invalid metric type: %s", metricType)
	}
	return s.db.WithContext(ctx).Exec(`
		INSERT INTO metric_alert_preferences (user_id, metric_type, enabled)
		VALUES (?, ?, ?)
		ON CONFLICT (user_id, metric_type)
		DO UPDATE SET enabled = EXCLUDED.enabled, updated_at = NOW()
	`, userID, metricType, enabled).Error
}
// CreateAlertThreshold creates a new alert threshold for a metric (F399).
// The metric type must be supported and the threshold strictly positive;
// a duplicate (user, metric, threshold) row is silently ignored via
// ON CONFLICT DO NOTHING.
func (s *AdvancedAnalyticsService) CreateAlertThreshold(ctx context.Context, userID uuid.UUID, metricType string, threshold int64) error {
	switch metricType {
	case "plays", "followers", "sales", "listeners":
		// supported metric type
	default:
		return fmt.Errorf("invalid metric type: %s", metricType)
	}
	if threshold <= 0 {
		return fmt.Errorf("threshold must be positive")
	}
	return s.db.WithContext(ctx).Exec(`
		INSERT INTO metric_alerts (user_id, metric_type, threshold)
		VALUES (?, ?, ?)
		ON CONFLICT (user_id, metric_type, threshold) DO NOTHING
	`, userID, metricType, threshold).Error
}
// DeleteAlertThreshold removes an alert threshold (F399). The user_id guard
// in the DELETE ensures users can only delete their own alerts; deleting a
// nonexistent (or someone else's) alert reports "alert not found".
func (s *AdvancedAnalyticsService) DeleteAlertThreshold(ctx context.Context, userID uuid.UUID, alertID uuid.UUID) error {
	res := s.db.WithContext(ctx).Exec(`
		DELETE FROM metric_alerts WHERE id = ? AND user_id = ?
	`, alertID, userID)
	switch {
	case res.Error != nil:
		return fmt.Errorf("failed to delete alert: %w", res.Error)
	case res.RowsAffected == 0:
		return fmt.Errorf("alert not found")
	default:
		return nil
	}
}
// CheckAndTriggerAlerts checks current metrics against thresholds and triggers alerts (F399)
// This should be called periodically (e.g., by a cron job).
//
// For every untriggered alert whose metric type is enabled in the user's
// preferences, the current all-time metric value is computed; alerts whose
// threshold has been reached are marked triggered in the database and
// returned. Metric-query and update errors are logged and skip only the
// affected alert (previously these errors were silently discarded, which
// could wrongly skip an alert or report one as triggered without persisting
// it). CreatedAt is not populated on the returned alerts.
func (s *AdvancedAnalyticsService) CheckAndTriggerAlerts(ctx context.Context, userID uuid.UUID) ([]MetricAlert, error) {
	var triggered []MetricAlert
	// Get untriggered alerts for enabled metric types
	var alerts []struct {
		ID         uuid.UUID `gorm:"column:id"`
		MetricType string    `gorm:"column:metric_type"`
		Threshold  int64     `gorm:"column:threshold"`
	}
	if err := s.db.WithContext(ctx).Raw(`
		SELECT ma.id, ma.metric_type, ma.threshold
		FROM metric_alerts ma
		JOIN metric_alert_preferences map ON map.user_id = ma.user_id AND map.metric_type = ma.metric_type
		WHERE ma.user_id = ? AND ma.is_triggered = FALSE AND map.enabled = TRUE
	`, userID).Scan(&alerts).Error; err != nil {
		return nil, fmt.Errorf("failed to get pending alerts: %w", err)
	}
	for _, alert := range alerts {
		currentValue, err := s.currentMetricValue(ctx, userID, alert.MetricType)
		if err != nil {
			// Best-effort: a failed metric query must not abort the whole
			// run, but it must not be silently treated as a zero value either.
			s.logger.Warn("Failed to compute metric value for alert check",
				zap.String("metric_type", alert.MetricType), zap.Error(err))
			continue
		}
		if currentValue < alert.Threshold {
			continue
		}
		now := time.Now()
		if err := s.db.WithContext(ctx).Exec(`
			UPDATE metric_alerts SET is_triggered = TRUE, triggered_at = ?, updated_at = ?
			WHERE id = ?
		`, now, now, alert.ID).Error; err != nil {
			// Don't report the alert as triggered when the flag could not be
			// persisted — it will be retried on the next periodic check.
			s.logger.Warn("Failed to mark alert as triggered",
				zap.String("alert_id", alert.ID.String()), zap.Error(err))
			continue
		}
		triggered = append(triggered, MetricAlert{
			ID:          alert.ID.String(),
			MetricType:  alert.MetricType,
			Threshold:   alert.Threshold,
			IsTriggered: true,
			TriggeredAt: &now,
		})
	}
	return triggered, nil
}

// currentMetricValue returns the all-time value of one supported metric for
// the given creator. Unknown metric types yield 0 with no error, so such an
// alert simply never fires (matching the previous switch's default behavior).
func (s *AdvancedAnalyticsService) currentMetricValue(ctx context.Context, userID uuid.UUID, metricType string) (int64, error) {
	var value int64
	switch metricType {
	case "plays":
		err := s.db.WithContext(ctx).Raw(`
			SELECT COUNT(*) FROM playback_analytics pa
			JOIN tracks t ON t.id = pa.track_id
			WHERE t.creator_id = ?
		`, userID).Scan(&value).Error
		return value, err
	case "followers":
		err := s.db.WithContext(ctx).Raw(`
			SELECT COUNT(*) FROM follows WHERE followed_id = ?
		`, userID).Scan(&value).Error
		return value, err
	case "sales":
		err := s.db.WithContext(ctx).Raw(`
			SELECT COUNT(oi.id)
			FROM order_items oi
			JOIN orders o ON o.id = oi.order_id
			JOIN products p ON p.id = oi.product_id
			WHERE p.seller_id = ? AND o.status IN ('completed', 'paid')
		`, userID).Scan(&value).Error
		return value, err
	case "listeners":
		err := s.db.WithContext(ctx).Raw(`
			SELECT COUNT(DISTINCT pa.user_id)
			FROM playback_analytics pa
			JOIN tracks t ON t.id = pa.track_id
			WHERE t.creator_id = ?
		`, userID).Scan(&value).Error
		return value, err
	default:
		return 0, nil
	}
}