// veza/fixtures/scenarios/performance/high-load.ts

import { UserGenerator } from '../../core/generators/users'
import { AudioGenerator } from '../../core/generators/audio'
import { StreamServerFixtures } from '../../services/stream-server'
import { vezaFaker } from '../../core/utils/faker-config'
import { globalConfig } from '../../core/config'
import type { User, Audio } from '../../core/schemas/database'
/**
* High Load Performance Scenario
*
* Simulates high traffic conditions to test system performance and scalability
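*
* A minimal end-to-end sketch (illustrative only; assumes a Vitest/Jest-style async test):
*
* @example
* const ctx = await HighLoadScenario.setup({ peakUsers: 250, duration: 300 })
* const { executionPlan, expectedResults } = HighLoadScenario.simulateLoadTest(ctx)
* expect(executionPlan).toHaveLength(3) // ramp_up, steady_state, ramp_down
* expect(expectedResults.errorRate.maximum).toBe(5)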
*/
export interface HighLoadScenarioContext {
loadProfile: LoadProfile
concurrentUsers: ConcurrentUser[]
streamingSessions: StreamingSession[]
messageFlood: MessageFlood[]
systemMetrics: SystemMetrics
testParameters: LoadTestParameters
}
export interface LoadProfile {
name: string
description: string
duration: number // seconds
rampUpTime: number // seconds
peakUsers: number
averageSessionDuration: number // seconds
actionsPerUser: number
expectedThroughput: number // requests per second
}
export interface ConcurrentUser {
id: string
user: User
sessionStart: Date
sessionEnd: Date
actions: UserLoadAction[]
currentActivity: 'streaming' | 'browsing' | 'chatting' | 'idle'
connectionQuality: 'excellent' | 'good' | 'poor'
deviceType: 'desktop' | 'mobile' | 'tablet'
}
export interface StreamingSession {
id: string
userId: string
trackId: string
startTime: Date
duration: number // seconds
quality: 'low' | 'medium' | 'high' | 'lossless'
bitrate: number
bufferEvents: BufferEvent[]
networkConditions: NetworkCondition
}
export interface MessageFlood {
conversationId: string
participantIds: string[]
messagesPerSecond: number
duration: number // seconds
totalMessages: number
messageTypes: ('text' | 'image' | 'audio' | 'file')[]
}
export interface BufferEvent {
timestamp: Date
type: 'buffer_start' | 'buffer_end' | 'quality_change' | 'connection_drop'
duration?: number // milliseconds
metadata?: any
}
export interface NetworkCondition {
bandwidth: number // Mbps
latency: number // ms
packetLoss: number // percentage
stability: 'stable' | 'unstable' | 'intermittent'
}
export interface SystemMetrics {
expectedLoad: {
cpuUsage: number // percentage
memoryUsage: number // percentage
diskIO: number // MB/s
networkIO: number // MB/s
databaseConnections: number
redisConnections: number
}
performanceTargets: {
averageResponseTime: number // ms
p95ResponseTime: number // ms
errorRate: number // percentage
throughput: number // requests per second
concurrentStreams: number
}
}
export interface LoadTestParameters {
scenario: string
environment: 'testing' | 'staging' | 'production'
testType: 'stress' | 'load' | 'spike' | 'endurance'
dataSize: 'small' | 'medium' | 'large' | 'xlarge'
monitoring: {
metricsInterval: number // seconds
alertThresholds: AlertThreshold[]
reportingEnabled: boolean
}
}
export interface AlertThreshold {
metric: string
operator: '>' | '<' | '>=' | '<=' | '=='
value: number
severity: 'warning' | 'critical'
}
export interface UserLoadAction {
type: 'login' | 'stream_track' | 'send_message' | 'create_playlist' | 'search' | 'browse'
timestamp: Date
duration: number // ms
success: boolean
responseTime: number // ms
errorCode?: string
metadata?: any
}
/**
* High Load Scenario Generator
*/
export class HighLoadScenario {
/**
* Generate high load test scenario
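*
* Unspecified fields fall back to the defaults in createLoadProfile; a sketch with illustrative values:
*
* @example
* const ctx = await HighLoadScenario.setup({ name: 'Smoke Load', peakUsers: 50, expectedThroughput: 25 })
* // ctx.loadProfile.duration === 600 (default), ctx.concurrentUsers.length === 50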
*/
static async setup(loadProfile?: Partial<LoadProfile>): Promise<HighLoadScenarioContext> {
console.log('🚀 Setting up high load performance scenario...')
const profile = this.createLoadProfile(loadProfile)
console.log(`📊 Load profile: ${profile.name} - ${profile.peakUsers} concurrent users`)
// Generate concurrent users
console.log('👥 Generating concurrent users...')
const concurrentUsers = await this.generateConcurrentUsers(profile.peakUsers)
// Generate streaming sessions
console.log('🎵 Setting up streaming sessions...')
const streamingSessions = this.generateStreamingSessions(concurrentUsers, profile)
// Generate message flood scenarios
console.log('💬 Creating message flood scenarios...')
const messageFlood = this.generateMessageFlood(concurrentUsers, profile)
// Calculate system metrics
const systemMetrics = this.calculateSystemMetrics(profile, concurrentUsers.length)
// Setup test parameters
const testParameters = this.createTestParameters(profile)
const context: HighLoadScenarioContext = {
loadProfile: profile,
concurrentUsers,
streamingSessions,
messageFlood,
systemMetrics,
testParameters
}
console.log('✅ High load scenario setup complete')
console.log(`📈 Expected throughput: ${profile.expectedThroughput} req/s`)
console.log(`⏱️ Test duration: ${profile.duration}s`)
return context
}
/**
* Generate multiple load scenarios
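*
* Returns the four predefined profiles in order (Light, Medium, Heavy, Spike); a usage sketch:
*
* @example
* const scenarios = await HighLoadScenario.setupMultipleScenarios()
* scenarios.forEach(s => console.log(`${s.loadProfile.name}: ${s.loadProfile.peakUsers} users`))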
*/
static async setupMultipleScenarios(): Promise<HighLoadScenarioContext[]> {
const scenarios: HighLoadScenarioContext[] = []
// Light load scenario
scenarios.push(await this.setup({
name: 'Light Load',
peakUsers: 100,
duration: 300, // 5 minutes
expectedThroughput: 50
}))
// Medium load scenario
scenarios.push(await this.setup({
name: 'Medium Load',
peakUsers: 500,
duration: 600, // 10 minutes
expectedThroughput: 200
}))
// Heavy load scenario
scenarios.push(await this.setup({
name: 'Heavy Load',
peakUsers: 1000,
duration: 900, // 15 minutes
expectedThroughput: 500
}))
// Spike test scenario
scenarios.push(await this.setup({
name: 'Spike Test',
peakUsers: 2000,
duration: 300, // 5 minutes
rampUpTime: 30, // Quick ramp up
expectedThroughput: 1000
}))
return scenarios
}
/**
* Simulate load test execution
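*
* An illustrative call; the result describes the planned phases, it does not run a live test:
*
* @example
* const ctx = await HighLoadScenario.setup()
* const { executionPlan } = HighLoadScenario.simulateLoadTest(ctx)
* const planned = executionPlan.reduce((sum, phase) => sum + phase.duration, 0)
* // planned === ctx.loadProfile.duration whenever rampUpTime + 60 <= duration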
*/
static simulateLoadTest(context: HighLoadScenarioContext): {
executionPlan: LoadTestExecution[]
expectedResults: LoadTestResults
monitoringPlan: MonitoringPlan
} {
console.log(`🎯 Simulating load test: ${context.loadProfile.name}`)
const executionPlan = this.createExecutionPlan(context)
const expectedResults = this.calculateExpectedResults(context)
const monitoringPlan = this.createMonitoringPlan(context)
console.log('✅ Load test simulation complete')
return {
executionPlan,
expectedResults,
monitoringPlan
}
}
/**
* Generate stress test data
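*
* A quick sketch of the output shape (sizes follow directly from the arguments):
*
* @example
* const { users, tracks, concurrentSessions } = HighLoadScenario.generateStressTestData(2000)
* // users.length === 2000, tracks.length === 1000, concurrentSessions === 1400 (70%)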
*/
static generateStressTestData(maxUsers: number = 5000): {
users: User[]
tracks: Audio[]
concurrentSessions: number
expectedBottlenecks: string[]
} {
console.log(`💥 Generating stress test data for ${maxUsers} users...`)
// Generate users for stress test
const users = UserGenerator.generateBatch(maxUsers, {
status: 'active',
withStats: false // Minimal data for performance
})
// Generate tracks for streaming
const tracks = AudioGenerator.generateBatch(1000, {
isPopular: true, // Popular tracks will be requested more
withWaveform: false // Skip waveform for performance
})
// Calculate concurrent sessions (70% of users streaming)
const concurrentSessions = Math.floor(maxUsers * 0.7)
// Identify expected bottlenecks
const expectedBottlenecks = [
'Database connection pool exhaustion',
'Redis memory usage spike',
'Network bandwidth saturation',
'CPU usage from audio processing',
'WebSocket connection limits',
'File system I/O for audio streaming'
]
console.log(`✅ Stress test data generated: ${users.length} users, ${tracks.length} tracks`)
return {
users,
tracks,
concurrentSessions,
expectedBottlenecks
}
}
// Private helper methods
private static createLoadProfile(partial?: Partial<LoadProfile>): LoadProfile {
const defaultProfile: LoadProfile = {
name: 'Standard Load Test',
description: 'Standard load testing scenario for Veza Platform',
duration: 600, // 10 minutes
rampUpTime: 120, // 2 minutes
peakUsers: globalConfig.generation.performance.concurrentUsers,
averageSessionDuration: 300, // 5 minutes
actionsPerUser: 20,
expectedThroughput: 100 // requests per second
}
return { ...defaultProfile, ...partial }
}
private static async generateConcurrentUsers(count: number): Promise<ConcurrentUser[]> {
const users: ConcurrentUser[] = []
const startTime = new Date()
for (let i = 0; i < count; i++) {
const user = UserGenerator.generate({
status: 'active',
withStats: false
})
const sessionDuration = vezaFaker.number.int({ min: 180, max: 1800 }) // 3-30 minutes
const sessionStart = new Date(startTime.getTime() + vezaFaker.number.int({ min: 0, max: 120000 })) // Stagger logins
const concurrentUser: ConcurrentUser = {
id: user.id,
user,
sessionStart,
sessionEnd: new Date(sessionStart.getTime() + sessionDuration * 1000),
actions: this.generateUserActions(sessionDuration),
currentActivity: vezaFaker.helpers.arrayElement(['streaming', 'browsing', 'chatting', 'idle']),
connectionQuality: vezaFaker.utils.weightedChoice([
{ item: 'excellent', weight: 0.3 },
{ item: 'good', weight: 0.5 },
{ item: 'poor', weight: 0.2 }
]),
deviceType: vezaFaker.helpers.arrayElement(['desktop', 'mobile', 'tablet'])
}
users.push(concurrentUser)
}
return users
}
private static generateStreamingSessions(
users: ConcurrentUser[],
profile: LoadProfile
): StreamingSession[] {
const sessions: StreamingSession[] = []
// Roughly 70% of users stream (each user is sampled independently)
const streamingUsers = users.filter(() => vezaFaker.datatype.boolean({ probability: 0.7 }))
streamingUsers.forEach(concurrentUser => {
const sessionCount = vezaFaker.number.int({ min: 1, max: 3 })
for (let i = 0; i < sessionCount; i++) {
const session = StreamServerFixtures.generateStreamingSession(
concurrentUser.user.id,
vezaFaker.string.uuid() // Random track ID (not linked to a generated Audio fixture)
)
// Add load-specific data
const streamingSession: StreamingSession = {
...session,
bufferEvents: this.generateBufferEvents(session.duration),
networkConditions: this.generateNetworkConditions(concurrentUser.connectionQuality)
}
sessions.push(streamingSession)
}
})
return sessions
}
private static generateMessageFlood(
users: ConcurrentUser[],
profile: LoadProfile
): MessageFlood[] {
const floods: MessageFlood[] = []
// Create high-traffic conversations
const highTrafficConversations = Math.floor(profile.peakUsers / 50) // One per 50 users
for (let i = 0; i < highTrafficConversations; i++) {
const participantCount = vezaFaker.number.int({ min: 20, max: 100 })
const participants = vezaFaker.helpers.arrayElements(users, participantCount)
const flood: MessageFlood = {
conversationId: vezaFaker.string.uuid(),
participantIds: participants.map(p => p.id),
messagesPerSecond: vezaFaker.number.int({ min: 5, max: 50 }),
duration: profile.duration * 0.8, // 80% of test duration
totalMessages: 0, // Will be calculated
messageTypes: ['text', 'image', 'audio']
}
flood.totalMessages = Math.floor(flood.messagesPerSecond * flood.duration)
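// e.g. 20 msg/s over 480 s (80% of a 600 s test) -> 9600 messages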
floods.push(flood)
}
return floods
}
private static generateUserActions(sessionDuration: number): UserLoadAction[] {
const actions: UserLoadAction[] = []
const actionCount = Math.floor(sessionDuration / 15) // One action every 15 seconds on average
const actionTypes = ['stream_track', 'send_message', 'search', 'browse', 'create_playlist']
const weights = [0.4, 0.25, 0.15, 0.15, 0.05] // Stream track is most common
for (let i = 0; i < actionCount; i++) {
const actionType = vezaFaker.utils.weightedChoice(
actionTypes.map((type, index) => ({ item: type, weight: weights[index] || 0.1 }))
) as UserLoadAction['type']
const timestamp = new Date(Date.now() + i * 15000 + vezaFaker.number.int({ min: 0, max: 10000 }))
const success = vezaFaker.datatype.boolean({ probability: 0.95 }) // 95% success rate
actions.push({
type: actionType,
timestamp,
duration: this.getActionDuration(actionType),
success,
responseTime: success ?
vezaFaker.number.int({ min: 50, max: 500 }) :
vezaFaker.number.int({ min: 1000, max: 5000 }),
errorCode: success ? undefined : this.getErrorCode(actionType),
metadata: this.getActionMetadata(actionType)
})
}
return actions
}
private static generateBufferEvents(duration: number): BufferEvent[] {
const events: BufferEvent[] = []
const eventCount = Math.floor(duration / 60) // One event per minute on average
for (let i = 0; i < eventCount; i++) {
const eventType = vezaFaker.helpers.arrayElement([
'buffer_start', 'buffer_end', 'quality_change', 'connection_drop'
]) as BufferEvent['type']
events.push({
timestamp: new Date(Date.now() + i * 60000 + vezaFaker.number.int({ min: 0, max: 30000 })),
type: eventType,
duration: eventType.includes('buffer') ? vezaFaker.number.int({ min: 100, max: 3000 }) : undefined,
metadata: {
reason: this.getBufferEventReason(eventType),
severity: vezaFaker.helpers.arrayElement(['low', 'medium', 'high'])
}
})
}
return events
}
private static generateNetworkConditions(quality: string): NetworkCondition {
const conditions = {
excellent: {
bandwidth: vezaFaker.number.int({ min: 50, max: 100 }),
latency: vezaFaker.number.int({ min: 10, max: 30 }),
packetLoss: vezaFaker.number.float({ min: 0, max: 0.1 }),
stability: 'stable' as const
},
good: {
bandwidth: vezaFaker.number.int({ min: 20, max: 50 }),
latency: vezaFaker.number.int({ min: 30, max: 100 }),
packetLoss: vezaFaker.number.float({ min: 0.1, max: 1 }),
stability: vezaFaker.helpers.arrayElement(['stable', 'unstable']) as NetworkCondition['stability']
},
poor: {
bandwidth: vezaFaker.number.int({ min: 5, max: 20 }),
latency: vezaFaker.number.int({ min: 100, max: 500 }),
packetLoss: vezaFaker.number.float({ min: 1, max: 5 }),
stability: vezaFaker.helpers.arrayElement(['unstable', 'intermittent']) as NetworkCondition['stability']
}
}
return conditions[quality as keyof typeof conditions] || conditions.good
}
private static calculateSystemMetrics(profile: LoadProfile, userCount: number): SystemMetrics {
// Estimate system resource usage based on load
const baseLoad = userCount / 1000 // Base calculation per 1000 users
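// e.g. 2000 users -> baseLoad = 2: cpuUsage = min(90, 20 + 2 * 30) = 80%, memoryUsage = min(85, 30 + 2 * 25) = 80%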
return {
expectedLoad: {
cpuUsage: Math.min(90, 20 + baseLoad * 30), // 20-90%
memoryUsage: Math.min(85, 30 + baseLoad * 25), // 30-85%
diskIO: Math.min(100, 10 + baseLoad * 40), // MB/s
networkIO: Math.min(500, 50 + baseLoad * 200), // MB/s
databaseConnections: Math.min(100, 10 + Math.floor(baseLoad * 50)),
redisConnections: Math.min(200, 20 + Math.floor(baseLoad * 80))
},
performanceTargets: {
averageResponseTime: 200, // ms
p95ResponseTime: 500, // ms
errorRate: 1, // 1% error rate acceptable
throughput: profile.expectedThroughput,
concurrentStreams: Math.floor(userCount * 0.7)
}
}
}
private static createTestParameters(profile: LoadProfile): LoadTestParameters {
return {
scenario: profile.name,
environment: 'testing',
testType: 'load',
dataSize: profile.peakUsers > 1000 ? 'large' : 'medium',
monitoring: {
metricsInterval: 10, // seconds
alertThresholds: [
{ metric: 'response_time_p95', operator: '>', value: 1000, severity: 'warning' },
{ metric: 'error_rate', operator: '>', value: 5, severity: 'critical' },
{ metric: 'cpu_usage', operator: '>', value: 90, severity: 'warning' },
{ metric: 'memory_usage', operator: '>', value: 90, severity: 'critical' }
],
reportingEnabled: true
}
}
}
private static createExecutionPlan(context: HighLoadScenarioContext): LoadTestExecution[] {
const plan: LoadTestExecution[] = []
const { loadProfile } = context
// Ramp up phase
plan.push({
phase: 'ramp_up',
duration: loadProfile.rampUpTime,
startUsers: 0,
endUsers: loadProfile.peakUsers,
description: 'Gradually increase user load'
})
// Steady state phase
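// e.g. default profile: 600 s total - 120 s ramp-up - 60 s ramp-down = 420 s at peak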
plan.push({
phase: 'steady_state',
duration: Math.max(0, loadProfile.duration - loadProfile.rampUpTime - 60), // guard against profiles where ramp-up plus the 60 s ramp-down exceed the total
startUsers: loadProfile.peakUsers,
endUsers: loadProfile.peakUsers,
description: 'Maintain peak load'
})
// Ramp down phase
plan.push({
phase: 'ramp_down',
duration: 60,
startUsers: loadProfile.peakUsers,
endUsers: 0,
description: 'Gradually decrease user load'
})
return plan
}
private static calculateExpectedResults(context: HighLoadScenarioContext): LoadTestResults {
const { loadProfile, systemMetrics } = context
return {
throughput: {
expected: loadProfile.expectedThroughput,
minimum: loadProfile.expectedThroughput * 0.8,
maximum: loadProfile.expectedThroughput * 1.2
},
responseTime: {
average: systemMetrics.performanceTargets.averageResponseTime,
p95: systemMetrics.performanceTargets.p95ResponseTime,
p99: systemMetrics.performanceTargets.p95ResponseTime * 1.5
},
errorRate: {
expected: systemMetrics.performanceTargets.errorRate,
maximum: 5 // 5% maximum acceptable error rate
},
resourceUsage: systemMetrics.expectedLoad,
bottlenecks: [
'Database connection pool under high concurrent access',
'Redis memory usage during peak streaming',
'Network bandwidth for concurrent audio streams',
'WebSocket connection management'
]
}
}
private static createMonitoringPlan(context: HighLoadScenarioContext): MonitoringPlan {
return {
metrics: [
'response_time_avg',
'response_time_p95',
'response_time_p99',
'throughput_rps',
'error_rate_percent',
'cpu_usage_percent',
'memory_usage_percent',
'disk_io_mbps',
'network_io_mbps',
'active_connections',
'streaming_sessions_count',
'message_queue_size'
],
alerts: context.testParameters.monitoring.alertThresholds,
reports: [
'Real-time dashboard',
'Performance summary report',
'Error analysis report',
'Resource utilization report',
'Bottleneck identification report'
],
frequency: context.testParameters.monitoring.metricsInterval
}
}
private static getActionDuration(actionType: string): number {
const durations = {
login: vezaFaker.number.int({ min: 1000, max: 3000 }),
stream_track: vezaFaker.number.int({ min: 30000, max: 300000 }), // 30s to 5min
send_message: vezaFaker.number.int({ min: 500, max: 2000 }),
create_playlist: vezaFaker.number.int({ min: 5000, max: 15000 }),
search: vezaFaker.number.int({ min: 200, max: 1000 }),
browse: vezaFaker.number.int({ min: 2000, max: 10000 })
}
return durations[actionType as keyof typeof durations] || 1000
}
private static getErrorCode(actionType: string): string {
const errorCodes = {
login: vezaFaker.helpers.arrayElement(['AUTH_FAILED', 'RATE_LIMITED', 'SERVER_ERROR']),
stream_track: vezaFaker.helpers.arrayElement(['TRACK_NOT_FOUND', 'STREAMING_ERROR', 'BANDWIDTH_LIMIT']),
send_message: vezaFaker.helpers.arrayElement(['MESSAGE_TOO_LONG', 'RATE_LIMITED', 'CONVERSATION_NOT_FOUND']),
create_playlist: vezaFaker.helpers.arrayElement(['INVALID_DATA', 'QUOTA_EXCEEDED', 'SERVER_ERROR']),
search: vezaFaker.helpers.arrayElement(['INVALID_QUERY', 'SEARCH_TIMEOUT', 'SERVER_ERROR']),
browse: vezaFaker.helpers.arrayElement(['PAGE_NOT_FOUND', 'SERVER_ERROR', 'TIMEOUT'])
}
return errorCodes[actionType as keyof typeof errorCodes] || 'UNKNOWN_ERROR'
}
private static getActionMetadata(actionType: string): any {
switch (actionType) {
case 'stream_track':
return {
quality: vezaFaker.helpers.arrayElement(['low', 'medium', 'high']),
duration: vezaFaker.number.int({ min: 30, max: 300 })
}
case 'search':
return {
query: vezaFaker.music.songTitle(),
filters: vezaFaker.helpers.arrayElements(['genre', 'artist', 'year'], 2)
}
case 'send_message':
return {
messageLength: vezaFaker.number.int({ min: 10, max: 500 }),
hasAttachment: vezaFaker.datatype.boolean({ probability: 0.1 })
}
default:
return {}
}
}
private static getBufferEventReason(eventType: string): string {
const reasons = {
buffer_start: vezaFaker.helpers.arrayElement(['slow_connection', 'server_load', 'network_congestion']),
buffer_end: 'buffer_filled',
quality_change: vezaFaker.helpers.arrayElement(['bandwidth_adaptation', 'user_preference', 'automatic_adjustment']),
connection_drop: vezaFaker.helpers.arrayElement(['network_timeout', 'server_restart', 'client_disconnect'])
}
return reasons[eventType as keyof typeof reasons] || 'unknown'
}
}
// Additional interfaces for the load test execution
interface LoadTestExecution {
phase: string
duration: number
startUsers: number
endUsers: number
description: string
}
interface LoadTestResults {
throughput: {
expected: number
minimum: number
maximum: number
}
responseTime: {
average: number
p95: number
p99: number
}
errorRate: {
expected: number
maximum: number
}
resourceUsage: SystemMetrics['expectedLoad']
bottlenecks: string[]
}
interface MonitoringPlan {
metrics: string[]
alerts: AlertThreshold[]
reports: string[]
frequency: number
}
/**
* Export scenario configuration
*/
export const HIGH_LOAD_SCENARIO = {
name: 'High Load Performance Test',
description: 'Comprehensive load testing scenario for system performance validation',
duration: 'Variable (5-15 minutes)',
complexity: 'high',
userTypes: ['concurrent_users'],
expectedOutcomes: [
'System handles peak load without degradation',
'Response times remain within acceptable limits',
'Error rates stay below threshold',
'Resource usage stays within the expected-load targets',
'Bottlenecks are identified and documented'
],
testCoverage: [
'concurrent_user_handling',
'streaming_performance',
'database_scalability',
'redis_cache_performance',
'websocket_connections',
'api_response_times',
'error_handling_under_load'
]
}
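
/*
 * Example wiring (a sketch only, not part of the fixture API; assumes a Vitest/Jest-style
 * runner and that this module is imported as './high-load'):
 *
 *   import { HighLoadScenario, HIGH_LOAD_SCENARIO } from './high-load'
 *
 *   describe(HIGH_LOAD_SCENARIO.name, () => {
 *     it('plans a load test for the medium profile', async () => {
 *       const ctx = await HighLoadScenario.setup({ name: 'Medium Load', peakUsers: 500 })
 *       const { expectedResults } = HighLoadScenario.simulateLoadTest(ctx)
 *       expect(expectedResults.throughput.expected).toBe(ctx.loadProfile.expectedThroughput)
 *       expect(expectedResults.errorRate.maximum).toBeLessThanOrEqual(5)
 *     })
 *   })
 */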