package testutils

import (
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestSetupParallelTest(t *testing.T) {
|
|
// Ce test vérifie que SetupParallelTest marque le test comme parallèle
|
|
// Note: On ne peut pas vraiment tester t.Parallel() directement, mais on peut
|
|
// vérifier que la fonction ne panique pas et s'exécute correctement
|
|
SetupParallelTest(t)
|
|
}
|
|
|
|
func TestRunParallelTests(t *testing.T) {
|
|
var counter int64
|
|
|
|
testFuncs := map[string]func(*testing.T){
|
|
"test1": func(t *testing.T) {
|
|
// Don't call SetupParallelTest here - RunParallelTests already calls t.Parallel()
|
|
atomic.AddInt64(&counter, 1)
|
|
t.Logf("test1 executed, counter=%d", atomic.LoadInt64(&counter))
|
|
},
|
|
"test2": func(t *testing.T) {
|
|
// Don't call SetupParallelTest here - RunParallelTests already calls t.Parallel()
|
|
atomic.AddInt64(&counter, 1)
|
|
t.Logf("test2 executed, counter=%d", atomic.LoadInt64(&counter))
|
|
},
|
|
"test3": func(t *testing.T) {
|
|
// Don't call SetupParallelTest here - RunParallelTests already calls t.Parallel()
|
|
atomic.AddInt64(&counter, 1)
|
|
t.Logf("test3 executed, counter=%d", atomic.LoadInt64(&counter))
|
|
},
|
|
}
|
|
|
|
// RunParallelTests uses t.Run() which should wait for all sub-tests to complete
|
|
// With t.Parallel(), tests are paused and resumed later, but t.Run() still blocks
|
|
// until all sub-tests complete. The issue is that the check happens before parallel
|
|
// tests resume. We need to check the count in a cleanup function that runs after
|
|
// all tests complete.
|
|
t.Cleanup(func() {
|
|
// This runs after all sub-tests complete
|
|
finalCount := atomic.LoadInt64(&counter)
|
|
if finalCount != 3 {
|
|
t.Errorf("Expected counter to be 3, got %d", finalCount)
|
|
}
|
|
})
|
|
|
|
RunParallelTests(t, testFuncs)
|
|
}
|
|
|
|
func TestRunParallelTests_MultipleExecution(t *testing.T) {
|
|
var executions int64
|
|
|
|
testFuncs := map[string]func(*testing.T){
|
|
"parallel_test_1": func(t *testing.T) {
|
|
// Don't call t.Parallel() here - RunParallelTests already calls it
|
|
time.Sleep(10 * time.Millisecond)
|
|
atomic.AddInt64(&executions, 1)
|
|
},
|
|
"parallel_test_2": func(t *testing.T) {
|
|
// Don't call t.Parallel() here - RunParallelTests already calls it
|
|
time.Sleep(10 * time.Millisecond)
|
|
atomic.AddInt64(&executions, 1)
|
|
},
|
|
"parallel_test_3": func(t *testing.T) {
|
|
// Don't call t.Parallel() here - RunParallelTests already calls it
|
|
time.Sleep(10 * time.Millisecond)
|
|
atomic.AddInt64(&executions, 1)
|
|
},
|
|
}
|
|
|
|
// RunParallelTests uses t.Run() which should wait for all sub-tests to complete
|
|
// With t.Parallel(), tests are paused and resumed later, but t.Run() still blocks
|
|
// until all sub-tests complete. The issue is that the check happens before parallel
|
|
// tests resume. We need to check the count in a cleanup function that runs after
|
|
// all tests complete.
|
|
t.Cleanup(func() {
|
|
// This runs after all sub-tests complete
|
|
finalCount := atomic.LoadInt64(&executions)
|
|
if finalCount != 3 {
|
|
t.Errorf("Expected 3 executions, got %d", finalCount)
|
|
}
|
|
})
|
|
|
|
RunParallelTests(t, testFuncs)
|
|
}
|
|
|
|
func TestWithLock(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
var counter int
|
|
var wg sync.WaitGroup
|
|
|
|
// Exécuter plusieurs goroutines qui incrémentent le compteur
|
|
for i := 0; i < 10; i++ {
|
|
wg.Add(1)
|
|
go func() {
|
|
defer wg.Done()
|
|
WithLock(func() {
|
|
counter++
|
|
})
|
|
}()
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
if counter != 10 {
|
|
t.Errorf("Expected counter to be 10, got %d", counter)
|
|
}
|
|
}
|
|
|
|
func TestWithLock_Isolation(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
var value int
|
|
|
|
// Exécuter plusieurs opérations avec lock
|
|
for i := 0; i < 5; i++ {
|
|
WithLock(func() {
|
|
oldValue := value
|
|
time.Sleep(1 * time.Millisecond)
|
|
value = oldValue + 1
|
|
})
|
|
}
|
|
|
|
if value != 5 {
|
|
t.Errorf("Expected value to be 5, got %d", value)
|
|
}
|
|
}
|
|
|
|
func TestTestLockManager(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
lockManager := NewTestLockManager()
|
|
var counter1, counter2 int
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
// Utiliser deux locks différents en parallèle
|
|
for i := 0; i < 5; i++ {
|
|
wg.Add(2)
|
|
go func() {
|
|
defer wg.Done()
|
|
unlock := lockManager.Lock("resource1")
|
|
defer unlock()
|
|
counter1++
|
|
}()
|
|
go func() {
|
|
defer wg.Done()
|
|
unlock := lockManager.Lock("resource2")
|
|
defer unlock()
|
|
counter2++
|
|
}()
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
if counter1 != 5 {
|
|
t.Errorf("Expected counter1 to be 5, got %d", counter1)
|
|
}
|
|
if counter2 != 5 {
|
|
t.Errorf("Expected counter2 to be 5, got %d", counter2)
|
|
}
|
|
}
|
|
|
|
func TestTestLockManager_SameLock(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
lockManager := NewTestLockManager()
|
|
var counter int
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
// Utiliser le même lock pour plusieurs goroutines
|
|
for i := 0; i < 10; i++ {
|
|
wg.Add(1)
|
|
go func() {
|
|
defer wg.Done()
|
|
unlock := lockManager.Lock("shared_resource")
|
|
defer unlock()
|
|
counter++
|
|
}()
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
if counter != 10 {
|
|
t.Errorf("Expected counter to be 10, got %d", counter)
|
|
}
|
|
}
|
|
|
|
func TestTestLockManager_ConcurrentAccess(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
lockManager := NewTestLockManager()
|
|
var sharedValue int
|
|
|
|
// Test que les locks fonctionnent correctement en accès concurrent
|
|
var wg sync.WaitGroup
|
|
for i := 0; i < 20; i++ {
|
|
wg.Add(1)
|
|
go func() {
|
|
defer wg.Done()
|
|
unlock := lockManager.Lock("concurrent_resource")
|
|
defer unlock()
|
|
|
|
oldValue := sharedValue
|
|
time.Sleep(1 * time.Millisecond)
|
|
sharedValue = oldValue + 1
|
|
}()
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
if sharedValue != 20 {
|
|
t.Errorf("Expected sharedValue to be 20, got %d", sharedValue)
|
|
}
|
|
}
|