package benchmark

import (
	"fmt"
	"log"
	"runtime"
	"runtime/metrics"
	"sort"
	"sync"
	"time"

	"github.com/schollz/progressbar/v3"
)
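
// The constants and shared helpers referenced throughout this file
// (MemoryTestIterations, GCSettleTimeMs, FinalSampleWaitMs, Average,
// Percentile, and friends) are defined elsewhere in the package. As a
// hedged sketch only, the timing constants are assumed to look roughly
// like this (the values are placeholders, not the real ones):
//
//	const (
//		MemoryTestIterations = 5   // iterations per memory test
//		GCSettleTimeMs       = 100 // pause after runtime.GC() before baselining
//		FinalSampleWaitMs    = 50  // grace period for the sampler goroutine
//	)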
| 14 | + |
| 15 | +// === Test Orchestration === |
| 16 | + |
| 17 | +// runThroughputTests executes all throughput tests |
| 18 | +func (b *DBESDKBenchmark) runThroughputTests(dataSizes []int, iterations int) { |
| 19 | + log.Println("Running throughput tests...") |
| 20 | + for _, dataSize := range dataSizes { |
| 21 | + result, err := b.runThroughputTest(dataSize, iterations) |
| 22 | + if err != nil { |
| 23 | + log.Printf("Throughput test failed: %v", err) |
| 24 | + continue |
| 25 | + } |
| 26 | + b.Results = append(b.Results, *result) |
| 27 | + log.Printf("Throughput test completed: %.2f ops/sec", result.OpsPerSecond) |
| 28 | + } |
| 29 | +} |
| 30 | + |
| 31 | +// runMemoryTests executes all memory tests |
| 32 | +func (b *DBESDKBenchmark) runMemoryTests(dataSizes []int) { |
| 33 | + log.Println("Running memory tests...") |
| 34 | + for _, dataSize := range dataSizes { |
| 35 | + result, err := b.runMemoryTest(dataSize) |
| 36 | + if err != nil { |
| 37 | + log.Printf("Memory test failed: %v", err) |
| 38 | + continue |
| 39 | + } |
| 40 | + b.Results = append(b.Results, *result) |
| 41 | + log.Printf("Memory test completed: %.2f MB peak", result.PeakMemoryMB) |
| 42 | + } |
| 43 | +} |

// runConcurrencyTests executes all concurrency tests
func (b *DBESDKBenchmark) runConcurrencyTests(dataSizes []int, concurrencyLevels []int) {
	log.Println("Running concurrency tests...")
	for _, dataSize := range dataSizes {
		for _, concurrency := range concurrencyLevels {
			if concurrency > 1 { // Skip single-threaded levels; those are covered by the throughput tests
				result, err := b.runConcurrentTest(dataSize, concurrency, 5) // 5 iterations per worker
				if err != nil {
					log.Printf("Concurrent test failed: %v", err)
					continue
				}
				b.Results = append(b.Results, *result)
				log.Printf("Concurrent test completed: %.2f ops/sec @ %d threads", result.OpsPerSecond, concurrency)
			}
		}
	}
}
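
// The BenchmarkResult type is defined elsewhere in the package. For
// readability, here is a sketch of its shape as inferred purely from the
// fields populated in this file (the real definition may carry more fields):
//
//	type BenchmarkResult struct {
//		TestName          string
//		Language          string
//		DataSize          int
//		Concurrency       int
//		PutLatencyMs      float64
//		GetLatencyMs      float64
//		EndToEndLatencyMs float64
//		OpsPerSecond      float64
//		BytesPerSecond    float64
//		P50Latency        float64
//		P95Latency        float64
//		P99Latency        float64
//		PeakMemoryMB      float64
//		MemoryEfficiency  float64
//		Timestamp         string
//		GoVersion         string
//		CPUCount          int
//		TotalMemoryGB     float64
//	}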

// RunAllBenchmarks runs all configured benchmark tests
func (b *DBESDKBenchmark) RunAllBenchmarks() error {
	log.Println("Starting comprehensive DB-ESDK benchmark suite")

	// Combine all data sizes
	var dataSizes []int
	for _, sizes := range [][]int{b.Config.DataSizes.Small, b.Config.DataSizes.Medium, b.Config.DataSizes.Large} {
		dataSizes = append(dataSizes, sizes...)
	}

	// Run test suites
	if b.shouldRunTestType("throughput") {
		b.runThroughputTests(dataSizes, b.Config.Iterations.Measurement)
	} else {
		log.Println("Skipping throughput tests (not in test_types)")
	}

	if b.shouldRunTestType("memory") {
		b.runMemoryTests(dataSizes)
	} else {
		log.Println("Skipping memory tests (not in test_types)")
	}

	if b.shouldRunTestType("concurrency") {
		b.runConcurrencyTests(dataSizes, b.Config.ConcurrencyLevels)
	} else {
		log.Println("Skipping concurrency tests (not in test_types)")
	}

	log.Printf("Benchmark suite completed. Total results: %d", len(b.Results))
	return nil
}
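
// shouldRunTestType is defined elsewhere in the package. A minimal sketch,
// assuming Config carries a TestTypes []string field mirroring the
// test_types config key mentioned in the log messages above:
//
//	func (b *DBESDKBenchmark) shouldRunTestType(testType string) bool {
//		for _, t := range b.Config.TestTypes {
//			if t == testType {
//				return true
//			}
//		}
//		return false
//	}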

// === Memory Test Implementation ===

// runMemoryTest runs memory benchmark with continuous sampling
func (b *DBESDKBenchmark) runMemoryTest(dataSize int) (*BenchmarkResult, error) {
	log.Printf("Running memory test - Size: %d bytes (%d iterations, continuous sampling)", dataSize, MemoryTestIterations)

	data := b.GenerateTestData(dataSize)

	// Set up runtime/metrics tracking
	samples := make([]metrics.Sample, 2)
	samples[0].Name = "/memory/classes/heap/objects:bytes"
	samples[1].Name = "/gc/heap/allocs:bytes"

	var peakHeap, peakAllocations float64
	var avgHeapValues []float64
	// Run iterations
	for i := 0; i < MemoryTestIterations; i++ {
		runtime.GC()
		time.Sleep(GCSettleTimeMs * time.Millisecond)

		// Get baseline
		metrics.Read(samples)
		beforeHeap := samples[0].Value.Uint64()
		beforeAllocs := samples[1].Value.Uint64()

		// Start continuous sampling in a background goroutine; the mutex
		// guards the handoff of the collected samples back to this goroutine.
		stopSampling := make(chan bool)
		var continuousSamples []MemorySample
		var samplingMutex sync.Mutex

		go func() {
			sampledData := b.sampleMemoryContinuously(beforeHeap, beforeAllocs, stopSampling)
			samplingMutex.Lock()
			continuousSamples = sampledData
			samplingMutex.Unlock()
		}()

		// Run operation
		operationStart := time.Now()
		_, _, err := b.runBatchPutGetCycle(data)
		operationDuration := time.Since(operationStart)

		// Signal the sampler to stop, then give it a grace period to take a
		// final reading and publish its results before we read them.
		close(stopSampling)
		time.Sleep(FinalSampleWaitMs * time.Millisecond)

		if err != nil {
			log.Printf("Iteration %d failed: %v", i+1, err)
			continue
		}

		// Analyze samples
		samplingMutex.Lock()
		var iterPeakHeap, iterTotalAllocs, iterAvgHeap float64
		if len(continuousSamples) > 0 {
			var heapSum float64
			for _, s := range continuousSamples {
				if s.HeapMB > iterPeakHeap {
					iterPeakHeap = s.HeapMB
				}
				if s.MetricsAllocsMB > iterTotalAllocs {
					iterTotalAllocs = s.MetricsAllocsMB
				}
				heapSum += s.HeapMB
			}
			iterAvgHeap = heapSum / float64(len(continuousSamples))
		}
		samplingMutex.Unlock()

		// Update global metrics
		if iterPeakHeap > peakHeap {
			peakHeap = iterPeakHeap
		}
		if iterTotalAllocs > peakAllocations {
			peakAllocations = iterTotalAllocs
		}
		avgHeapValues = append(avgHeapValues, iterAvgHeap)

		log.Printf("=== Iteration %d === Peak Heap: %.2f MB, Total Allocs: %.2f MB, Avg Heap: %.2f MB (%v, %d samples)",
			i+1, iterPeakHeap, iterTotalAllocs, iterAvgHeap, operationDuration, len(continuousSamples))
	}

	if len(avgHeapValues) == 0 {
		return nil, fmt.Errorf("all memory test iterations failed")
	}

	overallAvgHeap := Average(avgHeapValues)
	// Guard before dividing: efficiency is input bytes per byte of average heap
	memoryEfficiency := 0.0
	if overallAvgHeap > 0 {
		memoryEfficiency = float64(dataSize) / (overallAvgHeap * 1024 * 1024)
	}

	log.Printf("\nMemory Summary:")
	log.Printf("- Absolute Peak Heap: %.2f MB (across all runs)", peakHeap)
	log.Printf("- Average Heap: %.2f MB (across all runs)", overallAvgHeap)
	log.Printf("- Total Allocations: %.2f MB (max across all runs)", peakAllocations)

	result := &BenchmarkResult{
		TestName:         "memory",
		Language:         "go",
		DataSize:         dataSize,
		Concurrency:      1,
		PeakMemoryMB:     peakHeap,
		MemoryEfficiency: memoryEfficiency,
		Timestamp:        time.Now().Format("2006-01-02 15:04:05"),
		GoVersion:        runtime.Version(),
		CPUCount:         b.CPUCount,
		TotalMemoryGB:    b.TotalMemoryGB,
	}

	return result, nil
}
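
// MemorySample and sampleMemoryContinuously are defined elsewhere in the
// package. A minimal sketch of the assumed behavior: poll runtime/metrics on
// a short ticker, record deltas against the pre-run baseline, and return the
// collected samples once the stop channel is closed:
//
//	type MemorySample struct {
//		HeapMB          float64 // live heap above the baseline, in MB
//		MetricsAllocsMB float64 // cumulative allocations above the baseline, in MB
//	}
//
//	func (b *DBESDKBenchmark) sampleMemoryContinuously(baseHeap, baseAllocs uint64, stop <-chan bool) []MemorySample {
//		samples := make([]metrics.Sample, 2)
//		samples[0].Name = "/memory/classes/heap/objects:bytes"
//		samples[1].Name = "/gc/heap/allocs:bytes"
//		var out []MemorySample
//		ticker := time.NewTicker(time.Millisecond)
//		defer ticker.Stop()
//		for {
//			select {
//			case <-stop:
//				return out
//			case <-ticker.C:
//				metrics.Read(samples)
//				out = append(out, MemorySample{
//					HeapMB:          (float64(samples[0].Value.Uint64()) - float64(baseHeap)) / (1024 * 1024),
//					MetricsAllocsMB: (float64(samples[1].Value.Uint64()) - float64(baseAllocs)) / (1024 * 1024),
//				})
//			}
//		}
//	}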

// === Concurrent Test Implementation ===

// runConcurrentTest runs the concurrent-operations benchmark test
func (b *DBESDKBenchmark) runConcurrentTest(dataSize int, concurrency int, iterationsPerWorker int) (*BenchmarkResult, error) {
	log.Printf("Running concurrent test - Size: %d bytes, Concurrency: %d", dataSize, concurrency)

	data := b.GenerateTestData(dataSize)
	var allTimes []float64
	var timesMutex sync.Mutex
	var wg sync.WaitGroup

	errorChan := make(chan error, concurrency) // each worker sends at most one error
	startTime := time.Now()

	// Launch workers
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			var workerTimes []float64
			for j := 0; j < iterationsPerWorker; j++ {
				iterStart := time.Now()
				_, _, err := b.runBatchPutGetCycle(data)
				if err != nil {
					errorChan <- fmt.Errorf("worker %d iteration %d failed: %w", workerID, j, err)
					return
				}
				workerTimes = append(workerTimes, time.Since(iterStart).Seconds()*1000) // ms
			}

			timesMutex.Lock()
			allTimes = append(allTimes, workerTimes...)
			timesMutex.Unlock()
		}(i)
	}

	wg.Wait()
	totalDuration := time.Since(startTime).Seconds()

	// Surface the first worker error, if any
	select {
	case err := <-errorChan:
		return nil, err
	default:
	}

	// Calculate metrics
	totalOps := concurrency * iterationsPerWorker
	totalBytes := int64(totalOps * dataSize)

	sort.Float64s(allTimes)
	result := &BenchmarkResult{
		TestName:          "concurrent",
		Language:          "go",
		DataSize:          dataSize,
		Concurrency:       concurrency,
		EndToEndLatencyMs: Average(allTimes),
		OpsPerSecond:      float64(totalOps) / totalDuration,
		BytesPerSecond:    float64(totalBytes) / totalDuration,
		P50Latency:        Percentile(allTimes, 0.50),
		P95Latency:        Percentile(allTimes, 0.95),
		P99Latency:        Percentile(allTimes, 0.99),
		Timestamp:         time.Now().Format("2006-01-02 15:04:05"),
		GoVersion:         runtime.Version(),
		CPUCount:          b.CPUCount,
		TotalMemoryGB:     b.TotalMemoryGB,
	}

	log.Printf("Concurrent test completed - Ops/sec: %.2f, Avg latency: %.2f ms",
		result.OpsPerSecond, result.EndToEndLatencyMs)

	return result, nil
}
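
// Average and Percentile are shared helpers defined elsewhere in the
// package. A minimal sketch under the assumption that Percentile expects an
// already-sorted slice, which is why callers in this file sort first:
//
//	func Average(values []float64) float64 {
//		if len(values) == 0 {
//			return 0
//		}
//		var sum float64
//		for _, v := range values {
//			sum += v
//		}
//		return sum / float64(len(values))
//	}
//
//	func Percentile(sorted []float64, p float64) float64 {
//		if len(sorted) == 0 {
//			return 0
//		}
//		idx := int(p * float64(len(sorted)-1))
//		return sorted[idx]
//	}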

// === Throughput Test Implementation ===

// runThroughputTest runs throughput benchmark test
func (b *DBESDKBenchmark) runThroughputTest(dataSize int, iterations int) (*BenchmarkResult, error) {
	log.Printf("Running throughput test - Size: %d bytes, Iterations: %d", dataSize, iterations)

	testData := b.GenerateTestData(dataSize)

	// Warmup
	for i := 0; i < b.Config.Iterations.Warmup; i++ {
		if _, _, err := b.runBatchPutGetCycle(testData); err != nil {
			return nil, fmt.Errorf("warmup iteration %d failed: %w", i, err)
		}
	}

	// Measurement runs
	var putLatencies, getLatencies, endToEndLatencies []float64
	var totalBytes int64

	bar := progressbar.NewOptions(iterations,
		progressbar.OptionSetDescription("Throughput test"),
		progressbar.OptionShowCount(),
		progressbar.OptionSetWidth(50),
	)

	startTime := time.Now()
	for i := 0; i < iterations; i++ {
		iterationStart := time.Now()
		putMs, getMs, err := b.runBatchPutGetCycle(testData)
		if err != nil {
			return nil, fmt.Errorf("measurement iteration %d failed: %w", i, err)
		}
		iterationDuration := time.Since(iterationStart).Seconds() * 1000

		putLatencies = append(putLatencies, putMs)
		getLatencies = append(getLatencies, getMs)
		endToEndLatencies = append(endToEndLatencies, iterationDuration)
		totalBytes += int64(dataSize)

		_ = bar.Add(1)
	}
	totalDuration := time.Since(startTime).Seconds()

	// Calculate metrics
	sort.Float64s(endToEndLatencies)
	result := &BenchmarkResult{
		TestName:          "throughput",
		Language:          "go",
		DataSize:          dataSize,
		Concurrency:       1,
		PutLatencyMs:      Average(putLatencies),
		GetLatencyMs:      Average(getLatencies),
		EndToEndLatencyMs: Average(endToEndLatencies),
		OpsPerSecond:      float64(iterations) / totalDuration,
		BytesPerSecond:    float64(totalBytes) / totalDuration,
		P50Latency:        Percentile(endToEndLatencies, 0.50),
		P95Latency:        Percentile(endToEndLatencies, 0.95),
		P99Latency:        Percentile(endToEndLatencies, 0.99),
		Timestamp:         time.Now().Format("2006-01-02 15:04:05"),
		GoVersion:         runtime.Version(),
		CPUCount:          b.CPUCount,
		TotalMemoryGB:     b.TotalMemoryGB,
	}

	log.Printf("Throughput test completed - Ops/sec: %.2f, MB/sec: %.2f",
		result.OpsPerSecond, result.BytesPerSecond/(1024*1024))

	return result, nil
}
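
// Example usage, as a sketch only: the constructor name and config path are
// assumptions for illustration, not part of this file.
//
//	func main() {
//		b, err := NewDBESDKBenchmark("config.yaml") // hypothetical constructor
//		if err != nil {
//			log.Fatalf("setup failed: %v", err)
//		}
//		if err := b.RunAllBenchmarks(); err != nil {
//			log.Fatalf("benchmark run failed: %v", err)
//		}
//	}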