package errors

import (
	"encoding/json"
	"errors"
	"fmt"
	"runtime"
	"testing"
)
// Basic Error Creation Benchmarks
//
// These benchmarks measure the performance of creating basic errors with and without
// pooling, compared to standard library equivalents for baseline reference.

// BenchmarkBasic_New measures the creation and pooling of a new error.
func BenchmarkBasic_New(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := New("test error") // Create and pool a new error
		err.Free()
	}
}

// BenchmarkBasic_NewNoFree measures error creation without pooling.
func BenchmarkBasic_NewNoFree(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = New("test error") // Create error without returning to pool
	}
}

// BenchmarkBasic_StdlibComparison measures standard library error creation as a baseline.
func BenchmarkBasic_StdlibComparison(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = errors.New("test error") // Baseline using standard library errors.New
	}
}

// BenchmarkBasic_StdErrorComparison measures the package's Std wrapper for errors.New.
func BenchmarkBasic_StdErrorComparison(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = Std("test error") // Baseline using package's Std wrapper for errors.New
	}
}

// BenchmarkBasic_StdfComparison measures the package's Stdf wrapper for fmt.Errorf.
func BenchmarkBasic_StdfComparison(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = Stdf("test error %d", i) // Baseline using package's Stdf wrapper for fmt.Errorf
	}
}
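
// The variant below is an illustrative sketch, not part of the original suite:
// it adds b.ReportAllocs so allocations per operation show up in the benchmark
// output, which is the most direct way to confirm that the pooled New/Free
// round trip avoids heap allocations. It assumes only New and Free as used above.
func BenchmarkBasic_NewReportAllocs(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		err := New("test error") // Pooled path; expect near-zero allocs/op on pool hits
		err.Free()
	}
}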
// Stack Trace Benchmarks
//
// These benchmarks evaluate the performance of stack trace operations, including
// capturing and generating stack traces for error instances.

// BenchmarkStack_WithStack measures adding a stack trace to an error.
func BenchmarkStack_WithStack(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := New("test").WithStack() // Add stack trace to an error
		err.Free()
	}
}

// BenchmarkStack_Trace measures creating an error with a stack trace.
func BenchmarkStack_Trace(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := Trace("test error") // Create error with stack trace
		err.Free()
	}
}

// BenchmarkStack_Capture measures generating a stack trace from an existing error.
func BenchmarkStack_Capture(b *testing.B) {
	err := New("test")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = err.Stack() // Generate stack trace from existing error
	}
	err.Free()
}

// BenchmarkCaptureStack measures capturing a raw stack trace.
func BenchmarkCaptureStack(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		stack := captureStack(0) // Capture raw stack trace
		if stack != nil {
			runtime.KeepAlive(stack) // Ensure stack isn't optimized away
		}
	}
}
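
// The baseline below is an illustrative sketch, not part of the original suite:
// it measures runtime.Callers directly, the runtime primitive any captureStack
// implementation ultimately pays for. The buffer depth of 32 is an assumption
// for illustration only.
func BenchmarkStack_RuntimeCallersBaseline(b *testing.B) {
	pcs := make([]uintptr, 32) // Assumed depth; the buffer is reused across iterations
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		n := runtime.Callers(1, pcs) // Fill the buffer with raw program counters
		runtime.KeepAlive(pcs[:n])   // Ensure the capture isn't optimized away
	}
}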
// Context Operation Benchmarks
//
// These benchmarks assess the performance of adding context to errors, testing
// small context (array-based), map-based, and reuse scenarios.

// BenchmarkContext_Small measures adding context within the smallContext limit.
func BenchmarkContext_Small(b *testing.B) {
	err := New("base")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = err.With("key", i).With("key2", i+1) // Add two key-value pairs within smallContext limit
	}
	err.Free()
}

// BenchmarkContext_Map measures adding context exceeding smallContext capacity.
func BenchmarkContext_Map(b *testing.B) {
	err := New("base")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = err.With("k1", i).With("k2", i+1).With("k3", i+2) // Exceed smallContext, forcing map usage
	}
	err.Free()
}

// BenchmarkContext_Reuse measures adding to an existing context.
func BenchmarkContext_Reuse(b *testing.B) {
	err := New("base").With("init", "value")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = err.With("key", i) // Add to existing context
	}
	err.Free()
}
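
// The sketch below is illustrative and not part of the original suite: it uses
// b.Run sub-benchmarks to chart how cost changes as the number of context pairs
// crosses the small-context/map boundary. It assumes only New, With, and Free
// as used above; the pair counts are arbitrary.
func BenchmarkContext_Sizes(b *testing.B) {
	for _, pairs := range []int{1, 2, 4, 8} {
		b.Run(fmt.Sprintf("pairs=%d", pairs), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				err := New("base")
				for k := 0; k < pairs; k++ {
					err = err.With(fmt.Sprintf("k%d", k), k) // Larger counts should spill into the map path
				}
				err.Free()
			}
		})
	}
}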
// Error Wrapping Benchmarks
//
// These benchmarks measure the cost of wrapping errors, both shallow and deep chains.

// BenchmarkWrapping_Simple measures wrapping a single base error.
func BenchmarkWrapping_Simple(b *testing.B) {
	base := New("base")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := New("wrapper").Wrap(base) // Wrap a single base error
		err.Free()
	}
	base.Free()
}

// BenchmarkWrapping_Deep measures a single Unwrap step on a 10-level-deep error chain.
func BenchmarkWrapping_Deep(b *testing.B) {
	var err *Error
	for i := 0; i < 10; i++ {
		err = New("level").Wrap(err) // Build a 10-level-deep error chain
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = err.Unwrap() // Unwrap one level of the deep chain
	}
	err.Free()
}
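
// The sketch below is illustrative and not part of the original suite: where
// BenchmarkWrapping_Deep pays for one Unwrap step, this benchmark uses the
// package's Is to force a traversal of the entire chain down to a named base
// error on every iteration. It assumes only New, Named, Wrap, Is, and Free as
// used elsewhere in this file.
func BenchmarkWrapping_DeepIs(b *testing.B) {
	base := Named("base")
	err := New("level").Wrap(base)
	for i := 0; i < 9; i++ {
		err = New("level").Wrap(err) // 10 wrapper levels above the named base
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = Is(err, base) // Matching the innermost error walks the whole chain
	}
	err.Free()
	base.Free()
}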
// Type Assertion Benchmarks
//
// These benchmarks evaluate the performance of type assertions (Is and As) on wrapped errors.

// BenchmarkTypeAssertion_Is measures checking if an error matches a target.
func BenchmarkTypeAssertion_Is(b *testing.B) {
	target := Named("target")
	err := New("wrapper").Wrap(target)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = Is(err, target) // Check if error matches target
	}
	target.Free()
}

// BenchmarkTypeAssertion_As measures extracting a target from an error chain.
func BenchmarkTypeAssertion_As(b *testing.B) {
	err := New("wrapper").Wrap(Named("target"))
	var target *Error
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = As(err, &target) // Extract target from error chain
	}
	if target != nil {
		target.Free()
	}
}
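
// The sketch below is illustrative and not part of the original suite: it
// measures the miss path of Is, where no target in the chain matches, so the
// traversal cannot exit early; this is usually the worst case for a chain scan.
// It assumes only New, Named, Wrap, Is, and Free as used above.
func BenchmarkTypeAssertion_IsMiss(b *testing.B) {
	target := Named("target")
	other := Named("other")
	err := New("wrapper").Wrap(target)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = Is(err, other) // Never matches; forces a full chain traversal
	}
	err.Free()
	target.Free()
	other.Free()
}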
// Serialization Benchmarks
//
// These benchmarks test JSON serialization performance with and without stack traces.

// BenchmarkSerialization_JSON measures serializing an error with context to JSON.
func BenchmarkSerialization_JSON(b *testing.B) {
	err := New("test").With("key", "value").With("num", 42)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = json.Marshal(err) // Serialize error with context
	}
	err.Free() // Return the error to the pool, matching the other benchmarks
}

// BenchmarkSerialization_JSONWithStack measures serializing an error with a stack trace to JSON.
func BenchmarkSerialization_JSONWithStack(b *testing.B) {
	err := Trace("test").With("key", "value")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = json.Marshal(err) // Serialize error with stack trace
	}
	err.Free() // Return the error to the pool, matching the other benchmarks
}
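
// The sketch below is illustrative and not part of the original suite: it
// serializes a wrapped chain with per-level context, showing how marshal cost
// scales with chain depth under the assumption that the JSON encoding of an
// error includes its cause chain. The depth of 5 is arbitrary.
func BenchmarkSerialization_JSONDeep(b *testing.B) {
	var err *Error
	for i := 0; i < 5; i++ {
		err = New("level").Wrap(err).With("depth", i) // Build a 5-level chain with context
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = json.Marshal(err) // Serialize the entire chain
	}
	err.Free()
}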
// Concurrency Benchmarks
//
// These benchmarks assess performance under concurrent error creation and context modification.

// BenchmarkConcurrency_Creation measures concurrent error creation and pooling.
func BenchmarkConcurrency_Creation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := New("parallel error") // Create errors concurrently
			err.Free()
		}
	})
}

// BenchmarkConcurrency_Context measures concurrent context addition to a shared error.
func BenchmarkConcurrency_Context(b *testing.B) {
	base := New("base")
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_ = base.With("key", "value") // Add context concurrently
		}
	})
	base.Free()
}

// BenchmarkContext_Concurrent measures concurrent context addition, cycling through
// a small set of distinct keys.
func BenchmarkContext_Concurrent(b *testing.B) {
	err := New("base")
	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for pb.Next() {
			err.With(fmt.Sprintf("key%d", i%10), i) // Cycle through ten keys concurrently
			i++
		}
	})
	err.Free() // Return the error to the pool, matching the other benchmarks
}
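
// The sketch below is illustrative and not part of the original suite: it uses
// b.SetParallelism to run more goroutines than GOMAXPROCS, stressing contention
// on the error pool harder than RunParallel's default. The factor of 4 is arbitrary.
func BenchmarkConcurrency_CreationOversubscribed(b *testing.B) {
	b.SetParallelism(4) // 4x GOMAXPROCS goroutines
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := New("parallel error") // Same workload as BenchmarkConcurrency_Creation
			err.Free()
		}
	})
}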
// Pool and Allocation Benchmarks
//
// These benchmarks evaluate pooling mechanisms and raw allocation costs.

// BenchmarkPoolGetPut measures the speed of pool get and put operations.
func BenchmarkPoolGetPut(b *testing.B) {
	e := &Error{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		errorPool.Put(e)    // Return error to pool
		e = errorPool.Get() // Retrieve error from pool
	}
}

// BenchmarkPoolWarmup measures the cost of resetting and warming the error pool.
func BenchmarkPoolWarmup(b *testing.B) {
	for i := 0; i < b.N; i++ {
		errorPool = NewErrorPool() // Recreate pool
		WarmPool(100)              // Pre-warm with 100 errors
	}
}

// BenchmarkStackAlloc measures the cost of allocating a stack slice.
func BenchmarkStackAlloc(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = make([]uintptr, 0, currentConfig.stackDepth) // Allocate stack slice
	}
}
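
// The baseline below is an illustrative sketch, not part of the original suite:
// it allocates a fresh Error on each iteration for comparison against the pool
// round trip above. The package-level benchSink variable is a hypothetical name
// introduced here so the allocation escapes and cannot be optimized away.
var benchSink *Error

func BenchmarkPoolBaselineAlloc(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		benchSink = &Error{} // The heap allocation the pool is designed to avoid
	}
}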
// Special Case Benchmarks
//
// These benchmarks test specialized error creation methods.

// BenchmarkSpecial_Named measures creating a named error with a stack trace.
func BenchmarkSpecial_Named(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := Named("test_error") // Create named error with stack
		err.Free()
	}
}

// BenchmarkSpecial_Format measures creating a formatted error.
func BenchmarkSpecial_Format(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := Errorf("formatted %s %d", "error", i) // Create formatted error
		err.Free()
	}
}
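
// The baseline below is an illustrative sketch, not part of the original suite:
// it calls fmt.Errorf directly, the unpooled stdlib equivalent of Errorf above,
// so wrapper and pooling overhead can be read off by comparison.
func BenchmarkSpecial_FormatStdlib(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = fmt.Errorf("formatted %s %d", "error", i) // Stdlib formatting baseline
	}
}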