package errmgr

import (
	"fmt"
	"github.com/olekukonko/errors"
	"testing"
)
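
// These benchmarks can be run with the standard Go tooling, for example
// `go test -bench=. -benchmem` from this package directory; the optional
// -benchmem flag also reports allocations per operation.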

// BenchmarkTemplateError measures the performance of creating templated errors.
func BenchmarkTemplateError(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := ErrDBConnection(fmt.Sprintf("connection failed %d", i))
		err.Free()
	}
}

// BenchmarkCodedError measures the performance of creating coded errors.
func BenchmarkCodedError(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := ErrValidationFailed(fmt.Sprintf("field %d", i))
		err.Free()
	}
}

// BenchmarkCategorizedError measures the performance of creating categorized errors.
func BenchmarkCategorizedError(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := NetworkError(fmt.Sprintf("host %d", i))
		err.Free()
	}
}

// BenchmarkCallableError measures the performance of creating custom callable errors.
func BenchmarkCallableError(b *testing.B) {
	fn := Tracked("custom", func(args ...interface{}) *errors.Error {
		return errors.New(fmt.Sprintf("custom %v", args[0]))
	})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := fn(i)
		err.Free()
	}
}

// BenchmarkMetrics measures the performance of retrieving error metrics.
func BenchmarkMetrics(b *testing.B) {
	// Pre-populate the registry so Metrics() has counts to report.
	for i := 0; i < 100; i++ {
		err := ErrDBConnection(fmt.Sprintf("test %d", i))
		err.Free()
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = Metrics()
	}
}
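
// BenchmarkMonitorWithClosedChannel measures the performance of creating
// errors whose monitor channel has already been closed.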
func BenchmarkMonitorWithClosedChannel(b *testing.B) {
	Reset()
	SetThreshold("BenchError", 1)

	// Create and close the monitor so error creation exercises the closed-channel case.
	monitor := NewMonitor("BenchError")
	monitor.Close()

	errFunc := Define("BenchError", "bench test %d")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := errFunc(i)
		err.Free()
	}
}