// Copyright 2015, Joe Tsai. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"path/filepath"
	"runtime"
	"strings"
	"testing"

	"github.com/dsnet/compress/internal/testutil"
)
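
// Result records a single benchmark measurement. For example, if the primary
// (first listed) codec runs at 40 MB/s and another at 60 MB/s, their Results
// are {R: 40, D: 1.0} and {R: 60, D: 1.5}.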
type Result struct {
	R float64 // Rate (MB/s) or ratio (rawSize/compSize)
	D float64 // Delta ratio relative to primary benchmark
}

// BenchmarkEncoder benchmarks a single encoder on the given input data using
// the selected compression level and reports the result.
func BenchmarkEncoder(input []byte, enc Encoder, lvl int) testing.BenchmarkResult {
	return testing.Benchmark(func(b *testing.B) {
		b.StopTimer()
		if enc == nil {
			b.Fatalf("unexpected error: nil Encoder")
		}
		runtime.GC() // Reduce GC noise before timing starts.
		b.StartTimer()
		for i := 0; i < b.N; i++ {
			wr := enc(ioutil.Discard, lvl)
			_, err := io.Copy(wr, bytes.NewBuffer(input))
			// Check the Close error first; err still holds the Copy error.
			if err := wr.Close(); err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			if err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			b.SetBytes(int64(len(input))) // Throughput is measured in uncompressed input bytes.
		}
	})
}
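
// A minimal usage sketch for BenchmarkEncoder, assuming Encoder is
// func(io.Writer, int) io.WriteCloser (as its use above implies) and using
// compress/gzip as a stand-in backend:
//
//	gz := func(w io.Writer, lvl int) io.WriteCloser {
//		zw, _ := gzip.NewWriterLevel(w, lvl)
//		return zw
//	}
//	res := BenchmarkEncoder(data, gz, gzip.BestSpeed)
//	mbps := float64(res.Bytes) * float64(res.N) / res.T.Seconds() / 1e6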

// BenchmarkEncoderSuite runs multiple benchmarks across all encoder
// implementations, files, levels, and sizes.
//
// The values returned have the following structure:
//
//	results: [len(files)*len(levels)*len(sizes)][len(encs)]Result
//	names:   [len(files)*len(levels)*len(sizes)]string
func BenchmarkEncoderSuite(ft Format, encs []string, files []file, levels, sizes []int, tick func()) (results [][]Result, names []string) {
	return benchmarkSuite(encs, files, levels, sizes, tick,
		func(input []byte, enc string, lvl int) Result {
			result := BenchmarkEncoder(input, encoders[ft][enc], lvl)
			if result.N == 0 {
				return Result{}
			}
			// Bytes per microsecond is equivalent to MB/s.
			us := (float64(result.T.Nanoseconds()) / 1e3) / float64(result.N)
			rate := float64(result.Bytes) / us
			return Result{R: rate}
		})
}
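
// A sketch of how the suite output could be consumed (ft, encs, files,
// levels, and sizes are assumed to be already-populated values of the
// package's Format, []string, []file, and []int types):
//
//	results, names := BenchmarkEncoderSuite(ft, encs, files, levels, sizes, nil)
//	for i, name := range names {
//		for j, enc := range encs {
//			fmt.Printf("%s %s: %.2f MB/s (%.2fx)\n", name, enc, results[i][j].R, results[i][j].D)
//		}
//	}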

// BenchmarkDecoder benchmarks a single decoder on the given pre-compressed
// input data and reports the result.
func BenchmarkDecoder(input []byte, dec Decoder) testing.BenchmarkResult {
	return testing.Benchmark(func(b *testing.B) {
		b.StopTimer()
		if dec == nil {
			b.Fatalf("unexpected error: nil Decoder")
		}
		runtime.GC() // Reduce GC noise before timing starts.
		b.StartTimer()
		for i := 0; i < b.N; i++ {
			rd := dec(bufio.NewReader(bytes.NewBuffer(input)))
			cnt, err := io.Copy(ioutil.Discard, rd)
			// Check the Close error first; err still holds the Copy error.
			if err := rd.Close(); err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			if err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			b.SetBytes(cnt) // Throughput is measured in decompressed output bytes.
		}
	})
}
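
// A minimal usage sketch for BenchmarkDecoder, assuming Decoder is
// func(io.Reader) io.ReadCloser (as its use above implies) and using
// compress/gzip as a stand-in backend:
//
//	gunzip := func(r io.Reader) io.ReadCloser {
//		zr, _ := gzip.NewReader(r)
//		return zr
//	}
//	res := BenchmarkDecoder(compressedData, gunzip)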

// BenchmarkDecoderSuite runs multiple benchmarks across all decoder
// implementations, files, levels, and sizes. The ref encoder is used only to
// produce the compressed input, so every decoder is measured on the same data.
//
// The values returned have the following structure:
//
//	results: [len(files)*len(levels)*len(sizes)][len(decs)]Result
//	names:   [len(files)*len(levels)*len(sizes)]string
func BenchmarkDecoderSuite(ft Format, decs []string, files []file, levels, sizes []int, ref Encoder, tick func()) (results [][]Result, names []string) {
	return benchmarkSuite(decs, files, levels, sizes, tick,
		func(input []byte, dec string, lvl int) Result {
			// Compress the input with the reference encoder first.
			buf := new(bytes.Buffer)
			wr := ref(buf, lvl)
			if _, err := io.Copy(wr, bytes.NewReader(input)); err != nil {
				return Result{}
			}
			if wr.Close() != nil {
				return Result{}
			}
			output := buf.Bytes()

			result := BenchmarkDecoder(output, decoders[ft][dec])
			if result.N == 0 {
				return Result{}
			}
			us := (float64(result.T.Nanoseconds()) / 1e3) / float64(result.N)
			rate := float64(result.Bytes) / us
			return Result{R: rate}
		})
}
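
// A sketch of a call, assuming gz is an Encoder as in the BenchmarkEncoder
// example above, used here as the reference compressor. As with the other
// suites, decs[0] serves as the baseline that each Result's D is computed
// against:
//
//	results, names := BenchmarkDecoderSuite(ft, decs, files, levels, sizes, gz, nil)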

// BenchmarkRatioSuite measures the compression ratio achieved by all encoder
// implementations across files, levels, and sizes. A ratio of 3.0 means the
// compressed output is one third the size of the input.
//
// The values returned have the following structure:
//
//	results: [len(files)*len(levels)*len(sizes)][len(encs)]Result
//	names:   [len(files)*len(levels)*len(sizes)]string
func BenchmarkRatioSuite(ft Format, encs []string, files []file, levels, sizes []int, tick func()) (results [][]Result, names []string) {
	return benchmarkSuite(encs, files, levels, sizes, tick,
		func(input []byte, enc string, lvl int) Result {
			buf := new(bytes.Buffer)
			wr := encoders[ft][enc](buf, lvl)
			if _, err := io.Copy(wr, bytes.NewReader(input)); err != nil {
				return Result{}
			}
			if wr.Close() != nil {
				return Result{}
			}
			output := buf.Bytes()
			ratio := float64(len(input)) / float64(len(output))
			return Result{R: ratio}
		})
}
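
// benchFunc is the signature of a single-codec benchmark: it runs the named
// codec at the given compression level on input and returns its measurement.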
type benchFunc func(input []byte, codec string, level int) Result

// benchmarkSuite runs the given benchFunc for every combination of file,
// level, and size, once per codec, and reports one row of Results per
// combination along with a human-readable name for that row. The first codec
// listed acts as the baseline for the D field.
func benchmarkSuite(codecs []string, files []file, levels, sizes []int, tick func(), run benchFunc) ([][]Result, []string) {
	// Allocate buffers for the result.
	d0 := len(files) * len(levels) * len(sizes)
	d1 := len(codecs)
	results := make([][]Result, d0)
	for i := range results {
		results[i] = make([]Result, d1)
	}
	names := make([]string, d0)

	// Run the benchmark for every codec, file, level, and size.
	var i int
	for _, f := range files {
		for _, l := range levels {
			for _, n := range sizes {
				b, err := ioutil.ReadFile(f.Abs)
				if err == nil {
					b = testutil.ResizeData(b, n)
				}
				fname := strings.Replace(f.Rel, string(filepath.Separator), "_", -1)
				name := fmt.Sprintf("%s:%d:%s", fname, l, intName(int64(len(b))))
				for j, c := range codecs {
					if tick != nil {
						tick()
					}
					names[i] = name
					if err == nil {
						results[i][j] = run(b, c, l)
					}
					// Express each rate relative to the primary (first) codec.
					results[i][j].D = results[i][j].R / results[i][0].R
				}
				i++
			}
		}
	}
	return results, names
}