File: types_test.go

Package: golang-github-aws-aws-sdk-go 1.21.6+dfsg-2
package aws

import (
	"bytes"
	"math/rand"
	"testing"
)

func TestWriteAtBuffer(t *testing.T) {
	b := &WriteAtBuffer{}

	n, err := b.WriteAt([]byte{1}, 0)
	if err != nil {
		t.Errorf("expected no error, but received %v", err)
	}
	if e, a := 1, n; e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}

	n, err = b.WriteAt([]byte{1, 1, 1}, 5)
	if err != nil {
		t.Errorf("expected no error, but received %v", err)
	}
	if e, a := 3, n; e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}

	n, err = b.WriteAt([]byte{2}, 1)
	if err != nil {
		t.Errorf("expected no error, but received %v", err)
	}
	if e, a := 1, n; e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}

	n, err = b.WriteAt([]byte{3}, 2)
	if err != nil {
		t.Errorf("expected no error, but received %v", err)
	}
	if e, a := 1, n; e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}

	if !bytes.Equal([]byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes()) {
		t.Errorf("expected %v, but received %v", []byte{1, 2, 3, 0, 0, 1, 1, 1}, b.Bytes())
	}
}

func BenchmarkWriteAtBuffer(b *testing.B) {
	buf := &WriteAtBuffer{}
	r := rand.New(rand.NewSource(1))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		to := r.Intn(10) * 4096
		bs := make([]byte, to)
		buf.WriteAt(bs, r.Int63n(10)*4096)
	}
}

func BenchmarkWriteAtBufferOrderedWrites(b *testing.B) {
	// Benchmarks the performance of a WriteAtBuffer written to in an
	// ordered fashion. This mirrors the behavior of the s3.Downloader,
	// which downloads the first chunk of the file, then the second, and
	// so on.
	//
	// This benchmark simulates a 150MB file being written in 30 ordered
	// 5MB chunks. A scaled-down, verified sketch of the same ordered-write
	// pattern follows this benchmark.
	chunk := int64(5e6)
	max := chunk * 30
	// we'll write the same 5MB chunk every time
	tmp := make([]byte, chunk)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf := &WriteAtBuffer{}
		for off := int64(0); off < max; off += chunk {
			buf.WriteAt(tmp, off)
		}
	}
}
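
// TestWriteAtBufferOrderedSketch is an illustrative sketch and not part of the
// original suite: it scales the ordered-write pattern from
// BenchmarkWriteAtBufferOrderedWrites down to a few small chunks and checks
// the assembled contents. It assumes only what TestWriteAtBuffer already
// exercises: WriteAt at increasing offsets fills a contiguous buffer that
// Bytes exposes.
func TestWriteAtBufferOrderedSketch(t *testing.T) {
	const chunk = int64(16)
	const chunks = int64(4)

	buf := &WriteAtBuffer{}
	for i := int64(0); i < chunks; i++ {
		// Fill each chunk with its index so the assembled buffer is easy to verify.
		part := bytes.Repeat([]byte{byte(i)}, int(chunk))
		if _, err := buf.WriteAt(part, i*chunk); err != nil {
			t.Fatalf("expected no error, but received %v", err)
		}
	}

	got := buf.Bytes()
	if e, a := int(chunk*chunks), len(got); e != a {
		t.Fatalf("expected length %d, but received %d", e, a)
	}
	for i := int64(0); i < chunks; i++ {
		if e, a := byte(i), got[i*chunk]; e != a {
			t.Errorf("expected %d at offset %d, but received %d", e, i*chunk, a)
		}
	}
}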

func BenchmarkWriteAtBufferParallel(b *testing.B) {
	buf := &WriteAtBuffer{}

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// rand.Rand is not safe for concurrent use, so each goroutine
		// gets its own source rather than sharing one.
		r := rand.New(rand.NewSource(1))
		for pb.Next() {
			to := r.Intn(10) * 4096
			bs := make([]byte, to)
			buf.WriteAt(bs, r.Int63n(10)*4096)
		}
	})
}
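
// TestWriteAtBufferConcurrentSketch is an illustrative sketch and not part of
// the original suite: it mirrors the parallel benchmark above but adds a
// correctness check, writing disjoint chunks from several goroutines and then
// verifying the final length. It assumes WriteAt is safe for concurrent use,
// which is also what BenchmarkWriteAtBufferParallel relies on.
func TestWriteAtBufferConcurrentSketch(t *testing.T) {
	const chunk = int64(32)
	const writers = 8

	buf := &WriteAtBuffer{}
	done := make(chan struct{})
	for i := int64(0); i < writers; i++ {
		go func(off int64) {
			defer func() { done <- struct{}{} }()
			// Each goroutine writes a disjoint region of the buffer.
			if _, err := buf.WriteAt(make([]byte, chunk), off*chunk); err != nil {
				t.Errorf("expected no error, but received %v", err)
			}
		}(i)
	}
	for i := 0; i < writers; i++ {
		<-done
	}

	if e, a := int(chunk*writers), len(buf.Bytes()); e != a {
		t.Errorf("expected length %d, but received %d", e, a)
	}
}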