File: test_pedantic.py

package info (click to toggle)
python-pytest-benchmark 5.1.0-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 1,072 kB
  • sloc: python: 5,232; makefile: 12
file content (133 lines) | stat: -rw-r--r-- 2,998 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import pytest
from pytest import mark
from pytest import raises


def test_single(benchmark):
    """A bare pedantic call with positional args invokes the target exactly once."""
    calls = []
    benchmark.pedantic(calls.append, args=[123])
    assert calls == [123]


def test_setup(benchmark):
    """A setup callable may return (args, kwargs) that are forwarded to the target."""
    calls = []

    def target(foo, bar=123):
        calls.append((foo, bar))

    benchmark.pedantic(target, setup=lambda: ([1], {'bar': 2}))
    assert calls == [(1, 2)]


@pytest.mark.benchmark(cprofile=True)
def test_setup_cprofile(benchmark):
    """With cprofile enabled the target runs one extra time (for the profile pass)."""
    calls = []

    def target(foo, bar=123):
        calls.append((foo, bar))

    benchmark.pedantic(target, setup=lambda: ([1], {'bar': 2}))
    assert calls == [(1, 2), (1, 2)]


def test_args_kwargs(benchmark):
    """Explicit args and kwargs are both forwarded to the target."""
    calls = []

    def target(foo, bar=123):
        calls.append((foo, bar))

    benchmark.pedantic(target, args=[1], kwargs={'bar': 2})
    assert calls == [(1, 2)]


def test_iterations(benchmark):
    """With iterations > 1, pedantic makes rounds * iterations calls plus one extra.

    The expected count (10 + 1) matches the rounds/iterations test below
    (15 * 10 + 1) — the extra call only appears when iterations > 1.
    """
    calls = []
    benchmark.pedantic(calls.append, args=[1], iterations=10)
    assert calls == [1] * 11


def test_rounds_iterations(benchmark):
    """rounds=15 with iterations=10 yields 15 * 10 calls plus the one extra call."""
    calls = []
    benchmark.pedantic(calls.append, args=[1], iterations=10, rounds=15)
    assert calls == [1] * 151


def test_rounds(benchmark):
    """With the default iterations=1 there is no extra call: exactly `rounds` calls."""
    calls = []
    benchmark.pedantic(calls.append, args=[1], rounds=15)
    assert calls == [1] * 15


def test_warmup_rounds(benchmark):
    """Warmup rounds run the target too: 15 warmup + 5 measured = 20 calls."""
    calls = []
    benchmark.pedantic(calls.append, args=[1], warmup_rounds=15, rounds=5)
    assert calls == [1] * 20


@mark.parametrize('value', [0, 'x'])
def test_rounds_must_be_int(benchmark, value):
    """Invalid `rounds` (zero or non-int) raises ValueError before any target call."""
    calls = []
    with raises(ValueError):
        benchmark.pedantic(calls.append, args=[1], rounds=value)
    assert calls == []


@mark.parametrize('value', [-15, 'x'])
def test_warmup_rounds_must_be_int(benchmark, value):
    """Invalid `warmup_rounds` (negative or non-int) raises ValueError; target never runs."""
    calls = []
    with raises(ValueError):
        benchmark.pedantic(calls.append, args=[1], warmup_rounds=value)
    assert calls == []


def test_setup_many_rounds(benchmark):
    """The setup callable is invoked once per round, so 10 rounds give 10 calls."""
    calls = []

    def target(foo, bar=123):
        calls.append((foo, bar))

    benchmark.pedantic(target, setup=lambda: ([1], {'bar': 2}), rounds=10)
    assert calls == [(1, 2)] * 10


def test_cant_use_both_args_and_setup_with_return(benchmark):
    """Passing explicit args while setup() also returns args is rejected (TypeError)."""
    calls = []

    def target(foo, bar=123):
        calls.append((foo, bar))

    with raises(TypeError):
        benchmark.pedantic(target, setup=lambda: ([1], {'bar': 2}), args=[123])
    assert calls == []


def test_can_use_both_args_and_setup_without_return(benchmark):
    """A setup() that returns None coexists with explicit args, which stay in effect."""
    calls = []

    def target(foo, bar=123):
        calls.append((foo, bar))

    benchmark.pedantic(target, setup=lambda: None, args=[123])
    assert calls == [(123, 123)]


def test_cant_use_setup_with_many_iterations(benchmark):
    """Combining setup with iterations > 1 is rejected with ValueError."""
    with raises(ValueError):
        benchmark.pedantic(None, setup=lambda: None, iterations=2)


@mark.parametrize('value', [0, -1, 'asdf'])
def test_iterations_must_be_positive_int(benchmark, value):
    """`iterations` must be an int >= 1; anything else raises ValueError."""
    with raises(ValueError):
        benchmark.pedantic(None, setup=lambda: None, iterations=value)