File: test_benchmark_aggregate.py

Package: ormar 0.22.0-1

from typing import List

import pytest

from benchmarks.conftest import Author

pytestmark = pytest.mark.asyncio
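
# Both fixtures used below are provided by benchmarks/conftest.py (not shown
# on this page):
#   aio_benchmark - apparently adapts the benchmark machinery to async
#                   callables: the decorated coroutine function becomes a
#                   plain callable that returns the awaited result.
#   authors_in_db - presumably inserts `num_models` Author rows whose integer
#                   `score` lies in [0, 100]; the assertions below rely on
#                   both the row count and that range.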


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_count(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def count():
        return await Author.objects.count()

    c = count()
    assert c == len(authors_in_db)


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_avg(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def avg():
        return await Author.objects.avg("score")

    average = avg()
    assert 0 <= average <= 100


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_sum(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def sum_():
        return await Author.objects.sum("score")

    s = sum_()
    assert 0 <= s <= 100 * num_models


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_min(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def min_():
        return await Author.objects.min("score")

    m = min_()
    assert 0 <= m <= 100


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_max(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def max_():
        return await Author.objects.max("score")

    m = max_()
    assert 0 <= m <= 100
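
The aio_benchmark and authors_in_db fixtures referenced above are defined in
benchmarks/conftest.py, which is not included on this page. For context, the
following is a minimal, hypothetical sketch of an aio_benchmark fixture with
the shape these tests expect; it assumes pytest-benchmark supplies the
underlying benchmark fixture, and the package's real implementation may differ
(for instance in how it shares an event loop with pytest-asyncio).

import asyncio
import threading

import pytest


@pytest.fixture
def aio_benchmark(benchmark):
    """Hypothetical sketch: adapt the sync benchmark fixture to coroutines.

    Used as a decorator, it turns an async def into a plain callable that
    runs the coroutine under the benchmark timer and returns its result.
    """
    # A dedicated event loop in a background thread lets the timed call be
    # issued from inside an async test without nesting running loops.
    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=loop.run_forever, daemon=True)
    thread.start()

    def decorator(async_fn):
        def run_once(*args, **kwargs):
            future = asyncio.run_coroutine_threadsafe(
                async_fn(*args, **kwargs), loop
            )
            return future.result()

        # pytest-benchmark calls run_once repeatedly to collect timings and
        # returns the value of the benchmarked call, which the test asserts on.
        return lambda *args, **kwargs: benchmark(run_once, *args, **kwargs)

    yield decorator

    loop.call_soon_threadsafe(loop.stop)
    thread.join()
    loop.close()


# authors_in_db is not sketched here: it is assumed to bulk-create num_models
# Author rows with scores in [0, 100] and to remove them after each test.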