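"""Benchmarks for aggregate queries (count, avg, sum, min, max) on the Author
model, parametrized over tables of 250, 500, and 1000 rows."""
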
from typing import List

import pytest

from benchmarks.conftest import Author

pytestmark = pytest.mark.asyncio
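
# Each test below follows the same shape: the inner coroutine is decorated
# with the aio_benchmark fixture from benchmarks/conftest. That fixture is
# assumed to behave like pytest-benchmark's `benchmark` for async callables,
# running the coroutine on the event loop, recording timings, and returning
# the awaited result -- which is why a plain call such as `c = count()`
# yields a value rather than a coroutine.
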
@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_count(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def count():
        return await Author.objects.count()

    c = count()
    assert c == len(authors_in_db)


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_avg(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def avg():
        return await Author.objects.avg("score")

    average = avg()
    assert 0 <= average <= 100


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_sum(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def sum_():
        return await Author.objects.sum("score")

    s = sum_()
    assert 0 <= s <= 100 * num_models


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_min(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def min_():
        return await Author.objects.min("score")

    m = min_()
    assert 0 <= m <= 100


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_max(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def max_():
        return await Author.objects.max("score")

    m = max_()
    assert 0 <= m <= 100