File: test_benchmark_get.py

Package: ormar 0.20.2-1
"""Benchmarks for ormar read queries: all(), get(), get_or_none(),
get_or_create(), first() and exists()."""

import random
import string
from typing import List

import pytest
import pytest_asyncio

from benchmarks.conftest import Author, Book, Publisher

pytestmark = pytest.mark.asyncio


@pytest_asyncio.fixture()
async def books(author: Author, publisher: Publisher, num_models: int):
    # Build num_models books for a single author/publisher and insert them
    # with a single bulk_create query.
    books = [
        Book(
            author=author,
            publisher=publisher,
            title="".join(random.sample(string.ascii_letters, 5)),
            year=random.randint(0, 2000),
        )
        for _ in range(num_models)
    ]
    await Book.objects.bulk_create(books)
    return books


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_get_all(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def get_all(authors: List[Author]):
        return await Author.objects.all()

    authors = get_all(authors_in_db)
    for idx, author in enumerate(authors_in_db):
        assert authors[idx].id == author.id


@pytest.mark.parametrize("num_models", [10, 20, 40])
async def test_get_all_with_related_models(
    aio_benchmark, num_models: int, author: Author, books: List[Book]
):
    @aio_benchmark
    async def get_with_related(author: Author):
        return await Author.objects.select_related("books").all(id=author.id)

    authors = get_with_related(author)
    assert len(authors[0].books) == num_models


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_get_one(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def get_one(authors: List[Author]):
        return await Author.objects.get(id=authors[0].id)

    author = get_one(authors_in_db)
    assert author == authors_in_db[0]


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_get_or_none(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def get_or_none(authors: List[Author]):
        return await Author.objects.get_or_none(id=authors[0].id)

    author = get_or_none(authors_in_db)
    assert author == authors_in_db[0]


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_get_or_create_when_get(
    aio_benchmark, num_models: int, authors_in_db: List[Author]
):
    @aio_benchmark
    async def get_or_create(authors: List[Author]):
        author, created = await Author.objects.get_or_create(id=authors[0].id)
        assert not created
        return author

    author = get_or_create(authors_in_db)
    assert author == authors_in_db[0]


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_first(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def first():
        return await Author.objects.first()

    author = first()
    assert author == authors_in_db[0]


@pytest.mark.parametrize("num_models", [250, 500, 1000])
async def test_exists(aio_benchmark, num_models: int, authors_in_db: List[Author]):
    @aio_benchmark
    async def check_exists(authors: List[Author]):
        return await Author.objects.filter(id=authors[0].id).exists()

    exists = check_exists(authors_in_db)
    assert exists
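

# ---------------------------------------------------------------------------
# A minimal sketch of what the `aio_benchmark` fixture used above could look
# like. The real fixture lives in benchmarks/conftest.py and may differ; this
# version assumes pytest-benchmark's `benchmark` fixture and runs each
# decorated coroutine in a fresh event loop on a worker thread, because the
# tests themselves already execute inside pytest-asyncio's loop. Kept
# commented out so it does not shadow the fixture imported from conftest.
#
# import asyncio
# import threading
#
# import pytest
#
#
# @pytest.fixture()
# def aio_benchmark(benchmark):
#     def decorator(async_func):
#         def run_once(*args, **kwargs):
#             result = {}
#
#             def runner():
#                 loop = asyncio.new_event_loop()
#                 try:
#                     result["value"] = loop.run_until_complete(
#                         async_func(*args, **kwargs)
#                     )
#                 finally:
#                     loop.close()
#
#             thread = threading.Thread(target=runner)
#             thread.start()
#             thread.join()
#             return result["value"]
#
#         def wrapper(*args, **kwargs):
#             # benchmark() times run_once and returns its return value,
#             # which is why e.g. `authors = get_all(authors_in_db)` works.
#             return benchmark(run_once, *args, **kwargs)
#
#         return wrapper
#
#     return decorator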