File: pysort

package info (click to toggle)
python-orjson 3.10.7-2
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 4,180 kB
  • sloc: ansic: 11,270; python: 6,658; sh: 135; makefile: 9
file content (119 lines) | stat: -rwxr-xr-x 3,187 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
#!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import io
import json
import lzma
import os
from pathlib import Path
from timeit import timeit

import rapidjson
import simplejson
import ujson
from tabulate import tabulate

import orjson

# Pin the process to CPUs 0-1 for more reproducible timings (Linux-only API).
os.sched_setaffinity(os.getpid(), {0, 1})


# Fixture directory: ../data relative to this script's location.
dirname = os.path.join(os.path.dirname(__file__), "..", "data")


def read_fixture_obj(filename):
    """Load a JSON fixture from the data directory and parse it with orjson.

    Files ending in ``.xz`` are transparently decompressed with lzma.
    """
    path = Path(dirname, filename)
    raw = path.read_bytes()
    if path.suffix == ".xz":
        raw = lzma.decompress(raw)
    return orjson.loads(raw)


# Benchmark payload: the canonical twitter.json fixture (xz-compressed on disk).
data = read_fixture_obj("twitter.json.xz")

# Column headers for the results table printed at the end.
headers = ("Library", "unsorted (ms)", "sorted (ms)", "vs. orjson")

# Libraries to benchmark; "orjson" must be measured so the ratio column
# has a baseline.
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")

# Number of timeit() repetitions per measurement.
ITERATIONS = 500


def per_iter_latency(val):
    """Convert a total timeit() duration in seconds to per-call milliseconds.

    Returns ``None`` unchanged so missing measurements propagate.
    """
    return None if val is None else (val * 1000) / ITERATIONS


# Per-library (unsorted, sorted) serializer pairs.  Every callable returns
# UTF-8 bytes so all libraries are measured producing equivalent output:
# orjson emits bytes natively, the others return str and must encode.
_DUMPERS = {
    "json": (
        lambda: json.dumps(data).encode("utf-8"),
        lambda: json.dumps(data, sort_keys=True).encode("utf-8"),
    ),
    "simplejson": (
        lambda: simplejson.dumps(data).encode("utf-8"),
        lambda: simplejson.dumps(data, sort_keys=True).encode("utf-8"),
    ),
    "ujson": (
        lambda: ujson.dumps(data).encode("utf-8"),
        lambda: ujson.dumps(data, sort_keys=True).encode("utf-8"),
    ),
    "rapidjson": (
        lambda: rapidjson.dumps(data).encode("utf-8"),
        lambda: rapidjson.dumps(data, sort_keys=True).encode("utf-8"),
    ),
    "orjson": (
        lambda: orjson.dumps(data),
        # Positional form: orjson.dumps(obj, default, option).
        lambda: orjson.dumps(data, None, orjson.OPT_SORT_KEYS),
    ),
}

table = []
# Baseline for the "vs. orjson" ratio column; set when "orjson" is measured.
# Initialized here so a reordered LIBRARIES tuple cannot raise NameError.
orjson_time_sorted = None
for lib_name in LIBRARIES:
    if lib_name not in _DUMPERS:
        raise NotImplementedError(lib_name)
    dump_unsorted, dump_sorted = _DUMPERS[lib_name]

    time_unsorted = per_iter_latency(timeit(dump_unsorted, number=ITERATIONS))
    time_sorted = per_iter_latency(timeit(dump_sorted, number=ITERATIONS))

    if lib_name == "orjson":
        orjson_time_sorted = time_sorted
        compared_to_orjson = 1
    elif time_sorted and orjson_time_sorted:
        # Fix: the ratio divides sorted timings, so the guard must test
        # time_sorted (the original checked time_unsorted) and the baseline.
        compared_to_orjson = time_sorted / orjson_time_sorted
    else:
        compared_to_orjson = None

    table.append(
        (
            lib_name,
            f"{time_unsorted:,.2f}" if time_unsorted else "",
            f"{time_sorted:,.2f}" if time_sorted else "",
            f"{compared_to_orjson:,.1f}" if compared_to_orjson else "",
        )
    )

# Render the results as a GitHub-flavored markdown table.  The rendered
# table plus a trailing newline is passed to print(), which appends its
# own newline — matching the original StringIO-buffered output exactly.
output = tabulate(table, headers, tablefmt="github") + "\n"
print(output)