File: main.rs

package info
rust-time 0.3.47-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 1,972 kB
  • sloc: makefile: 2
//! Benchmarks for `time`.
//!
//! These benchmarks are not very precise, but they are good enough to catch major performance
//! regressions. Run them manually if you suspect a regression; CI **does not** run benchmarks.

#![allow(
    clippy::std_instead_of_core,
    clippy::std_instead_of_alloc,
    clippy::alloc_instead_of_core,
    reason = "irrelevant for benchmarks"
)]
#![allow(
    clippy::missing_docs_in_private_items,
    reason = "may be removed in the future"
)]

#[cfg(not(all(
    feature = "default",
    feature = "alloc",
    feature = "formatting",
    feature = "large-dates",
    feature = "local-offset",
    feature = "macros",
    feature = "parsing",
    feature = "quickcheck",
    feature = "serde-human-readable",
    feature = "serde-well-known",
    feature = "std",
    feature = "rand",
    feature = "serde",
    bench,
)))]
compile_error!(
    "benchmarks must be run as `RUSTFLAGS=\"--cfg bench\" cargo criterion --all-features`"
);
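
// As the error message above states, the benchmarks are built with every
// crate feature enabled plus the custom `bench` cfg, i.e.
//
//     RUSTFLAGS="--cfg bench" cargo criterion --all-features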

macro_rules! setup_benchmark {
    (
        $group_prefix:literal,
        $(
            $(#[$fn_attr:meta])*
            fn $fn_name:ident ($bencher:ident : $bencher_type:ty)
            $code:block
        )*
    ) => {
        $(
            $(#[$fn_attr])*
            fn $fn_name(
                c: &mut ::criterion::Criterion
            ) {
                c.bench_function(
                    concat!($group_prefix, ": ", stringify!($fn_name)),
                    |$bencher: $bencher_type| $code
                );
            }
        )*

        ::criterion::criterion_group! {
            name = benches;
            config = ::criterion::Criterion::default()
                // Set a stricter statistical significance threshold ("p-value")
                // for deciding what's an actual performance change vs. noise.
                // The more benchmarks, the lower this needs to be in order to
                // not get lots of false positives.
                .significance_level(0.0001)
                // Ignore any performance change less than this (0.05 = 5%) as
                // noise, regardless of statistical significance.
                .noise_threshold(0.05)
                // Reduce the time taken to run each benchmark
                .warm_up_time(::std::time::Duration::from_millis(100))
                .measurement_time(::std::time::Duration::from_millis(500));
            targets = $($fn_name,)*
        }
    };
}
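
// Illustrative (hypothetical) usage of `setup_benchmark!` from a benchmark
// module; the group name, function signature, and body below are examples
// only, not code from this repository:
//
//     setup_benchmark! {
//         "Date",
//         fn from_julian_day(ben: &mut Bencher<'_>) {
//             ben.iter(|| Date::from_julian_day(2_459_580));
//         }
//     }
//
// Each function expands to a `Criterion::bench_function` call named
// "<group prefix>: <fn name>", and all functions in the invocation are
// registered in a single `benches` group via `criterion_group!`.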

macro_rules! iter_batched_ref {
    ($ben:ident, $initializer:expr,[$($routine:expr),+ $(,)?]) => {$(
        $ben.iter_batched_ref(
            $initializer,
            $routine,
            ::criterion::BatchSize::SmallInput,
        );
    )+};
}
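
// Illustrative (hypothetical) use of `iter_batched_ref!` inside a benchmark
// body; the initializer and routines are examples only:
//
//     iter_batched_ref!(
//         ben,
//         || Duration::seconds(1),
//         [
//             |duration| *duration + Duration::seconds(1),
//             |duration| *duration - Duration::seconds(1),
//         ]
//     );
//
// This expands to one `Bencher::iter_batched_ref` call per routine, all
// sharing the same setup closure and `BatchSize::SmallInput`.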

macro_rules! mods {
    ($(mod $mod:ident;)+) => {
        $(mod $mod;)+
        ::criterion::criterion_main!($($mod::benches),+);
    }
}

mods![
    mod date;
    mod duration;
    mod formatting;
    mod instant;
    mod month;
    mod offset_date_time;
    mod parsing;
    mod primitive_date_time;
    mod rand08;
    mod rand09;
    mod time;
    mod utc_offset;
    mod util;
    mod weekday;
];
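
// The invocation above expands to the `mod` declarations plus a single
// `criterion_main!(date::benches, duration::benches, /* … */ weekday::benches)`
// entry point covering every benchmark group.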

/// Shuffle a slice in a random but deterministic manner.
fn shuffle<T, const N: usize>(mut slice: [T; N]) -> [T; N] {
    use ::rand09::prelude::*;

    // Seed the RNG with a fixed value so the shuffle is reproducible across runs.
    let mut rng = SmallRng::seed_from_u64(0);
    slice.shuffle(&mut rng);
    slice
}
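
// Illustrative (hypothetical) usage: shuffling a fixed set of inputs so a
// benchmark visits them in a random but reproducible order.
//
//     let ordinals = shuffle([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
//     // `ordinals` holds the same values in a deterministic shuffled order.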