//! Benchmarks for `BitSlice` construction and single-bit access, across all
//! unsigned storage types and both bit orderings.
#![feature(test)]
extern crate test;
use bitvec::prelude::*;
use test::{
bench::black_box,
Bencher,
};
/* `BitSlice::empty` is not benched, because the compiler const-folds it. It
is not a `const fn`, but it has exactly one function call, which is `const`, and
creates a value object from that function. As such, the compiler can prove that
the return value is a `const` value, and insert the value at all
`BitSlice::empty` call sites. It takes 0ns.
*/
/// Benchmarks construction of a `BitSlice` view over a single element, for
/// every storage type and both bit orderings.
///
/// Inputs and outputs are routed through `black_box` so the optimizer cannot
/// const-fold the constructor call away (the source element `&!0` is a
/// compile-time constant otherwise). Note that successive `b.iter` calls in
/// one `#[bench]` fn each run, but the harness reports only the final sample.
#[bench]
fn element(b: &mut Bencher) {
    b.iter(|| black_box(BitSlice::<u8, Msb0>::from_element(black_box(&!0))));
    b.iter(|| black_box(BitSlice::<u8, Lsb0>::from_element(black_box(&!0))));
    b.iter(|| black_box(BitSlice::<u16, Msb0>::from_element(black_box(&!0))));
    b.iter(|| black_box(BitSlice::<u16, Lsb0>::from_element(black_box(&!0))));
    b.iter(|| black_box(BitSlice::<u32, Msb0>::from_element(black_box(&!0))));
    b.iter(|| black_box(BitSlice::<u32, Lsb0>::from_element(black_box(&!0))));
    #[cfg(target_pointer_width = "64")]
    {
        b.iter(|| black_box(BitSlice::<u64, Msb0>::from_element(black_box(&!0))));
        b.iter(|| black_box(BitSlice::<u64, Lsb0>::from_element(black_box(&!0))));
    }
}
/// Benchmarks construction of a `BitSlice` view over a multi-element slice,
/// for every storage type and both bit orderings.
///
/// The source slice and the returned view both pass through `black_box`:
/// without it, the slice literal is a constant and the (checked) constructor
/// can be folded out entirely, so the bench would measure nothing.
#[bench]
fn slice(b: &mut Bencher) {
    b.iter(|| black_box(BitSlice::<u8, Msb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
    b.iter(|| black_box(BitSlice::<u8, Lsb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
    b.iter(|| black_box(BitSlice::<u16, Msb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
    b.iter(|| black_box(BitSlice::<u16, Lsb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
    b.iter(|| black_box(BitSlice::<u32, Msb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
    b.iter(|| black_box(BitSlice::<u32, Lsb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
    #[cfg(target_pointer_width = "64")]
    {
        b.iter(|| black_box(BitSlice::<u64, Msb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
        b.iter(|| black_box(BitSlice::<u64, Lsb0>::from_slice(black_box(&[0, 1, !0 - 1, !0][..]))));
    }
}
/// Benchmarks `BitSlice::len`, which computes the bit count from the encoded
/// pointer representation.
///
/// Both the slice handle and the returned length are `black_box`ed; otherwise
/// the length of a fixed-size array view is a compile-time constant and the
/// closure body would be optimized to nothing.
#[bench]
fn len(b: &mut Bencher) {
    let bsb08 = [0u8; 16].view_bits::<Msb0>();
    let bsl08 = [0u8; 16].view_bits::<Lsb0>();
    b.iter(|| black_box(black_box(bsb08).len()));
    b.iter(|| black_box(black_box(bsl08).len()));
    let bsb16 = [0u16; 8].view_bits::<Msb0>();
    let bsl16 = [0u16; 8].view_bits::<Lsb0>();
    b.iter(|| black_box(black_box(bsb16).len()));
    b.iter(|| black_box(black_box(bsl16).len()));
    let bsb32 = [0u32; 4].view_bits::<Msb0>();
    let bsl32 = [0u32; 4].view_bits::<Lsb0>();
    b.iter(|| black_box(black_box(bsb32).len()));
    b.iter(|| black_box(black_box(bsl32).len()));
    #[cfg(target_pointer_width = "64")]
    {
        let bsb64 = [0u64; 2].view_bits::<Msb0>();
        let bsl64 = [0u64; 2].view_bits::<Lsb0>();
        b.iter(|| black_box(black_box(bsb64).len()));
        b.iter(|| black_box(black_box(bsl64).len()));
    }
}
// This index value is not only "nice", it also ensures that the hard path is
// hit in `BitIdx::offset`.
/// Benchmarks immutable single-bit indexing (`Index<usize>`) for every
/// storage type and both bit orderings.
///
/// Both the slice handle and the index are laundered through `black_box` so
/// the offset arithmetic cannot be folded at compile time; bit 69 lands past
/// the first element of every storage type, exercising the non-trivial path
/// in `BitIdx::offset`. The source arrays are zeroed, so the read bit is
/// always `false` and the `assert!` holds.
#[bench]
fn index(b: &mut Bencher) {
    let by_msb0_u8 = [0u8; 16].view_bits::<Msb0>();
    let by_lsb0_u8 = [0u8; 16].view_bits::<Lsb0>();
    b.iter(|| assert!(!black_box(by_msb0_u8)[black_box(69)]));
    b.iter(|| assert!(!black_box(by_lsb0_u8)[black_box(69)]));

    let by_msb0_u16 = [0u16; 8].view_bits::<Msb0>();
    let by_lsb0_u16 = [0u16; 8].view_bits::<Lsb0>();
    b.iter(|| assert!(!black_box(by_msb0_u16)[black_box(69)]));
    b.iter(|| assert!(!black_box(by_lsb0_u16)[black_box(69)]));

    let by_msb0_u32 = [0u32; 4].view_bits::<Msb0>();
    let by_lsb0_u32 = [0u32; 4].view_bits::<Lsb0>();
    b.iter(|| assert!(!black_box(by_msb0_u32)[black_box(69)]));
    b.iter(|| assert!(!black_box(by_lsb0_u32)[black_box(69)]));

    #[cfg(target_pointer_width = "64")]
    {
        let by_msb0_u64 = [0u64; 2].view_bits::<Msb0>();
        let by_lsb0_u64 = [0u64; 2].view_bits::<Lsb0>();
        b.iter(|| assert!(!black_box(by_msb0_u64)[black_box(69)]));
        b.iter(|| assert!(!black_box(by_lsb0_u64)[black_box(69)]));
    }
}
/* This routine has more work to do: index, create a reference struct, and drop
it. The compiler *should* be able to properly arrange immediate drops, though.
*/
/* This routine has more work to do: index, create a reference proxy, write
through it, and drop it. The compiler *should* be able to properly arrange
immediate drops, though.
*/
/// Benchmarks `BitSlice::get_mut` plus a write through the returned proxy,
/// for every storage type and both bit orderings.
///
/// The index is passed through `black_box`, matching the `index` benchmark:
/// a literal `69` would let the compiler pre-compute the element/bit offset
/// and under-report the cost of the lookup.
#[bench]
fn get_mut(b: &mut Bencher) {
    let mut src = [0u8; 16];
    let bsb08 = src.view_bits_mut::<Msb0>();
    b.iter(|| *bsb08.get_mut(black_box(69)).unwrap() = true);
    let mut src = [0u8; 16];
    let bsl08 = src.view_bits_mut::<Lsb0>();
    b.iter(|| *bsl08.get_mut(black_box(69)).unwrap() = true);
    let mut src = [0u16; 8];
    let bsb16 = src.view_bits_mut::<Msb0>();
    b.iter(|| *bsb16.get_mut(black_box(69)).unwrap() = true);
    let mut src = [0u16; 8];
    let bsl16 = src.view_bits_mut::<Lsb0>();
    b.iter(|| *bsl16.get_mut(black_box(69)).unwrap() = true);
    let mut src = [0u32; 4];
    let bsb32 = src.view_bits_mut::<Msb0>();
    b.iter(|| *bsb32.get_mut(black_box(69)).unwrap() = true);
    let mut src = [0u32; 4];
    let bsl32 = src.view_bits_mut::<Lsb0>();
    b.iter(|| *bsl32.get_mut(black_box(69)).unwrap() = true);
    #[cfg(target_pointer_width = "64")]
    {
        let mut src = [0u64; 2];
        let bsb64 = src.view_bits_mut::<Msb0>();
        b.iter(|| *bsb64.get_mut(black_box(69)).unwrap() = true);
        let mut src = [0u64; 2];
        let bsl64 = src.view_bits_mut::<Lsb0>();
        b.iter(|| *bsl64.get_mut(black_box(69)).unwrap() = true);
    }
}