File: sizes.c

package info (click to toggle)
redis 5%3A8.0.2-3
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 22,304 kB
  • sloc: ansic: 216,903; tcl: 51,562; sh: 4,625; perl: 4,214; cpp: 3,568; python: 2,954; makefile: 2,055; ruby: 639; javascript: 30; csh: 7
file content (53 lines) | stat: -rw-r--r-- 1,078 bytes parent folder | download | duplicates (6)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
#include "test/jemalloc_test.h"

#include <stdio.h>

/*
 * Print the sizes of various important core data structures.  OK, I guess this
 * isn't really a "stress" test, but it does give useful information about
 * low-level performance characteristics, as the other things in this directory
 * do.
 */

/*
 * Pretty-print a size: values under 1024 are shown exactly in bytes;
 * larger values are scaled down by powers of 1024 and printed with the
 * matching unit suffix.
 */
static void
do_print(const char *name, size_t sz_bytes) {
	static const char *const units[] = {"bytes", "KB", "MB", "GB", "TB",
	    "PB", "EB", "ZB"};
	const size_t nunits = sizeof(units) / sizeof(units[0]);

	double scaled = (double)sz_bytes;
	size_t unit = 0;
	/* Scale down until below 1024 or we run out of unit names. */
	for (; scaled >= 1024 && unit + 1 < nunits; unit++) {
		scaled /= 1024;
	}

	if (unit == 0) {
		/* Unscaled: print the exact integer byte count. */
		printf("%-20s: %zu bytes\n", name, sz_bytes);
	} else {
		printf("%-20s: %f %s\n", name, scaled, units[unit]);
	}
}

int
main() {
	/*
	 * Report sizeof() for each core jemalloc data structure; the macro
	 * stringifies the type name so it doubles as the printed label.
	 */
#define PRINT_SIZE(type)						\
	do_print(#type, sizeof(type))
	PRINT_SIZE(arena_t);
	PRINT_SIZE(arena_stats_t);
	PRINT_SIZE(base_t);
	PRINT_SIZE(decay_t);
	PRINT_SIZE(edata_t);
	PRINT_SIZE(ecache_t);
	PRINT_SIZE(eset_t);
	PRINT_SIZE(malloc_mutex_t);
	PRINT_SIZE(prof_tctx_t);
	PRINT_SIZE(prof_gctx_t);
	PRINT_SIZE(prof_tdata_t);
	PRINT_SIZE(rtree_t);
	PRINT_SIZE(rtree_leaf_elm_t);
	PRINT_SIZE(slab_data_t);
	PRINT_SIZE(tcache_t);
	PRINT_SIZE(tcache_slow_t);
	PRINT_SIZE(tsd_t);
#undef PRINT_SIZE
}