File: mpool.h

package info (click to toggle)
spiped 1.6.4-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 1,328 kB
  • sloc: ansic: 11,951; sh: 1,081; makefile: 629; perl: 121
file content (169 lines) | stat: -rw-r--r-- 4,176 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
#ifndef MPOOL_H_
#define MPOOL_H_

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "ctassert.h"

/**
 * Memory allocator cache.  Memory allocations can be returned to the pool
 * and reused by a subsequent allocation without returning all the way to
 * free/malloc.  In effect, this is an optimization for the case where we
 * know we will want another allocation of the same size soon, at the expense
 * of keeping memory allocated (and thus preventing any other code from
 * allocating the same memory).
 */

/* Internal data. */
struct mpool {
	size_t stacklen;	/* Number of pointers currently cached. */
	size_t allocsize;	/* Capacity of ->allocs; autotuned upwards. */
	void ** allocs;		/* Stack of cached (freed) allocations. */
	uint64_t nallocs;	/* mpool_malloc() calls since last reset. */
	uint64_t nempties;	/* Calls which found the stack empty. */
	int state;		/* 0 until atexit handler registered; then 1. */
	void ** allocs_static;	/* Initial (static) stack buffer; used to
				 * decide whether ->allocs must be freed. */
	void (* atexitfunc)(void);	/* Per-pool cleanup, run via atexit(). */
};

/* Release all cached objects, then the stack itself if it was reallocated. */
static inline void
mpool_atexit(struct mpool * M)
{
	size_t i;

	/* Free every cached allocation remaining on the stack. */
	for (i = M->stacklen; i > 0; i--)
		free(M->allocs[i - 1]);
	M->stacklen = 0;

	/* The stack array itself is heap-allocated unless it is the
	 * original static buffer. */
	if (M->allocs != M->allocs_static)
		free(M->allocs);
}

/* Allocate an object of ${len} bytes, preferring the pool's cache. */
static inline void *
mpool_malloc(struct mpool * M, size_t len)
{
	void * p;

	/* Record this allocation request. */
	M->nallocs += 1;

	/* Pop a cached object off the stack if one is available. */
	if (M->stacklen > 0) {
		M->stacklen -= 1;
		p = M->allocs[M->stacklen];
		return (p);
	}

	/* The pool was empty; record it for autotuning purposes. */
	M->nempties += 1;

	/* Register the cleanup handler the first time the pool runs dry. */
	if (M->state == 0) {
		atexit(M->atexitfunc);
		M->state = 1;
	}

	/* Fall back to the system allocator. */
	return (malloc(len));
}

/*
 * Return the object ${p} to the pool ${M}, or hand it back to free() if the
 * stack is full and the hit rate does not justify growing it.
 */
static inline void
mpool_free(struct mpool * M, void * p)
{
	void ** allocs_new;

	/* Behave consistently with free(NULL). */
	if (p == NULL)
		return;

	/* If we have space in the stack, cache the object. */
	if (M->stacklen < M->allocsize) {
		M->allocs[M->stacklen++] = p;
		return;
	}

	/*
	 * Autotuning: If more than 1/256 of mpool_malloc() calls resulted in
	 * a malloc(), double the stack.
	 */
	if (M->nempties > (M->nallocs >> 8)) {
		/* Sanity check. */
		assert(M->allocsize > 0);

		/* Allocate new stack and copy pointers into it. */
		allocs_new = (void **)malloc(M->allocsize * 2 * sizeof(void *));
		if (allocs_new) {
			memcpy(allocs_new, M->allocs,
			    M->allocsize * sizeof(void *));
			/* Free the old stack unless it is the static buffer. */
			if (M->allocs != M->allocs_static)
				free(M->allocs);
			M->allocs = allocs_new;
			M->allocsize = M->allocsize * 2;
			M->allocs[M->stacklen++] = p;
		} else
			/* Couldn't grow the stack; give the object to libc. */
			free(p);
	} else
		/* Stack full and hit rate is good; no reason to cache more. */
		free(p);

	/* Reset statistics.  (Only reached when the stack was full.) */
	M->nempties = 0;
	M->nallocs = 0;
}

/**
 * MPOOL(name, type, size):
 * Define the functions
 *
 * ${type} * mpool_${name}_malloc(void);
 * void mpool_${name}_free(${type} *);
 *
 * which allocate and free structures of type ${type}.  A minimum of ${size}
 * such structures are kept cached after _free is called in order to allow
 * future _malloc calls to be rapidly serviced; this limit will be autotuned
 * upwards depending on the allocation/free pattern.
 *
 * Cached structures will be freed at program exit time in order to aid
 * in the detection of memory leaks.
 *
 * Notes:
 * - ${size} must be greater than zero; this is checked at compile time
 *   via CTASSERT.
 * - The cleanup handler mpool_${name}_atexit is registered with atexit()
 *   lazily, on the first _malloc call which finds the pool empty.
 * - The mpool_${name}_dummyfunc / mpool_${name}_dummyptr pair merely
 *   references the generated functions so that compilers do not warn
 *   about them being unused.
 * - The trailing "struct mpool_${name}_dummy" declaration (deliberately
 *   left without a semicolon) makes the macro invocation consume the
 *   semicolon written by the caller, i.e. "MPOOL(foo, struct foo, 8);".
 */
#define MPOOL(name, type, size)					\
static void mpool_##name##_atexit(void);			\
static void * mpool_##name##_static[size];			\
static struct mpool mpool_##name##_rec =			\
    {0, size, mpool_##name##_static, 0, 0, 0,			\
    mpool_##name##_static, mpool_##name##_atexit};		\
								\
CTASSERT(size > 0);						\
								\
static void							\
mpool_##name##_atexit(void)					\
{								\
								\
	mpool_atexit(&mpool_##name##_rec);			\
}								\
								\
static inline type *						\
mpool_##name##_malloc(void)					\
{								\
								\
	return (mpool_malloc(&mpool_##name##_rec, sizeof(type)));	\
}								\
								\
static inline void						\
mpool_##name##_free(type * p)					\
{								\
								\
	mpool_free(&mpool_##name##_rec, p);			\
}								\
								\
static void (* mpool_##name##_dummyptr)(void);			\
static inline void						\
mpool_##name##_dummyfunc(void)					\
{								\
								\
	(void)mpool_##name##_malloc;				\
	(void)mpool_##name##_free;				\
	(void)mpool_##name##_dummyptr;				\
}								\
static void (* mpool_##name##_dummyptr)(void) = mpool_##name##_dummyfunc; \
struct mpool_##name##_dummy

#endif /* !MPOOL_H_ */