File: slab_api.h

#ifndef LIBUALLOC_SLAB_API_H
#define LIBUALLOC_SLAB_API_H

#include <libualloc/libualloc.h>

/*
	Allocator: slab - fixed size slabs over unaligned pages

	Serves fixed size slabs from a list of pages allocated in batch.
	Similar to slabap, but slightly more expensive because it does not
	assume 2^n aligned page allocation.

	Allocation size:      fixed
	Standard calls:       alloc, free, clean
	Per allocation cost:  2 pointers + alignment
	Per page cost:        2 pointers + 1 long + at most one slab_size
*/

typedef struct uall_slab_s uall_slab_t;
typedef struct uall_slab_page_s uall_slab_page_t;

typedef struct {
	/* configuration */
	uall_sysalloc_t *sys;
	long slab_size;

	void *user_data;

	/* internal states - init all bytes to 0 */
	uall_slab_page_t *pages; /* singly linked list of slab pages */
	uall_slab_t *free_slabs; /* singly linked list cache */
} uall_slabs_t;
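
/*
	Setup sketch (illustrative, not part of this header): one possible way
	to prepare a context. Zero-initialize the whole struct so the internal
	state starts out empty, then fill in the configuration fields. The
	my_sysalloc instance below is a hypothetical uall_sysalloc_t; use
	whichever system allocator your build of libualloc provides.

	uall_slabs_t ctx = {0};      -- zero all bytes, including internal state
	ctx.sys = &my_sysalloc;      -- hypothetical uall_sysalloc_t instance
	ctx.slab_size = 64;          -- every allocation will be 64 bytes
*/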


/* Return a new allocation of ctx->slab_size bytes */
UALL_INLINE void *uall_slab_alloc(uall_slabs_t *ctx);

/* Free a previously allocated slab (may trigger a page free) */
UALL_INLINE void uall_slab_free(uall_slabs_t *ctx, void *ptr);

/* Free all data and empty ctx, which will be ready to accept new allocations
   again; cheaper than calling uall_slab_free() multiple times */
UALL_INLINE void uall_slab_clean(uall_slabs_t *ctx);
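
/*
	Usage sketch (illustrative, assumes a ctx configured as in the setup
	example above): allocate a couple of fixed size slabs, release one
	individually, then tear everything down at once with the cheaper bulk
	call.

	void *a = uall_slab_alloc(&ctx);   -- new block of ctx->slab_size bytes
	void *b = uall_slab_alloc(&ctx);   -- another fixed size block
	uall_slab_free(&ctx, a);           -- return one slab (may free a page)
	uall_slab_clean(&ctx);             -- free all pages; ctx is reusable
*/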

#endif