/**
* @file entry_pool.h
*
* Header file for the entry pool object
*
* Copyright (C) 2000 by Mike Perry.
* Distributed WITHOUT WARRANTY under the GPL. See COPYING for details.
*/
#ifndef __NJ_LIB_ENTRY_POOL_H__
#define __NJ_LIB_ENTRY_POOL_H__
#include <sys/types.h>
#include <lib/callstack_pool.h>
#include <lib/heap_entry.h>
#include <lib/stack.h>
#include <lib/table.h>
#include <config.h>
/** Clarity typedef for the entry index */
typedef unsigned int nj_entry_index_t;
/** The entry pool */
struct nj_entry_pool
{
    struct nj_callstack_pool cs_pool; /**< The callstack pool */
    struct nj_table entry_table;      /**< Table of heap entries */
    struct nj_stack free_list;        /**< Free list of heap entries */
    char file[32];                    /**< The file for this pool */
    int dump_leaks;                   /**< Do we dump leaks on destruction? */
};
/**@{ @name Heap table macros.
 * The heap table must accommodate enough heap_entries to describe the address
 * space. Worst case is if each heap_entry describes only 2 pages of memory.
 * This is approximately 16 megs on a 32bit address space. Don't worry, it's
 * only 16 megs of ADDRESS SPACE... it's not used until faulted, and is synced
 * often, so not much of it is resident.
 *
 * Aha. Only create the supertable when the first 32 bit table is full. This
 * is then independent of bit width, and allows us to do crazy shit like store
 * all info even when it's being freed (SAVE_ALL_INFO). This should be done as
 * 32, 16, and 16, with the last 16 not being used on Linux at least.
 * @see __nj_new_heap_tbl() save_heap()
 */
#define NJ_ENTRY_POOL_INIT_SIZE (NJ_ALLOCS_IN_ADDRESS_SPACE \
* sizeof(struct nj_heap_entry))
// + sizeof(struct nj_mem_stats_light))
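/* Rough arithmetic behind the "16 megs" figure above (a sketch only; it
 * assumes 4 KiB pages and a 32-byte struct nj_heap_entry -- the real values
 * come from config.h and lib/heap_entry.h):
 *
 *   entries = 2^32 bytes / (2 pages * 4096 bytes) = 2^19
 *   table   = 2^19 entries * 32 bytes/entry       = 2^24 bytes = 16 megs
 */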
#define NJ_ENTRY_POOL_INDEX_TO_ENTRY(pool, idx) \
NJ_TABLE_INDEX_TO_PTR((pool).entry_table, idx, struct nj_heap_entry)
/* Race conditions here really don't matter. It's only a read, and races
 * will only cause false positives, which is OK. */
#define NJ_ENTRY_POOL_VALID_INDEX(pool, idx) \
    ((idx) * sizeof(struct nj_heap_entry) < (pool).entry_table.top)
#define NJ_ENTRY_POOL_CORRECT_INDEX(pool, idx, block) \
    (NJ_ENTRY_POOL_VALID_INDEX(pool, idx) \
    && (((block) == NJ_TABLE_INDEX_TO_PTR((pool).entry_table, idx, struct nj_heap_entry)->start) \
    || ((block) == NJ_ALLOCATOR_BLOCK_UNKNOWN)))
/*@}*/
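/* Minimal usage sketch for the macros above (hypothetical caller; pool, idx,
 * and block would come from the surrounding allocator code):
 *
 *   struct nj_heap_entry *entry = NULL;
 *   if (NJ_ENTRY_POOL_CORRECT_INDEX(pool, idx, block))
 *       entry = NJ_ENTRY_POOL_INDEX_TO_ENTRY(pool, idx);
 *   // entry still NULL here means a stale or mismatched index
 */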
void __nj_entry_pool_bootstrap_init(struct nj_entry_pool *);
void __nj_entry_pool_user_init(struct nj_entry_pool *, struct nj_prefs *);
void __nj_entry_pool_fini(struct nj_entry_pool *);
nj_entry_index_t __nj_entry_pool_request_index(struct nj_entry_pool *);
nj_entry_index_t __nj_entry_pool_renew_index(struct nj_entry_pool *, nj_entry_index_t, nj_addr_t, size_t, struct nj_dynamic_prefs);
void __nj_entry_pool_index_init(struct nj_entry_pool *, nj_entry_index_t, nj_addr_t, size_t, struct nj_dynamic_prefs);
void __nj_entry_pool_index_fini(struct nj_entry_pool *, nj_entry_index_t, struct nj_dynamic_prefs);
struct nj_heap_entry *__nj_entry_pool_get_valid_entry(struct nj_entry_pool *, nj_entry_index_t, nj_addr_t, nj_addr_t);
void __nj_entry_pool_print_index(struct nj_entry_pool *, nj_entry_index_t);
void __nj_entry_pool_print_by_addr(struct nj_entry_pool *entry_pool, nj_addr_t);
void __nj_entry_pool_dump_leaks(struct nj_entry_pool *);
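/* Lifecycle sketch for the functions above (hypothetical driver code; the
 * addr, size, and dyn_prefs values are placeholders, not defined here):
 *
 *   struct nj_entry_pool pool;
 *   __nj_entry_pool_bootstrap_init(&pool);
 *   nj_entry_index_t idx = __nj_entry_pool_request_index(&pool);
 *   __nj_entry_pool_index_init(&pool, idx, addr, size, dyn_prefs);
 *   ...
 *   __nj_entry_pool_index_fini(&pool, idx, dyn_prefs);
 *   __nj_entry_pool_dump_leaks(&pool);
 *   __nj_entry_pool_fini(&pool);
 */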
#endif /* __NJ_LIB_ENTRY_POOL_H__ */
// vim:ts=4