/* BLURB lgpl

                           Coda File System
                              Release 5

          Copyright (c) 1987-1999 Carnegie Mellon University
                  Additional copyrights listed below

This code is distributed "AS IS" without warranty of any kind under
the terms of the GNU Library General Public Licence Version 2, as
shown in the file LICENSE. The technical and financial contributors to
Coda are listed in the file CREDITS.

                        Additional copyrights
                            none currently

#*/
/*
* Internal type definitions for the Recoverable Dynamic Storage package.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifdef __STDC__
#include <string.h>
#include "assert.h"
#endif
#include <stdio.h> /* FILE is used for rds_tracing_file below */
#include <rvm/rvm.h>
#include <rvm/rvm_segment.h>
#include <rvm/rds.h>
#ifndef _RDS_PRIVATE_H_
#define _RDS_PRIVATE_H_
/********************
 * Type definitions
 */
/*
 * Rather than waste cycles managing locks that would yield only a small
 * amount of concurrency, we decided to use a single mutex on the entire
 * heap.  With the exception of coalescing, none of the routines should
 * take substantial time, since most perform only memory updates and
 * simple computations.
 */
/* Synchronization and Threads support */
/*
* We can have one of three thread models:
* cthreads: Mach threads (kernel or coroutine)
* lwp: Coda's lightweight process package
* pthreads: POSIX threads
*
* If RVM_USELWP is defined, then lwp support is compiled in.
* If RVM_USEPT is defined, then pthreads support is compiled in.
 * If neither of these is defined, then cthreads support is compiled in.
*
* It is assumed in the rds package that cthreads and pthreads use
* preemptive scheduling, and they are synchronized appropriately.
*
* You must define only one of the above targets, and it must be defined
* consistently across the following packages: RVM, RDS, and URT
*/
#ifndef RVM_USELWP /* normal: use Cthreads */
#ifndef RVM_USEPT
#include <cthreads.h>
/* define types symbolically to permit use of non-Cthread thread support */
#define RVM_MUTEX struct mutex
#define RVM_MUTEX_T mutex_t
#define RVM_CONDITION struct condition
#define RVM_CONDITION_T condition_t
/* macro for testing if a lock is free */
#define LOCK_FREE(lck) \
(mutex_try_lock(&(lck)) ? (mutex_unlock(&(lck)), rvm_true) : rvm_false)
#endif
#endif
#ifdef RVM_USELWP /* special thread support for Coda */
#include "rvm_lwp.h"
#endif
#ifdef RVM_USEPT /* special support for pthreads */
#include "rvm_pthread.h"
#endif
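/*
 * Whichever package is selected, the code below relies only on the
 * symbolic names (RVM_MUTEX, mutex_lock/mutex_unlock, LOCK_FREE, ...).
 * A small usage sketch, assuming the selected support header also
 * provides LOCK_FREE (illustration only, not part of this interface):
 *
 *     assert(LOCK_FREE(heap_lock));   // sanity check: lock must not be held
 *     mutex_lock(&heap_lock);
 *     // ... touch heap structures ...
 *     mutex_unlock(&heap_lock);
 */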
/* Execute 'body' while holding the global heap_lock.  LEAVE_CRITICAL_SECTION
 * may be used inside 'body' to jump straight to the unlock at the end. */
#define LEAVE_CRITICAL_SECTION goto end_critical;
#define CRITICAL(body) do { \
    mutex_lock(&heap_lock); \
    body; \
    goto end_critical; /* reference the label to avoid an unused-label warning */ \
end_critical: \
    mutex_unlock(&heap_lock); \
} while (0);
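/*
 * Usage sketch for CRITICAL/LEAVE_CRITICAL_SECTION (hypothetical caller,
 * not part of this header; the error value is a placeholder):
 *
 *     void example_op(int *err)
 *     {
 *         *err = 0;
 *         CRITICAL({
 *             if (!HEAP_INIT) {            // heap not mapped yet
 *                 *err = -1;               // placeholder error code
 *                 LEAVE_CRITICAL_SECTION;  // jumps to the unlock
 *             }
 *             // ... modify heap structures while holding heap_lock ...
 *         });
 *     }
 */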
/* Guards detect whether the block structure has been illegally overwritten.
 * One is kept in the block header, before the user's data; the other is
 * placed in the last word of the block. */
#define FREE_GUARD 0x345298af
#define ALLOC_GUARD 0x783bd92c
#define END_GUARD 0xfd10a32e
#define RDS_BLOCK_HDR_SIZE (sizeof(block_size_t) + 2 * sizeof(guard_t))
#define BLOCK_END(bp) ((guard_t *)((char *)(bp) + ((bp)->size * RDS_CHUNK_SIZE)) - 1)
#define USER_BLOCK(bp) ((char *)&((bp)->prev))
#define BLOCK_HDR(bp) ((free_block_t *)((char *)(bp) - \
(sizeof(block_size_t) + sizeof(guard_t))))
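/*
 * Sketch of how the guards are meant to be checked for a block that has
 * been handed to the user (illustration only; the real checks live in the
 * allocator and in print_heap).  Assumes the allocator stamps 'type' with
 * ALLOC_GUARD and the last word of the block with END_GUARD:
 *
 *     char *p = ...;                    // pointer previously returned to the user
 *     free_block_t *bp = BLOCK_HDR(p);  // back up over the guard and size words
 *     if (bp->type != ALLOC_GUARD || *BLOCK_END(bp) != END_GUARD)
 *         ;                             // block structure has been overwritten
 */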
typedef unsigned long block_size_t;
typedef unsigned long guard_t;
typedef struct fbt {
    guard_t type;            /* FREE_GUARD or ALLOC_GUARD */
    block_size_t size;       /* Block size, in RDS_CHUNK_SIZE chunks */
    struct fbt *prev, *next; /* Free-list links; overlap the user's data
                                once the block is allocated */
} free_block_t;
#define FREE_LIST_GUARD 0xad938945
typedef struct {
    guard_t guard;       /* Always FREE_LIST_GUARD */
    free_block_t *head;  /* First block on this free list */
} free_list_t;
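/*
 * Sketch of a consistency walk over one free list (illustration only; the
 * NULL-terminated doubly-linked list is an assumption about how the lists
 * are maintained):
 *
 *     free_list_t *flp = &RDS_FREE_LIST[i];
 *     free_block_t *fbp;
 *     assert(flp->guard == FREE_LIST_GUARD);
 *     for (fbp = flp->head; fbp != NULL; fbp = fbp->next)
 *         assert(fbp->type == FREE_GUARD);
 */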
#define NEXT_CONSECUTIVE_BLOCK(bp) ((free_block_t *)((char *)(bp) + ((bp)->size * RDS_CHUNK_SIZE)))
#define HEAP_LIST_GROWSIZE 20 /* Number of blocks to prealloc */
#define RDS_HEAP_VERSION "Dynamic Allocator Using Rvm Release 0.1 1 Dec 1990"
#define RDS_VERSION_MAX 80
typedef struct {
    char version[RDS_VERSION_MAX]; /* Version string */
    unsigned long heaplength;      /* Length of the recoverable heap */
    unsigned long chunk_size;      /* Allocation granularity (bytes per chunk) */
    unsigned long nlists;          /* Number of free lists */
    rds_stats_t stats;             /* Statistics on heap usage */
    unsigned long maxlist;         /* Largest list that is currently non-empty */
    unsigned long dummy[10];       /* Space to allow the header to grow */
    free_list_t lists[1];          /* Number of lists is set dynamically */
} heap_header_t;
/* Global data extern declarations. */
extern heap_header_t *RecoverableHeapStartAddress; /* base of the heap; 0 until initialized */
extern free_block_t *RecoverableHeapHighAddress;
extern RVM_MUTEX heap_lock;    /* the single mutex protecting the heap */
extern int rds_tracing;        /* nonzero when allocation tracing is enabled */
extern FILE *rds_tracing_file; /* where trace records are written */
#define HEAP_INIT (RecoverableHeapStartAddress != 0)
#define RDS_VERSION_STAMP (RecoverableHeapStartAddress->version)
#define RDS_HEAPLENGTH (RecoverableHeapStartAddress->heaplength)
#define RDS_CHUNK_SIZE (RecoverableHeapStartAddress->chunk_size)
#define RDS_FREE_LIST (RecoverableHeapStartAddress->lists)
#define RDS_NLISTS (RecoverableHeapStartAddress->nlists)
#define RDS_MAXLIST (RecoverableHeapStartAddress->maxlist)
#define RDS_STATS (RecoverableHeapStartAddress->stats)
#define RDS_HIGH_ADDR (RecoverableHeapHighAddress)
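/*
 * Typical use of the accessors above (a sketch; the error values are
 * placeholders): routines first make sure the heap has been mapped and
 * that the version stamp in recoverable storage matches this code.
 *
 *     if (!HEAP_INIT)
 *         return -1;                          // heap not initialized
 *     if (strncmp(RDS_VERSION_STAMP, RDS_HEAP_VERSION,
 *                 strlen(RDS_HEAP_VERSION)) != 0)
 *         return -1;                          // version skew
 */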
/*******************
* byte <-> string
*/
#ifdef __STDC__
#define BCOPY(S,D,L) memcpy((D),(S),(L))
#define BZERO(D,L) memset((D),0,(L))
#else
#define BCOPY(S,D,L) bcopy((S),(D),(L))
#define BZERO(D,L) bzero((D),(L))
#endif
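/*
 * Note the (source, destination, length) argument order, which follows
 * bcopy rather than memcpy.  For example (a sketch; 'hdr' is a
 * hypothetical heap_header_t pointer):
 *
 *     BCOPY(RDS_HEAP_VERSION, hdr->version, strlen(RDS_HEAP_VERSION) + 1);
 *     BZERO(&hdr->stats, sizeof(rds_stats_t));
 */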
/********************
 * Declarations of worker functions.
 */
extern int enqueue();
extern free_block_t *dequeue();
extern int print_heap();
extern free_block_t *split();
extern free_block_t *get_block();
extern int put_block();
/*********************
 * Declarations of utility functions
 */
free_block_t *dequeue();
int rm_from_list();
/***********************
* Coalesce
*/
int merge_with_next_free(free_block_t *fbp, rvm_tid_t *tid, int *err);
void coalesce(rvm_tid_t *tid, int *err);
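/*
 * Sketch of the merge step (illustration only; 'fbp' is the block being
 * examined, as in merge_with_next_free above, the real routine must log
 * every change through RVM via 'tid', and the comparison against
 * RDS_HIGH_ADDR is an assumption about how the end of the heap is found):
 *
 *     free_block_t *nbp = NEXT_CONSECUTIVE_BLOCK(fbp); // physically adjacent block
 *     if (nbp < RDS_HIGH_ADDR && nbp->type == FREE_GUARD) {
 *         // both free and adjacent: pull nbp off its free list, grow fbp
 *         // by nbp->size chunks, and restamp fbp's END_GUARD.
 *     }
 */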
#endif /* _RDS_PRIVATE_H_ */