/* Copyright (C) 1993, 1995 Aladdin Enterprises. All rights reserved.
This file is part of GNU Ghostscript.
GNU Ghostscript is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY. No author or distributor accepts responsibility to
anyone for the consequences of using it or for whether it serves any
particular purpose or works at all, unless he says so in writing. Refer
to the GNU Ghostscript General Public License for full details.
*/
/* iastruct.h */
/* Memory manager implementation structures for Ghostscript */
#include "gxbitmap.h"
#include "ialloc.h"
/* ================ Objects ================ */
/*
* Object headers come in a number of different varieties.
* All arise from the same basic form, which is
 *	-l- -lmsize/mark/back-
 *	-size-
 *	-type/reloc-
* l (large) is a single bit. The size of lmsize/mark/back, size, and type
* varies according to the environment. On machines with N:16 segmented
* addressing, 16-bit ints, and no alignment requirement more severe than
* 2 bytes, we can squeeze an object header into 6 bytes by making these
* fields 16 bits (using _ds addressing). On all other machines, we let the
* lmsize/mark/back field be 1 bit shorter than a uint, and round the header
* size up to the next multiple of the most severe alignment restriction
* (4 or 8 bytes). Miraculously, we can do all this without any case testing.
*
* The mark/back field is used for the mark during the marking phase of
* garbage collection, and for a back pointer value during the compaction
* phase. Since we want to be able to collect local VM independently of
* global VM, we need two different distinguished mark values:
* - For local objects that have not been traced and should be freed
* (compacted out), we use 1...11 in the mark field (o_unmarked).
* - For global objects that have not been traced but should be kept,
* we use 1...10 in the mark field (o_untraced).
* Note that neither of these values is a possible real relocation value.
*
* The lmsize field of large objects overlaps mark and back, so we must
* handle these functions for large objects in some other way.
* Since large objects cannot be moved or relocated, we don't need the
* back field for them; we allocate 2 bits for the 3 mark values.
*/
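/*
 * For concreteness (assuming 32-bit uints): obj_mb_bits below is then 31,
 * so for small objects o_unmarked is 0x7fffffff, o_untraced is 0x7ffffffe,
 * and 0 means "marked".  Large objects keep only the low 2 bits of these
 * values in their 2-bit mark field: 3 (unmarked), 2 (untraced), 0 (marked).
 */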
/*
* The back pointer's meaning depends on whether the object is
* free (unmarked) or in use (marked):
* - In free objects, the back pointer is an offset from the object
* header back to a chunk_head_t structure that contains the location
* to which all the data in this chunk will get moved; the reloc field
* contains the amount by which the following run of useful objects
* will be relocated downwards.
* - In useful objects, the back pointer is an offset from the object
* back to the previous free object; the reloc field is not used (it
* overlays the type field).
* These two cases can be distinguished when scanning a chunk linearly,
* but when simply examining an object via a pointer, the chunk pointer
* is also needed.
*/
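/*
 * Rough sketch only (the actual scanning and relocation code lives in the
 * allocator and garbage collector sources, not here), using the o_back /
 * o_nreloc abbreviations and obj_back_shift defined below: for a free
 * object header pp,
 *	(chunk_head_t *)((byte *)(pp) - ((pp)->o_back << obj_back_shift))
 * recovers the chunk head, while for an object in use the same expression
 * yields the previous free object, whose o_nreloc field gives the downward
 * relocation of the run of useful objects that follows it.
 */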
#define obj_flag_bits 1
#define obj_mb_bits (arch_sizeof_int * 8 - obj_flag_bits)
#define obj_ls_bits (obj_mb_bits - 2)
#define o_unmarked (((uint)1 << obj_mb_bits) - 1)
#define o_l_unmarked (o_unmarked & 3)
#define o_set_unmarked_large(pp) (pp)->o_lmark = o_l_unmarked
#define o_set_unmarked(pp)\
  if ( (pp)->o_large ) o_set_unmarked_large(pp);\
  else (pp)->o_smark = o_unmarked
#define o_is_unmarked_large(pp) ((pp)->o_lmark == o_l_unmarked)
#define o_is_unmarked(pp)\
  ((pp)->o_large ? o_is_unmarked_large(pp) :\
   ((pp)->o_smark == o_unmarked))
#define o_untraced (((uint)1 << obj_mb_bits) - 2)
#define o_l_untraced (o_untraced & 3)
#define o_set_untraced(pp)\
  if ( (pp)->o_large ) (pp)->o_lmark = o_l_untraced;\
  else (pp)->o_smark = o_untraced
#define o_is_untraced(pp)\
  ((pp)->o_large ? (pp)->o_lmark == o_l_untraced :\
   ((pp)->o_smark == o_untraced))
#define o_marked 0
#define o_mark_large(pp) (pp)->o_lmark = o_marked
#define o_mark(pp)\
  if ( (pp)->o_large ) o_mark_large(pp);\
  else (pp)->o_smark = o_marked
#define obj_back_shift obj_flag_bits
#define obj_back_scale (1 << obj_back_shift)
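/*
 * Minimal usage sketch (illustrative only; the real collector is
 * elsewhere): during marking, an object header pp reached from a root
 * would typically be handled as
 *	if ( o_is_unmarked(pp) )
 *	{	o_mark(pp);
 *		... trace the object's contents ...
 *	}
 * and anything still unmarked when marking finishes is compacted out.
 */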
typedef struct obj_header_data_s {
	union _f {
		struct _h { unsigned large : 1; } h;
		struct _l { unsigned _ : 1, lmark : 2, lsize : obj_ls_bits; } l;
		struct _m { unsigned _ : 1, smark : obj_mb_bits; } m;
		struct _b { unsigned _ : 1, back : obj_mb_bits; } b;
	} f;
	uint size;
	union _t {
		gs_memory_type_ptr_t type;
		uint reloc;
	} t;
} obj_header_data_t;
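/*
 * Note that the h, l, m, and b variants of the union all overlay the same
 * bits after the 1-bit large flag.  Small objects use those bits as a mark
 * (m.smark) during marking and as a back pointer (b.back) during
 * compaction; large objects split them into a 2-bit mark (l.lmark) and the
 * high part of the size (l.lsize).
 */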
/*
* Define the alignment modulus for aligned objects. We assume all
* alignment values are powers of 2; we can avoid nested 'max'es that way.
* The final | is because back pointer values are divided by obj_back_scale,
* so objects must be aligned at least 0 mod obj_back_scale.
*/
#define obj_align_mod\
  (((arch_align_long_mod - 1) | (arch_align_ptr_mod - 1) |\
    (arch_align_double_mod - 1) | (align_bitmap_mod - 1) |\
    (obj_back_scale - 1)) + 1)
#define log2_obj_align_mod small_exact_log2(obj_align_mod)
#define obj_align_mask (obj_align_mod-1)
#define obj_align_round(siz)\
  (uint)(((siz) + obj_align_mask) & -obj_align_mod)
#define obj_size_round(siz)\
  obj_align_round((siz) + sizeof(obj_header_t))
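/*
 * For concreteness, on a hypothetical 32-bit machine with 4-byte long,
 * pointer, and bitmap alignment and 8-byte double alignment (and
 * obj_back_scale = 2), obj_align_mod is ((3 | 3 | 7 | 3 | 1) + 1) = 8;
 * obj_align_round(13) is then 16, and obj_size_round(13) rounds
 * 13 + sizeof(obj_header_t) the same way.
 */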
/* Define the real object header type, taking alignment into account. */
struct obj_header_s {		/* must be a struct because of forward reference */
	union _d {
		obj_header_data_t o;
		byte _pad[round_up(sizeof(obj_header_data_t), obj_align_mod)];
	} d;
};
/* Define some reasonable abbreviations for the fields. */
#define o_large d.o.f.h.large
#define o_lsize d.o.f.l.lsize
#define o_lmark d.o.f.l.lmark
#define o_back d.o.f.b.back
#define o_smark d.o.f.m.smark
#define o_size d.o.size
#define o_type d.o.t.type
#define o_nreloc d.o.t.reloc
/*
* The macros for getting the sizes of objects all take pointers to
* the object header, for use when scanning storage linearly.
*/
#define pre_obj_small_size(pp)\
  ((pp)->o_size)
#if arch_sizeof_long > arch_sizeof_int
/* Large objects need to use o_lsize. */
#define pre_obj_large_size(pp)\
  (((ulong)(pp)->o_lsize << (arch_sizeof_int * 8)) + (pp)->o_size)
#define pre_obj_set_large_size(pp, lsize)\
  ((pp)->o_lsize = (lsize) >> (arch_sizeof_int * 8),\
   (pp)->o_size = (uint)(lsize))
#define pre_obj_contents_size(pp)\
  ((pp)->o_large ? pre_obj_large_size(pp) : pre_obj_small_size(pp))
#else
/* Large objects don't need to use o_lsize. */
#define pre_obj_large_size(pp)\
  pre_obj_small_size(pp)
#define pre_obj_set_large_size(pp, lsize)\
  ((pp)->o_lsize = 0,\
   (pp)->o_size = (lsize))
#define pre_obj_contents_size(pp)\
  pre_obj_small_size(pp)
#endif
#define pre_obj_rounded_size(pp)\
  obj_size_round(pre_obj_contents_size(pp))
#define pre_obj_next(pp)\
  ((obj_header_t *)((byte *)(pp) + obj_align_round(\
    pre_obj_contents_size(pp) + sizeof(obj_header_t) )))
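/*
 * Illustrative sketch of a linear scan, assuming hypothetical chunk bounds
 * cbase and ctop (the real scanning code lives in the allocator and GC
 * sources):
 *	obj_header_t *pp = (obj_header_t *)cbase;
 *	while ( (byte *)pp < ctop )
 *	{	ulong size = pre_obj_contents_size(pp);
 *		... the contents start at pp + 1 and occupy size bytes ...
 *		pp = pre_obj_next(pp);
 *	}
 */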
/*
* Define the header that free objects point back to when relocating.
* Every chunk, including inner chunks, has one of these.
*/
typedef struct chunk_head_s {
	byte *dest;		/* destination for objects */
#if obj_align_mod > arch_sizeof_ptr
	byte *_pad[obj_align_mod / arch_sizeof_ptr - 1];
#endif
	obj_header_t free;	/* header for a free object, */
				/* in case the first real object */
				/* is in use */
} chunk_head_t;