/*
Title: Multi-Threaded Garbage Collector - Copy phase
Copyright (c) 2010-12 David C. J. Matthews
Based on the original garbage collector code
Copyright 2000-2008
Cambridge University Technical Services Limited
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
This is the second, copy, phase of the garbage collector. The previous,
mark, phase has identified all the live data and set the bits in the bit-maps.
This phase compacts the memory by copying cells from lower in the segment or
from other segments. When a cell is copied the length-word is modified to be
a tomb-stone that gives the new location for the cell. Cells can be copied to
areas of memory that are shown as free in the bit-maps and the destination area
is then marked as allocated. Because there are tomb-stones left behind, the original
location of a cell must remain as allocated and its space cannot be reused until the
GC is complete.
We copy cells in a defined order to avoid copying loops.
The ordering on the addresses is:
Immutable areas (for immutable cells) (highest)
Mutable areas
Allocation areas (lowest)
Within each group a lower position in the gMem.lSpaces table is higher.
MemMgr::AddLocalSpace enters spaces into gMem.lSpaces such that immutable
areas come before mutable areas, which come before allocation areas,
so this reduces to the order in that table.
Within a space higher addresses are higher.
So we try to copy data out of the allocation areas and to copy any
cells that are now immutable out of the mutable areas. We try to copy
data out of higher numbered spaces in order to try to free them
completely and to compact data towards the top of a space if we
can't.
Once a thread has started copying into or out of an area it takes
ownership of the area and no other thread can use the area. This
avoids two threads trying to copy into or out of the same area at the
same time and means the copying itself needs no further locking.
*/
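/*
An illustrative sketch (not part of this file) of the tomb-stone idea described
above. The names and representation here are hypothetical, not the real
PolyObject API: the length word of a moved cell is overwritten with a tagged
pointer to the new location so that a later pass can redirect references.

#include <cstdint>

struct Cell {
    uintptr_t lengthWord; // object length, or forwarding address with FWD_BIT set
    static const uintptr_t FWD_BIT = 1;
    bool IsForwarded() const { return (lengthWord & FWD_BIT) != 0; }
    Cell *NewLocation() const { return (Cell*)(lengthWord & ~FWD_BIT); }
    void Forward(Cell *dest) { lengthWord = (uintptr_t)dest | FWD_BIT; }
};

// Redirect a reference through any tomb-stone left by the copy phase.
static Cell *Update(Cell *p) { return p->IsForwarded() ? p->NewLocation() : p; }
*/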
#ifdef HAVE_CONFIG_H
#include "config.h"
#elif defined(_WIN32)
#include "winconfig.h"
#else
#error "No configuration file"
#endif
#ifdef HAVE_ASSERT_H
#include <assert.h>
#define ASSERT(x) assert(x)
#else
#define ASSERT(x)
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#include "globals.h"
#include "machine_dep.h"
#include "processes.h"
#include "gc.h"
#include "scanaddrs.h"
#include "bitmap.h"
#include "memmgr.h"
#include "gctaskfarm.h"
#include "locking.h"
#include "diagnostics.h"
static PLock copyLock("Copy");
// Search the area downwards looking for n consecutive free words.
// Return the address of the word if successful or 0 on failure.
// "limit" is the bit position of the bottom of the area or, if we're compacting an area,
// the bit position of the object we'd like to move to a higher address.
static inline PolyWord *FindFreeAndAllocate(LocalMemSpace *dst, POLYUNSIGNED limit, POLYUNSIGNED n)
{
if (dst == 0) return 0; // No current space
/* SPF's version of the start caching code. SPF 2/10/96 */
// The idea of it is to avoid having to search over an area that is
// already known not to have any spaces large enough for an object of
// a given size. Knowing that there is no space for an object of
// size n implies that there is no space for anything of size larger
// than n. SPF's idea is that after finding the space in the bitmap
// we update only the element for the size we are looking for rather
// than everything larger.
unsigned truncated_n = n < NSTARTS ? (unsigned)n : NSTARTS - 1;
// If we're looking for something larger than last time, update
// all the entries between last time's size and this size.
for (unsigned i = dst->start_index; i < truncated_n; i ++)
{
if (dst->start[i] < dst->start[i+1])
dst->start[i+1] = dst->start[i];
}
dst->start_index = truncated_n;
POLYUNSIGNED start = dst->start[truncated_n];
if (start <= limit)
return 0;
// Look in the bitmap. Returns "start" if it can't find space.
POLYUNSIGNED free = dst->bitmap.FindFree(limit, start, n);
// If we failed to allocate the space (free == start) we set this to
// zero to indicate that there is no space for anything of this size
// or larger.
if (n < NSTARTS)
dst->start[n] = free == start ? 0 : free;
if (free == start)
return 0;
// Allocate the space.
dst->bitmap.SetBits(free, n);
PolyWord *newp = dst->wordAddr(free); /* New object address */
// Update dst->upperAllocPtr, so the new object doesn't get trampled.
if (newp < dst->upperAllocPtr)
dst->upperAllocPtr = newp;
return newp;
}
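/*
The start cache above relies on a simple monotonicity argument: if no free run
of n words exists below start[n] then no run of any size >= n exists there
either, so start[m] for m > n can be clamped down lazily, as the loop at the
head of FindFreeAndAllocate does. A minimal standalone sketch of the same idea
over a plain bit vector follows; the names are hypothetical and the "limit"
argument of the real search is omitted for brevity.

#include <vector>
#include <cstddef>

const unsigned NSLOTS = 32;                // plays the role of NSTARTS

struct FreeFinder {
    std::vector<bool> allocated;           // true = word in use
    size_t start[NSLOTS];                  // highest position worth searching, per size

    FreeFinder(size_t size) : allocated(size, false)
    { for (unsigned i = 0; i < NSLOTS; i++) start[i] = size; }

    // Search downwards from start[slot] for n consecutive free words.
    // Returns the position of the run, or (size_t)-1 on failure.
    size_t Find(size_t n)
    {
        size_t slot = n < NSLOTS ? n : NSLOTS - 1, run = 0;
        for (size_t b = start[slot]; b-- > 0; )
        {
            run = allocated[b] ? 0 : run + 1;
            if (run == n) { start[slot] = b; return b; }
        }
        start[slot] = 0;   // nothing of this size (or larger) in this area
        return (size_t)-1;
    }
};
*/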
// This does nothing to the addresses but, by applying it in ScanConstantsWithinCode,
// we adjust any relative addresses so that they are relative to the new location.
class MTGCProcessIdentity: public ScanAddress {
public:
virtual PolyObject *ScanObjectAddress(PolyObject *base) { return base; }
};
// Copy a cell to its new address.
void CopyObjectToNewAddress(PolyObject *srcAddress, PolyObject *destAddress, POLYUNSIGNED L)
{
destAddress->SetLengthWord(L); /* copy length word */
POLYUNSIGNED n = OBJ_OBJECT_LENGTH(L);
// Unroll loop for most common cases.
switch (n)
{
default:
memcpy(destAddress, srcAddress, n * sizeof(PolyWord));
break;
// N.B. these cases deliberately fall through: case 4 copies words 3, 2, 1 and 0.
case 4:
destAddress->Set(3, srcAddress->Get(3));
case 3:
destAddress->Set(2, srcAddress->Get(2));
case 2:
destAddress->Set(1, srcAddress->Get(1));
case 1:
destAddress->Set(0, srcAddress->Get(0));
}
// If this is a code object flush out anything from the instruction cache
// that might previously have been at this address
if (OBJ_IS_CODE_OBJECT(L))
{
MTGCProcessIdentity identity;
machineDependent->FlushInstructionCache(destAddress, n * sizeof(PolyWord));
// We have to update any relative addresses in the code.
machineDependent->ScanConstantsWithinCode(destAddress, srcAddress, OBJ_OBJECT_LENGTH(L), &identity);
}
}
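/*
The fall-through switch above is just an unrolled form of a word-by-word copy
for the common small sizes. It is equivalent to this plain loop over the same
PolyObject Get/Set calls:

for (POLYUNSIGNED i = 0; i < n; i++)
    destAddress->Set(i, srcAddress->Get(i));
*/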
// Find the next space in the sequence. It may return with the space unchanged if it
// is unable to find a suitable space.
static bool FindNextSpace(LocalMemSpace *src, LocalMemSpace **dst, bool isMutable, GCTaskId *id)
{
std::vector<LocalMemSpace*>::iterator m = gMem.lSpaces.begin();
// If we're compressing the space and it's full, that's it.
if (*dst == src)
return false;
if (*dst != 0)
{
// Find the next space after this
while (*m != *dst) m++;
m++;
}
for (; m < gMem.lSpaces.end(); m++) {
LocalMemSpace *lSpace = *m;
if (lSpace == src)
{
// The only possibility is to compact this area.
ASSERT(!isMutable || src->isMutable);
*dst = src;
return true; // We already own it
}
if (lSpace->isMutable == isMutable && !lSpace->allocationSpace && lSpace->spaceOwner == 0)
{
// Now acquire the lock. We have to retest spaceOwner with the lock held.
PLocker lock(&copyLock);
if (lSpace->spaceOwner == 0)
{
// Change the space.
lSpace->spaceOwner = id;
*dst = lSpace; // Return the space
if (debugOptions & DEBUG_GC_ENHANCED)
Log("GC: Copy: copying %s cells from %p to %p\n",
isMutable ? "mutable" : "immutable", src, lSpace);
return true;
}
}
}
return false;
}
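/*
The unlocked test of spaceOwner followed by a retest with copyLock held is a
double-checked claim: the cheap unlocked read filters out spaces that are
obviously taken, and the test must be repeated under the lock because another
thread may claim the space in between. A generic sketch of the same pattern,
with hypothetical names and std::mutex standing in for PLock:

#include <mutex>

struct Space { void *owner; Space(): owner(0) {} };
static std::mutex claimLock;

// Try to take ownership of a space on behalf of "me".
static bool TryClaim(Space *s, void *me)
{
    if (s->owner != 0) return false;              // cheap unlocked filter
    std::lock_guard<std::mutex> guard(claimLock);
    if (s->owner != 0) return false;              // retest under the lock
    s->owner = me;
    return true;
}
*/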
// Copy objects from the source space into an earlier space or up within the
// current space.
static void copyAllData(GCTaskId *id, void * /*arg1*/, void * /*arg2*/)
{
LocalMemSpace *mutableDest = 0, *immutableDest = 0;
for (std::vector<LocalMemSpace*>::reverse_iterator i = gMem.lSpaces.rbegin(); i != gMem.lSpaces.rend(); i++)
{
LocalMemSpace *src = *i;
if (src->spaceOwner == 0)
{
PLocker lock(&copyLock);
if (src->spaceOwner == 0)
src->spaceOwner = id;
else continue;
}
else if (src->spaceOwner != id)
continue;
if (debugOptions & DEBUG_GC_ENHANCED)
Log("GC: Copy: copying area %p (thread %p) %s \n", src, id, src->spaceTypeString());
// We start at fullGCLowerLimit which is the lowest marked object in the heap
// N.B. It's essential that the first set bit at or above this corresponds
// to the length word of a real object.
POLYUNSIGNED bitno = src->wordNo(src->fullGCLowerLimit);
// Set the limit to the top so we won't rescan this. That can
// only happen if copying takes a very short time and the same
// thread runs multiple tasks.
src->fullGCLowerLimit = src->top;
// "highest" is the bit position that corresponds to the top of
// the generation we're copying.
POLYUNSIGNED highest = src->wordNo(src->top);
for (;;)
{
if (bitno >= highest) break;
/* SPF version; Invariant: 0 < highest - bitno */
bitno += src->bitmap.CountZeroBits(bitno, highest - bitno);
if (bitno >= highest) break;
/* first set bit corresponds to the length word */
PolyWord *old = src->wordAddr(bitno); /* Old object address */
PolyObject *obj = (PolyObject*)(old+1);
POLYUNSIGNED L = obj->LengthWord();
ASSERT (OBJ_IS_LENGTH(L));
POLYUNSIGNED n = OBJ_OBJECT_LENGTH(L) + 1; /* Length of allocation (including length word) */
bitno += n;
// Find a mutable space for the mutable objects and an immutable space for
// the immutables. We copy objects into earlier spaces or within their own
// space but we don't copy an object to a later space. This avoids the
// risk of copying an object multiple times. Previously this copied objects
// into later spaces but that doesn't work well if we have converted old
// saved state segments into local areas. It's much better to delete them
// if possible.
bool isMutable = OBJ_IS_MUTABLE_OBJECT(L);
LocalMemSpace *destSpace = isMutable || immutableDest == 0 ? mutableDest : immutableDest;
PolyWord *newp = FindFreeAndAllocate(destSpace, (src == destSpace) ? bitno : 0, n);
if (newp == 0 && src != destSpace)
{
// See if we can find a different space.
// N.B. FindNextSpace side-effects mutableDest/immutableDest to give the next space.
if (FindNextSpace(src, isMutable ? &mutableDest : &immutableDest, isMutable, id))
{
bitno -= n; // Redo this object
continue;
}
// else just leave it
}
if (newp == 0) /* no room */
{
// We're not going to move this object
// Update src->upperAllocPtr, so the old object doesn't get trampled.
if (old < src->upperAllocPtr)
src->upperAllocPtr = old;
// Previously this continued compressing to try to make space available
// on the next GC. Normally full GCs are infrequent so the chances are
// that at the next GC other data will have been freed. Just stop at
// this point.
// However if we're compressing a mutable area and there is immutable
// data in it we should move those out because the mutable area is scanned
// on every partial GC.
if (! src->isMutable || src->i_marked == 0)
break;
}
else
{
PolyObject *destAddress = (PolyObject*)(newp+1);
obj->SetForwardingPtr(destAddress);
CopyObjectToNewAddress(obj, destAddress, L);
if (debugOptions & DEBUG_GC_DETAIL)
Log("GC: Copy: %p %lu %u -> %p\n", obj, OBJ_OBJECT_LENGTH(L),
GetTypeBits(L), destAddress);
}
}
if (mutableDest == src)
mutableDest = 0;
if (immutableDest == src)
immutableDest = 0;
}
}
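/*
The scan loop in copyAllData walks the mark bitmap: it skips runs of zero bits
with CountZeroBits, treats the first set bit as the length word of a live
object and then steps over the whole object in one jump. A standalone sketch
of that walk over a plain bit vector, with hypothetical names; the visitor
returns the object's total allocation in words (including the length word),
standing in for reading the real length word:

#include <vector>
#include <cstddef>

static void WalkMarked(const std::vector<bool> &marked, size_t from, size_t to,
                       size_t (*visit)(size_t bitno))
{
    size_t bitno = from;
    while (bitno < to)
    {
        while (bitno < to && !marked[bitno]) bitno++;  // skip unmarked words
        if (bitno >= to) break;
        bitno += visit(bitno);       // first set bit is an object's length word
    }
}
*/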
void GCCopyPhase()
{
mainThreadPhase = MTP_GCPHASECOMPACT;
for (std::vector<LocalMemSpace*>::iterator i = gMem.lSpaces.begin(); i < gMem.lSpaces.end(); i++)
{
LocalMemSpace *lSpace = *i;
POLYUNSIGNED highest = lSpace->wordNo(lSpace->top);
for (unsigned k = 0; k < NSTARTS; k++)
lSpace->start[k] = highest;
lSpace->start_index = NSTARTS - 1;
lSpace->spaceOwner = 0;
// Reset the allocation pointers. This puts garbage (and real data) below them.
// At the end of the compaction the allocation pointer will point below the
// lowest real data.
lSpace->upperAllocPtr = lSpace->top;
}
// Copy the mutable data into a lower area if possible.
if (gpTaskFarm->ThreadCount() == 0)
copyAllData(globalTask, 0, 0);
else
{
// We start as many tasks as we have threads. If the amount of work to
// be done is very small one thread could process more than one task.
// Have to be careful because we use the task ID to decide which space
// to scan.
for (unsigned j = 0; j < gpTaskFarm->ThreadCount(); j++)
gpTaskFarm->AddWorkOrRunNow(&copyAllData, 0, 0);
}
gpTaskFarm->WaitForCompletion();
}
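/*
GCCopyPhase fans out one copyAllData task per worker thread and then waits for
them all; the tasks self-schedule by claiming whole spaces, so no finer-grained
work queue is needed. A minimal sketch of the same fan-out-and-join shape, with
std::thread standing in for the task farm:

#include <thread>
#include <vector>

static void FanOutAndWait(unsigned nThreads, void (*work)(unsigned id))
{
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < nThreads; i++)
        workers.push_back(std::thread(work, i));
    for (size_t i = 0; i < workers.size(); i++)
        workers[i].join();                  // the WaitForCompletion step
}
*/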