change.mps.dylan.kinglet.160189 - merging branch MMdevel_tony_sunset onto the trunk

Copied from Perforce
 Change: 21001
 ServerID: perforce.ravenbrook.com
Tony Mann 1999-11-18 13:18:34 +00:00
parent 3ef27029c1
commit 149efb9790
3 changed files with 1054 additions and 0 deletions

@@ -0,0 +1,421 @@
/* impl.c.reserv: ARENA RESERVOIR
*
* $HopeName: MMsrc!reserv.c(MMdevel_tony_sunset.3) $
* Copyright (C) 1999. Harlequin Limited. All rights reserved.
*
* .readership: Any MPS developer
*
* IMPROVEMENTS
*
* .improve.contiguous: there should be a means of grouping
* contiguous tracts together so that there's a likelihood
* of being able to meet requests for regions larger than the
* arena alignment.
*
*/
#include "mpm.h"
SRCID(reserv, "$HopeName: MMsrc!reserv.c(MMdevel_tony_sunset.3) $");
/* The reservoir pool is defined here. See design.mps.reservoir */
#define PoolPoolReservoir(pool) PARENT(ReservoirStruct, poolStruct, pool)
/* Management of tracts
*
* The reservoir maintains a linked list of tracts in arbitrary order.
* (see .improve.contiguous)
*
* Tracts are chained using the TractP field.
*/
#define resTractNext(tract) ((Tract)TractP((tract)))
#define resTractSetNext(tract, next) (TractSetP((tract), (void*)(next)))
#define reservoirArena(reservoir) ((reservoir)->poolStruct.arena)
/* ResPoolInit -- Reservoir pool init method */
static Res ResPoolInit(Pool pool, va_list arg)
{
UNUSED(arg);
AVER(pool != NULL);
return ResOK;
}
/* ResPoolFinish -- Reservoir pool finish method
*
* .reservoir.finish: This might be called from ArenaFinish, so the
* arena cannot be checked at this time. In order to avoid the
* check, insist that the reservoir is empty, by AVERing that
* the reserve list is NULL.
*/
static void ResPoolFinish(Pool pool)
{
Reservoir reservoir;
AVERT(Pool, pool);
reservoir = PoolPoolReservoir(pool);
AVERT(Reservoir, reservoir);
AVER(NULL == reservoir->reserve); /* .reservoir.finish */
}
/* ReservoirPoolClass -- Class definition */
DEFINE_POOL_CLASS(ReservoirPoolClass, this)
{
INHERIT_CLASS(this, AbstractPoolClass);
this->name = "Reservoir";
this->size = sizeof(ReservoirStruct);
this->offset = offsetof(ReservoirStruct, poolStruct);
this->init = ResPoolInit;
this->finish = ResPoolFinish;
}
/* ReservoirCheck -- Reservoir check method */
Bool ReservoirCheck(Reservoir reservoir)
{
ReservoirPoolClass reservoircl = EnsureReservoirPoolClass();
Arena arena;
Tract tract;
CHECKS(Reservoir, reservoir);
CHECKD(Pool, &reservoir->poolStruct);
CHECKL(reservoir->poolStruct.class == reservoircl);
arena = reservoirArena(reservoir);
CHECKS(Arena, arena); /* Can't use CHECKD; circularly referenced */
/* could call ReservoirIsConsistent, but it's costly. */
tract = reservoir->reserve;
if (tract != NULL) {
CHECKL(TractCheck(tract));
CHECKL(TractPool(tract) == &reservoir->poolStruct);
}
CHECKL(SizeIsAligned(reservoir->reservoirLimit, ArenaAlign(arena)));
CHECKL(SizeIsAligned(reservoir->reservoirSize, ArenaAlign(arena)));
return TRUE;
}
/* ReservoirIsConsistent
*
* Returns FALSE if the reservoir is corrupt.
*/
static Bool ReservoirIsConsistent(Reservoir reservoir)
{
Bool res;
Size alignment, size = 0;
Tract tract;
Pool pool;
Arena arena;
AVERT(Reservoir, reservoir);
arena = reservoirArena(reservoir);
AVERT(Arena, arena);
pool = &reservoir->poolStruct;
AVERT(Pool, pool);
/* Check that the size of the tracts matches reservoirSize */
alignment = ArenaAlign(arena);
tract = reservoir->reserve;
while (tract != NULL) {
AVERT(Tract, tract);
AVER(TractPool(tract) == pool);
tract = resTractNext(tract);
size += alignment;
}
if (size != reservoir->reservoirSize)
return FALSE;
/* design.mps.reservoir.align */
res = SizeIsAligned(reservoir->reservoirLimit, alignment) &&
SizeIsAligned(reservoir->reservoirSize, alignment) &&
(reservoir->reservoirLimit >= reservoir->reservoirSize);
return res;
}
/* ReservoirEnsureFull
*
* Ensures that the reservoir is the right size, by topping it up
* with fresh memory from the arena if possible.
*/
Res ReservoirEnsureFull(Reservoir reservoir)
{
Size limit, alignment;
Pool pool;
Arena arena;
AVERT(Reservoir, reservoir);
arena = reservoirArena(reservoir);
AVERT(Arena, arena);
alignment = ArenaAlign(arena);
limit = reservoir->reservoirLimit;
/* optimize the common case of a full reservoir */
if (reservoir->reservoirSize == limit)
return ResOK;
pool = &reservoir->poolStruct;
AVERT(Pool, pool);
/* really ought to try hard to allocate contiguous tracts */
/* see .improve.contiguous */
while (reservoir->reservoirSize < limit) {
Res res;
Addr base;
Tract tract;
res = (*arena->class->alloc)(&base, &tract, SegPrefDefault(),
alignment, pool);
if (res != ResOK) {
AVER(ReservoirIsConsistent(reservoir));
return res;
}
reservoir->reservoirSize += alignment;
resTractSetNext(tract, reservoir->reserve);
reservoir->reserve = tract;
}
AVER(ReservoirIsConsistent(reservoir));
return ResOK;
}
/* ReservoirShrink -- Reduce the size of the reservoir */
static void ReservoirShrink(Reservoir reservoir, Size want)
{
Arena arena;
Pool pool;
Size alignment;
AVERT(Reservoir, reservoir);
pool = &reservoir->poolStruct;
AVERT(Pool, pool);
arena = reservoirArena(reservoir);
AVERT(Arena, arena);
AVER(SizeIsAligned(want, ArenaAlign(arena)));
AVER(reservoir->reservoirSize >= want);
if (reservoir->reservoirSize == want)
return;
/* Iterate over tracts, freeing them while reservoir is too big */
alignment = ArenaAlign(arena);
while (reservoir->reservoirSize > want) {
Tract tract = reservoir->reserve;
AVER(tract != NULL);
reservoir->reserve = resTractNext(tract);
(*arena->class->free)(TractBase(tract), alignment, pool);
reservoir->reservoirSize -= alignment;
}
AVER(reservoir->reservoirSize == want);
AVER(ReservoirIsConsistent(reservoir));
}
/* ReservoirWithdraw -- Attempt to supply memory from the reservoir */
Res ReservoirWithdraw(Addr *baseReturn, Tract *baseTractReturn,
Reservoir reservoir, Size size, Pool pool)
{
Pool respool;
Arena arena;
AVER(baseReturn != NULL);
AVER(baseTractReturn != NULL);
AVERT(Reservoir, reservoir);
arena = reservoirArena(reservoir);
AVERT(Arena, arena);
AVER(SizeIsAligned(size, ArenaAlign(arena)));
AVER(size > 0);
AVERT(Pool, pool);
respool = &reservoir->poolStruct;
AVERT(Pool, respool);
/* @@@ As a short-term measure, we only permit the reservoir to */
/* allocate single-page regions. */
/* See .improve.contiguous & change.dylan.jackdaw.160125 */
if(size != ArenaAlign(arena))
return ResMEMORY;
if (size <= reservoir->reservoirSize) {
/* Return the first tract */
Tract tract = reservoir->reserve;
Addr base;
AVER(tract != NULL);
base = TractBase(tract);
reservoir->reserve = resTractNext(tract);
reservoir->reservoirSize -= ArenaAlign(arena);
TractFinish(tract);
TractInit(tract, pool, base);
AVER(ReservoirIsConsistent(reservoir));
*baseReturn = base;
*baseTractReturn = tract;
return ResOK;
}
AVER(ReservoirIsConsistent(reservoir));
return ResMEMORY; /* no suitable region in the reservoir */
}
/* ReservoirDeposit -- Top up the reservoir */
void ReservoirDeposit(Reservoir reservoir, Addr base, Size size)
{
Pool respool;
Addr addr, limit;
Size reslimit, alignment;
Arena arena;
Tract tract;
AVERT(Reservoir, reservoir);
arena = reservoirArena(reservoir);
AVERT(Arena, arena);
respool = &reservoir->poolStruct;
AVERT(Pool, respool);
alignment = ArenaAlign(arena);
AVER(AddrIsAligned(base, alignment));
AVER(SizeIsAligned(size, alignment));
limit = AddrAdd(base, size);
reslimit = reservoir->reservoirLimit;
/* put as many pages as necessary into the reserve & free the rest */
TRACT_FOR(tract, addr, arena, base, limit) {
if (reservoir->reservoirSize < reslimit) {
/* Reassign the tract to the reservoir pool */
TractFinish(tract);
TractInit(tract, respool, addr);
reservoir->reservoirSize += alignment;
resTractSetNext(tract, reservoir->reserve);
reservoir->reserve = tract;
} else {
/* free the tract */
(*arena->class->free)(addr, alignment, TractPool(tract));
}
}
AVER(addr == limit);
AVER(ReservoirIsConsistent(reservoir));
}
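/* Calling-pattern sketch (illustrative only, not part of this change):
 * an arena-level allocation path might fall back on the reservoir when
 * the arena class's alloc method fails, per design.mps.reservoir. The
 * helper name and the withReservoirPermit flag are assumptions made for
 * illustration; the alloc and ReservoirWithdraw signatures follow the
 * calls above.
 */
static Res allocFallbackSketch(Addr *baseReturn, Tract *tractReturn,
                               SegPref pref, Size size, Pool pool,
                               Reservoir reservoir, Bool withReservoirPermit)
{
  Arena arena = PoolArena(pool);
  Res res;
  /* Try the ordinary arena allocation route first. */
  res = (*arena->class->alloc)(baseReturn, tractReturn, pref, size, pool);
  if (res != ResOK && withReservoirPermit) {
    /* Ordinary memory is exhausted; draw on the emergency reserve. */
    res = ReservoirWithdraw(baseReturn, tractReturn, reservoir, size, pool);
  }
  return res;
}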
/* MutatorBufferCount
*
* Returns the number of mutator buffers for the arena.
*/
static Count MutatorBufferCount(Arena arena)
{
Ring nodep, nextp;
Count count = 0;
AVERT(Arena, arena);
/* Iterate over all pools, and count the mutator buffers in each */
RING_FOR(nodep, &arena->poolRing, nextp) {
Pool pool = RING_ELT(Pool, arenaRing, nodep);
Ring nodeb, nextb;
RING_FOR(nodeb, &pool->bufferRing, nextb) {
Buffer buff = RING_ELT(Buffer, poolRing, nodeb);
if (buff->isMutator)
count++;
}
}
return count;
}
/* ReservoirSetLimit -- Set the reservoir limit */
void ReservoirSetLimit(Reservoir reservoir, Size size)
{
Size needed;
Arena arena;
AVERT(Reservoir, reservoir);
arena = reservoirArena(reservoir);
AVERT(Arena, arena);
if (size > 0) {
Size wastage;
/* design.mps.reservoir.wastage */
wastage = ArenaAlign(arena) * MutatorBufferCount(arena);
/* design.mps.reservoir.align */
needed = SizeAlignUp(size, ArenaAlign(arena)) + wastage;
} else {
needed = 0; /* design.mps.reservoir.really-empty */
}
AVER(SizeIsAligned(needed, ArenaAlign(arena)));
if (needed > reservoir->reservoirSize) {
/* Try to grow the reservoir */
reservoir->reservoirLimit = needed;
ReservoirEnsureFull(reservoir);
} else {
/* Shrink the reservoir */
ReservoirShrink(reservoir, needed);
reservoir->reservoirLimit = needed;
AVER(ReservoirIsConsistent(reservoir));
}
}
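/* Worked example for ReservoirSetLimit (illustrative figures only):
 * with ArenaAlign(arena) == 4096 and three mutator buffers,
 * wastage = 3 * 4096 = 12288; a requested size of 10000 then gives
 * needed = SizeAlignUp(10000, 4096) + 12288 = 12288 + 12288 = 24576,
 * i.e. the limit is rounded up to the arena alignment and padded by
 * one aligned grain per mutator buffer (design.mps.reservoir.wastage).
 */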
/* ReservoirLimit -- Return the reservoir limit */
Size ReservoirLimit(Reservoir reservoir)
{
AVERT(Reservoir, reservoir);
AVER(ReservoirIsConsistent(reservoir));
return reservoir->reservoirLimit;
}
/* ReservoirAvailable -- Return the amount in the reservoir */
Size ReservoirAvailable(Reservoir reservoir)
{
AVERT(Reservoir, reservoir);
ReservoirEnsureFull(reservoir);
return reservoir->reservoirSize;
}
/* ReservoirInit -- Initialize a reservoir */
Res ReservoirInit(Reservoir reservoir, Arena arena)
{
/* reservoir and arena are not initialized and can't be checked */
Res res;
reservoir->reservoirLimit = (Size)0;
reservoir->reservoirSize = (Size)0;
reservoir->reserve = NULL;
reservoir->sig = ReservoirSig;
/* initialize the reservoir pool, design.mps.reservoir */
res = PoolInit(&reservoir->poolStruct,
arena, EnsureReservoirPoolClass());
if (res == ResOK) {
AVERT(Reservoir, reservoir);
}
return res;
}
/* ReservoirFinish -- Finish a reservoir */
void ReservoirFinish (Reservoir reservoir)
{
PoolFinish(&reservoir->poolStruct);
reservoir->sig = SigInvalid;
}
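/* Client-side sketch (assumed interface, not part of this change): the
 * reservoir is driven from the MPS client interface, here assumed to be
 * mps_reservoir_limit_set and mps_reservoir_available as declared in
 * mps.h; the helper name is hypothetical.
 */
static int reserveBytesSketch(mps_arena_t arena, size_t bytes)
{
  /* Ask the arena to hold `bytes` in reserve, then report whether it
   * actually managed to fill the reservoir to that level. */
  mps_reservoir_limit_set(arena, bytes);
  return mps_reservoir_available(arena) >= bytes;
}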

@@ -0,0 +1,189 @@
/* impl.c.tract: TRACTS
*
* $HopeName: MMsrc!tract.c(MMdevel_tony_sunset.1) $
* Copyright (C) 1999. Harlequin Limited. All rights reserved.
*
* .readership: Any MPS developer
*/
#include "mpm.h"
SRCID(tract, "$HopeName: MMsrc!tract.c(MMdevel_tony_sunset.1) $");
#define TractArena(tract) PoolArena(TractPool(tract))
/* TractCheck -- check the integrity of a tract */
Bool TractCheck(Tract tract)
{
CHECKU(Pool, TractPool(tract));
CHECKL(AddrIsAligned(TractBase(tract),
ArenaAlign(PoolArena(TractPool(tract)))));
if (TractHasSeg(tract)) {
CHECKL(TraceSetCheck(TractWhite(tract)));
CHECKU(Seg, (Seg)TractP(tract));
} else {
CHECKL(TractWhite(tract) == TraceSetEMPTY);
}
return TRUE;
}
/* TractInit -- initialize a tract */
void TractInit(Tract tract, Pool pool, Addr base)
{
AVER(tract != NULL);
AVERT(Pool, pool);
tract->pool = pool;
tract->base = base;
tract->p = NULL;
tract->white = TraceSetEMPTY;
tract->hasSeg = FALSE;
AVERT(Tract, tract);
}
/* TractFinish -- finish a tract */
void TractFinish(Tract tract)
{
AVERT(Tract, tract);
/* Check that there's no segment - and hence no shielding */
AVER(!TractHasSeg(tract));
tract->pool = NULL;
}
/* .tract.critical: These tract functions are low-level and used
* throughout. They are therefore on the critical path and their
* AVERs are so-marked.
*/
/* TractBase -- return the base address of a tract */
Addr (TractBase)(Tract tract)
{
Addr base;
AVERT_CRITICAL(Tract, tract); /* .tract.critical */
base = tract->base;
return base;
}
/* TractLimit -- return the limit address of a tract */
Addr TractLimit(Tract tract)
{
Arena arena;
AVERT_CRITICAL(Tract, tract); /* .tract.critical */
arena = TractArena(tract);
AVERT_CRITICAL(Arena, arena);
return AddrAdd(TractBase(tract), arena->alignment);
}
/* TractOfAddr -- return the tract the given address is in, if any */
Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr)
{
AVER_CRITICAL(tractReturn != NULL);
AVERT_CRITICAL(Arena, arena);
return (*arena->class->tractOfAddr)(tractReturn, arena, addr);
}
/* TractOfBaseAddr -- return a tract given a base address
*
* The address must have been allocated to some pool
*/
Tract TractOfBaseAddr(Arena arena, Addr addr)
{
Tract tract;
Bool found;
AVERT_CRITICAL(Arena, arena);
AVER_CRITICAL(AddrIsAligned(addr, arena->alignment));
/* check first in the cache - design.mps.arena.tract.cache */
if (arena->lastTractBase == addr) {
tract = arena->lastTract;
} else {
found = (*arena->class->tractOfAddr)(&tract, arena, addr);
AVER_CRITICAL(found);
}
AVER_CRITICAL(TractBase(tract) == addr);
return tract;
}
/* TractFirst -- return the first tract in the arena
*
* This is used to start an iteration over all tracts in the arena.
*/
Bool TractFirst(Tract *tractReturn, Arena arena)
{
AVER(tractReturn != NULL);
AVERT(Arena, arena);
return (*arena->class->tractFirst)(tractReturn, arena);
}
/* TractNext -- return the "next" tract in the arena
*
* This is used as the iteration step when iterating over all
* tracts in the arena.
*
* TractNext finds the tract with the lowest base address which is
* greater than a specified address. The address must be (or once
* have been) the base address of a tract.
*/
Bool TractNext(Tract *tractReturn, Arena arena, Addr addr)
{
AVER_CRITICAL(tractReturn != NULL); /* .tract.critical */
AVERT_CRITICAL(Arena, arena);
return (*arena->class->tractNext)(tractReturn, arena, addr);
}
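/* Iteration sketch (assumed usage, not part of this change): visit every
 * tract in the arena with TractFirst / TractNext, remembering each tract's
 * base address before stepping, as the protocol above requires.
 */
static void tractWalkAllSketch(Arena arena)
{
  Tract tract;
  Bool more;
  more = TractFirst(&tract, arena);
  while (more) {
    Addr base = TractBase(tract);  /* the base survives even if the tract goes away */
    /* ... examine the tract here, e.g. via TractPool(tract) ... */
    more = TractNext(&tract, arena, base);
  }
}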
/* TractNextContig -- return the contiguously following tract
*
* This is used as the iteration step when iterating over all
* tracts in a contiguous area belonging to a pool.
*/
Tract TractNextContig(Arena arena, Tract tract)
{
Tract next;
AVERT_CRITICAL(Tract, tract);
AVER_CRITICAL(NULL != TractPool(tract));
next = (*arena->class->tractNextContig)(arena, tract);
AVER_CRITICAL(TractPool(next) == TractPool(tract));
AVER_CRITICAL(TractBase(next) ==
AddrAdd(TractBase(tract), arena->alignment));
return next;
}

@@ -0,0 +1,444 @@
/* impl.c.walk: OBJECT WALKER
*
* $HopeName: MMsrc!walk.c(MMdevel_tony_sunset.3) $
* Copyright (C) 1999. Harlequin Limited. All rights reserved.
*
* .readership: Any MPS developer
*
*/
#include "mpm.h"
#include "mps.h"
SRCID(walk, "$HopeName: MMsrc!walk.c(MMdevel_tony_sunset.3) $");
/* Heap Walking
*
*/
#define FormattedObjectsStepClosureSig ((Sig)0x519F05C1)
typedef struct FormattedObjectsStepClosureStruct *FormattedObjectsStepClosure;
typedef struct FormattedObjectsStepClosureStruct {
Sig sig;
mps_formatted_objects_stepper_t f;
void *p;
size_t s;
} FormattedObjectsStepClosureStruct;
static Bool FormattedObjectsStepClosureCheck(FormattedObjectsStepClosure c)
{
CHECKS(FormattedObjectsStepClosure, c);
CHECKL(FUNCHECK(c->f));
/* p and s fields are arbitrary closures which cannot be checked */
return TRUE;
}
static void ArenaFormattedObjectsStep(Addr object, Format format, Pool pool,
void *p, Size s)
{
FormattedObjectsStepClosure c;
/* Can't check object */
AVERT(Format, format);
AVERT(Pool, pool);
c = p;
AVERT(FormattedObjectsStepClosure, c);
AVER(s == 0);
(*c->f)((mps_addr_t)object, (mps_fmt_t)format, (mps_pool_t)pool,
c->p, c->s);
}
/* ArenaFormattedObjectsWalk -- iterate over all objects
*
* so called because it walks all formatted objects in an arena
*/
static void ArenaFormattedObjectsWalk(Arena arena,
FormattedObjectsStepMethod f,
void *p, Size s)
{
Seg seg;
FormattedObjectsStepClosure c;
AVERT(Arena, arena);
AVER(FUNCHECK(f));
AVER(f == ArenaFormattedObjectsStep);
/* p and s are arbitrary closures. */
/* Know that p is a FormattedObjectsStepClosure */
/* Know that s is 0 */
AVER(p != NULL);
AVER(s == 0);
c = p;
AVERT(FormattedObjectsStepClosure, c);
if(SegFirst(&seg, arena)) {
Addr base;
do {
Pool pool;
base = SegBase(seg);
pool = SegPool(seg);
if(pool->class->attr & AttrFMT) {
ShieldExpose(arena, seg);
PoolWalk(pool, seg, f, p, s);
ShieldCover(arena, seg);
}
} while(SegNext(&seg, arena, base));
}
}
/* mps_arena_formatted_objects_walk -- iterate over all objects
*
* Client interface to ArenaFormattedObjectsWalk
*/
void mps_arena_formatted_objects_walk(mps_arena_t mps_arena,
mps_formatted_objects_stepper_t f,
void *p,
size_t s)
{
Arena arena = (Arena)mps_arena;
FormattedObjectsStepClosureStruct c;
ArenaEnter(arena);
AVERT(Arena, arena);
AVER(FUNCHECK(f));
/* p and s are arbitrary closures, hence can't be checked */
c.sig = FormattedObjectsStepClosureSig;
c.f = f;
c.p = p;
c.s = s;
ArenaFormattedObjectsWalk(arena, ArenaFormattedObjectsStep, &c, 0);
ArenaLeave(arena);
}
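/* Client-side sketch (hypothetical names, not part of this change): a
 * stepper that counts formatted objects. The stepper signature follows
 * the call made in ArenaFormattedObjectsStep above; myArena and the
 * counter are assumptions for illustration.
 */
static size_t myObjectCount;
static void myObjectStepper(mps_addr_t addr, mps_fmt_t fmt,
                            mps_pool_t pool, void *p, size_t s)
{
  (void)addr; (void)fmt; (void)pool; (void)p; (void)s;
  ++myObjectCount;
}
/* Typical call, with the arena otherwise idle:
 *   mps_arena_formatted_objects_walk(myArena, myObjectStepper, NULL, 0);
 */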
/* Root Walking
*
* This involves more code than it should. The roots are walked
* by scanning them. But there's no direct support for
* invoking the scanner without there being a trace, and there's
* no direct support for creating a trace without also condemning
* part of the heap. (@@@@ This looks like a useful candidate for
* inclusion in the future). For now, the root walker contains
* its own code for creating a minimal trace and scan state.
*
* ASSUMPTIONS
*
* .assume.parked: The root walker must be invoked with a parked
* arena. It's only strictly necessary for there to be no current
* trace, but the client has no way to ensure this apart from
* parking the arena.
*
* .assume.rootaddr: The client closure is called with a parameter
* which is the address of a reference to an object referenced from
* a root. The client may desire this address to be the address of
* the actual reference in the root (so that the debugger can be
* used to determine details about the root). This is not always
* possible, since the root might actually be a register, or the
* format scan method might not pass this address directly to the
* fix method. If the format code does pass on the address, the
* client can be sure to be passed the address of any root other
* than a register or stack.
*
*/
/* RootsStepClosure -- closure environment for root walker
*
* Defined as a subclass of ScanState
*/
/* SIGnature Roots Step CLOsure */
#define RootsStepClosureSig ((Sig)0x51965C10)
typedef struct RootsStepClosureStruct *RootsStepClosure;
typedef struct RootsStepClosureStruct {
ScanStateStruct ssStruct; /* generic scan state object */
mps_roots_stepper_t f; /* client closure function */
void *p; /* client closure data */
size_t s; /* client closure data */
Root root; /* current root, or NULL */
Sig sig; /* impl.h.misc.sig */
} RootsStepClosureStruct;
static Bool RootsStepClosureCheck(RootsStepClosure rsc)
{
CHECKS(RootsStepClosure, rsc);
CHECKD(ScanState, &rsc->ssStruct);
CHECKL(FUNCHECK(rsc->f));
/* p and s fields are arbitrary closures which cannot be checked */
if (rsc->root != NULL) {
CHECKL(RootCheck(rsc->root));
}
return TRUE;
}
static ScanState RootsStepClosureScanState(RootsStepClosure rsc)
{
AVERT(RootsStepClosure, rsc);
return &rsc->ssStruct;
}
static RootsStepClosure ScanStateRootsStepClosure(ScanState ss)
{
AVERT(ScanState, ss);
return PARENT(RootsStepClosureStruct, ssStruct, ss);
}
/* RootsStepClosureInit -- Initialize a RootsStepClosure
*
* Initialize the parent ScanState too.
*/
static void RootsStepClosureInit(RootsStepClosure rsc,
Arena arena,
Trace trace,
TraceFixMethod rootFix,
mps_roots_stepper_t f,
void *p, Size s)
{
ScanState ss;
/* we are initing it, so we can't check rsc */
AVERT(Arena, arena);
AVERT(Trace, trace);
AVER(FUNCHECK(rootFix));
AVER(FUNCHECK(f));
/* p and s are arbitrary client-provided closure data. */
/* First initialize the ScanState superclass */
ss = &rsc->ssStruct;
ScanStateInit(ss, TraceSetSingle(trace->ti),
arena, RankAMBIG, trace->white);
/* Initialize the fix method in the ScanState */
ss->fix = rootFix;
/* Initialize subclass specific data */
rsc->f = f;
rsc->p = p;
rsc->s = s;
rsc->root = NULL;
rsc->sig = RootsStepClosureSig;
AVERT(RootsStepClosure, rsc);
}
/* RootsStepClosureFinish -- Finish a RootsStepClosure
*
* Finish the parent ScanState too.
*/
static void RootsStepClosureFinish(RootsStepClosure rsc)
{
ScanState ss;
AVERT(RootsStepClosure, rsc);
ss = RootsStepClosureScanState(rsc);
rsc->sig = SigInvalid;
ScanStateFinish(ss);
}
/* RootsWalkTraceStart -- Initialize a minimal trace for root walking
*/
static Res RootsWalkTraceStart(Trace trace)
{
Ring ring, node, next;
Arena arena;
AVERT(Trace, trace);
arena = trace->arena;
/* Set the white ref set to universal so that the scanner */
/* doesn't filter out any references from roots into the arena */
trace->white = RefSetUNIV;
/* Make the roots grey so that they are scanned */
ring = ArenaRootRing(arena);
RING_FOR(node, ring, next) {
Root root = RING_ELT(Root, arenaRing, node);
RootGrey(root, trace);
}
return ResOK;
}
/* RootsWalkTraceFinish -- Finish a minimal trace for root walking
*/
static void RootsWalkTraceFinish(Trace trace)
{
Arena arena;
AVERT(Trace, trace);
/* Make this trace look like any other finished trace. */
/* Need to set the state of the trace, and add it to the */
/* arena's set of flipped traces */
arena = trace->arena;
arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace->ti);
trace->state = TraceFINISHED;
TraceDestroy(trace);
}
/* RootsWalkFix -- the fix method used during root walking
*
* This doesn't cause further scanning of transitive references,
* it just calls the client closure.
*/
static Res RootsWalkFix(ScanState ss, Ref *refIO)
{
RootsStepClosure rsc;
Root root;
Ref ref;
Seg seg;
Arena arena;
AVERT(ScanState, ss);
AVER(refIO != NULL);
rsc = ScanStateRootsStepClosure(ss);
AVERT(RootsStepClosure, rsc);
root = rsc->root;
AVERT(Root, root);
arena = ss->arena;
ref = *refIO;
/* Check that the reference is to a valid segment */
if(SegOfAddr(&seg, arena, ref)) {
/* Test if the segment belongs to a GCable pool */
/* If it isn't then it's not in the heap, and the reference */
/* shouldn't be passed to the client */
if ((SegPool(seg)->class->attr & AttrGC) != 0) {
/* Call the client closure - .assume.rootaddr */
rsc->f((mps_addr_t*)refIO,
(mps_root_t)root,
rsc->p, rsc->s);
}
} else {
/* See design.mps.trace.exact.legal */
AVER(ss->rank < RankEXACT
|| !ArenaIsReservedAddr(arena, ref));
}
/* See design.mps.trace.fix.fixed.all */
ss->fixedSummary = RefSetAdd(ss->arena, ss->fixedSummary, *refIO);
AVER(ref == *refIO); /* can walk object graph - but not modify it */
return ResOK;
}
/* ArenaRootsWalk -- starts the trace and scans the roots
*/
static Res ArenaRootsWalk(Arena arena,
mps_roots_stepper_t f,
void *p, size_t s)
{
RootsStepClosureStruct rscStruct;
RootsStepClosure rsc = &rscStruct;
Trace trace;
ScanState ss;
Rank rank;
Res res;
AVERT(Arena, arena);
AVER(FUNCHECK(f));
/* p and s are arbitrary client-provided closure data. */
/* Scan all the roots with a minimal trace. */
/* Invoke the scanner with a RootsStepClosure, which */
/* is a subclass of ScanState and contains the client */
/* provided closure. Supply a special fix method */
/* in order to call the client closure. This fix method */
/* must perform no tracing operations of its own */
res = TraceCreate(&trace, arena);
/* Have to fail if no trace available. Unlikely due to .assume.parked */
if(res != ResOK)
return res;
res = RootsWalkTraceStart(trace);
if(res != ResOK)
return res;
RootsStepClosureInit(rsc, arena, trace, RootsWalkFix, f, p, s);
ss = RootsStepClosureScanState(rsc);
for(rank = RankAMBIG; rank < RankMAX; ++rank) {
Ring ring = ArenaRootRing(arena);
Ring node, next;
ss->rank = rank;
AVERT(ScanState, ss);
RING_FOR(node, ring, next) {
Root root = RING_ELT(Root, arenaRing, node);
if(RootRank(root) == ss->rank) {
/* set the root for the benefit of the fix method */
rsc->root = root;
/* Scan it */
ScanStateSetSummary(ss, RefSetEMPTY);
res = RootScan(ss, root);
if(res != ResOK) {
return res;
}
}
}
}
RootsStepClosureFinish(rsc);
RootsWalkTraceFinish(trace);
return ResOK;
}
/* mps_arena_roots_walk -- Client interface
*/
void mps_arena_roots_walk(mps_arena_t mps_arena,
mps_roots_stepper_t f,
void *p,
size_t s)
{
Arena arena = (Arena)mps_arena;
Res res;
ArenaEnter(arena);
AVERT(Arena, arena);
AVER(FUNCHECK(f));
/* p and s are arbitrary closures, hence can't be checked */
AVER(TRUE == arena->clamped); /* .assume.parked */
AVER(arena->busyTraces == TraceSetEMPTY); /* .assume.parked */
res = ArenaRootsWalk(arena, f, p, s);
AVER(res == ResOK);
ArenaLeave(arena);
}
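/* Client-side sketch (hypothetical names, not part of this change): a
 * roots stepper that counts references. The stepper signature follows the
 * call made in RootsWalkFix above; per .assume.parked the arena should be
 * parked first, here assumed to be done with mps_arena_park and undone
 * with mps_arena_release.
 */
static size_t myRootRefCount;
static void myRootStepper(mps_addr_t *ref, mps_root_t root, void *p, size_t s)
{
  (void)ref; (void)root; (void)p; (void)s;
  ++myRootRefCount;
}
/* Typical call sequence:
 *   mps_arena_park(myArena);
 *   mps_arena_roots_walk(myArena, myRootStepper, NULL, 0);
 *   mps_arena_release(myArena);
 */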