
Merging mmdevel_greylist (change.dylan.honeybee.170421 assignment 1)

Copied from Perforce
 Change: 18678
 ServerID: perforce.ravenbrook.com
Richard Brooksby 1997-08-26 16:39:04 +01:00
parent e18796aaa1
commit 87e859bab1
13 changed files with 427 additions and 369 deletions
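
In outline, the change drops the per-trace grey rank set (trace->grey) in favour of per-rank rings of grey segments held by the arena, and funnels greyness, summary, and rank-set updates through SegSetGrey, SegSetSummary, and SegSetRankSet in impl.c.seg, so that shield (barrier) state is adjusted in one place. A minimal sketch of the shape of that bookkeeping, using the field names from the diffs below; the stand-in types here are illustrative, not the real impl.h.mpmst definitions:

/* Sketch only -- stand-ins for the real MPS types in impl.h.mpmst. */

#define RankMAX 4                 /* assumed: AMBIG, EXACT, FINAL, WEAK */

typedef unsigned long TraceSet;   /* stand-in for the real TraceSet */
typedef struct RingStruct { struct RingStruct *next, *prev; } RingStruct;

typedef struct SegStruct {
  RingStruct _greyRing;  /* link in the arena's grey ring for this seg's rank */
  TraceSet _grey;        /* set of traces for which this segment is grey */
  /* ...remaining segment fields unchanged... */
} SegStruct;

typedef struct ArenaStruct {
  RingStruct greyRing[RankMAX];  /* one ring of grey segments per rank */
  /* ...remaining arena fields unchanged... */
} ArenaStruct;

/* SegSetGrey links a segment onto greyRing[rank] when it becomes grey and */
/* unlinks it when it is blackened, so traceFindGrey walks only the grey   */
/* segments instead of every segment in the arena.                         */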


@ -1,6 +1,6 @@
/* impl.c.arena: ARENA IMPLEMENTATION
*
* $HopeName: MMsrc!arena.c(trunk.2) $
* $HopeName: MMsrc!arena.c(trunk.3) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* .readership: Any MPS developer
@ -38,7 +38,7 @@
#include "mpm.h"
SRCID(arena, "$HopeName: MMsrc!arena.c(trunk.2) $");
SRCID(arena, "$HopeName: MMsrc!arena.c(trunk.3) $");
/* All static data objects are declared here. See .static */
@ -86,6 +86,7 @@ Bool ArenaCheck(Arena arena)
Index i;
Size depth;
RefSet rs;
Rank rank;
/* we check the fields in order. We can't yet check the serials,
* pollThreshold, actionInterval, or epoch. nickb 1997-07-21 */
@ -163,6 +164,9 @@ Bool ArenaCheck(Arena arena)
CHECKD(Lock, &arenaRingLock);
/* can't check arenaSerial */
for(rank = 0; rank < RankMAX; ++rank)
CHECKL(RingCheck(&arena->greyRing[rank]));
return TRUE;
}
@ -177,6 +181,7 @@ Bool ArenaCheck(Arena arena)
void ArenaInit(Arena arena, ArenaClass class)
{
Index i;
Rank rank;
/* We do not check the arena argument, because it's _supposed_ to */
/* point to an uninitialized block of memory. */
@ -214,6 +219,8 @@ void ArenaInit(Arena arena, ArenaClass class)
arena->alignment = MPS_PF_ALIGN; /* usually overridden by init */
arena->zoneShift = ARENA_ZONESHIFT; /* usually overridden by init */
arena->poolReady = FALSE; /* design.mps.arena.pool.ready */
for(rank = 0; rank < RankMAX; ++rank)
RingInit(&arena->greyRing[rank]);
arena->sig = ArenaSig;
arena->serial = arenaSerial; /* design.mps.arena.static.serial */
@ -286,6 +293,8 @@ failInit:
void ArenaFinish(Arena arena)
{
Rank rank;
arena->sig = SigInvalid;
LockFinish(&arena->lockStruct);
RingFinish(&arena->poolRing);
@ -293,6 +302,8 @@ void ArenaFinish(Arena arena)
RingFinish(&arena->rootRing);
RingFinish(&arena->threadRing);
RingFinish(&arena->globalRing);
for(rank = 0; rank < RankMAX; ++rank)
RingFinish(&arena->greyRing[rank]);
}
@ -579,6 +590,8 @@ Res ArenaDescribe(Arena arena, mps_lib_FILE *stream)
if(res != ResOK) return res;
}
/* @@@@ What about grey rings? */
res = WriteF(stream,
"} Arena $P ($U)\n", (WriteFP)arena, (WriteFU)arena->serial,
NULL);


@ -1,7 +1,7 @@
/* impl.h.eventcom -- Event Logging Common Types
*
* Copyright (C) 1997 Harlequin Group, all rights reserved.
* $HopeName: MMsrc!eventcom.h(trunk.2) $
* $HopeName: MMsrc!eventcom.h(trunk.3) $
*
* .readership: MPS developers.
* .sources: mps.design.telemetry
@ -53,7 +53,7 @@ typedef Word EventType;
#define EventTraceStart ((EventType)0xEF26AC52) /* TRACe STart */
#define EventTraceCreate ((EventType)0xEF26ACC6) /* TRACe CReate */
#define EventTraceDestroy ((EventType)0xEF26ACDE) /* TRACe DEstroy */
#define EventTraceSegGreyen ((EventType)0xEF26A599) /* TRAce SeG Greyen */
#define EventSegSetGrey ((EventType)0xEF59596A) /* SeG Set GRAy */
#define EventTraceFlipBegin ((EventType)0xEF26AF7B) /* TRAce FLip Begin */
#define EventTraceFlipEnd ((EventType)0xEF26AF7E) /* TRAce FLip End */
#define EventTraceReclaim ((EventType)0xEF26A6EC) /* TRAce REClaim */


@ -1,6 +1,6 @@
/* impl.h.eventdef -- Event Logging Definitions
*
* $HopeName: MMsrc!eventdef.h(trunk.3) $
* $HopeName: MMsrc!eventdef.h(trunk.4) $
* Copyright (C) 1997 Harlequin Group, all rights reserved.
*
* .readership: MPS developers.
@ -77,7 +77,7 @@ RELATION(PoolFree , 0x0018, TRUE, Object, PAW)
RELATION(TraceStart , 0x001c, TRUE, Trace, PPP)
RELATION(TraceCreate , 0x001d, TRUE, Trace, PPPU)
RELATION(TraceDestroy , 0x001e, TRUE, Trace, P)
RELATION(TraceSegGreyen , 0x001f, TRUE, Seg, PPU)
RELATION(SegSetGrey , 0x001f, TRUE, Seg, PPU)
RELATION(TraceFlipBegin , 0x0020, TRUE, Trace, PP)
RELATION(TraceFlipEnd , 0x0021, TRUE, Trace, PP)
RELATION(TraceReclaim , 0x0022, TRUE, Seg, P)


@ -1,6 +1,6 @@
/* impl.h.mpm: MEMORY POOL MANAGER DEFINITIONS
*
* $HopeName: MMsrc!mpm.h(trunk.39) $
* $HopeName: MMsrc!mpm.h(trunk.40) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*/
@ -258,8 +258,6 @@ extern Res PoolDescribe(Pool pool, mps_lib_FILE *stream);
extern Arena (PoolArena)(Pool pool);
#define PoolArena(pool) ((pool)->arena)
/* backward compatibility */
#define PoolSpace(pool) ((pool)->arena)
extern Align (PoolAlignment)(Pool pool);
#define PoolAlignment(pool) ((pool)->alignment)
@ -267,8 +265,6 @@ extern Align (PoolAlignment)(Pool pool);
extern Ring (PoolSegRing)(Pool pool);
#define PoolSegRing(pool) (&(pool)->segRing)
extern Res PoolSegAlloc(Seg *segReturn, SegPref pref, Pool pool, Size size);
extern void PoolSegFree(Pool pool, Seg seg);
extern Bool PoolOfAddr(Pool *poolReturn, Arena arena, Addr addr);
extern Bool PoolHasAddr(Pool pool, Addr addr);
@ -347,8 +343,6 @@ extern Res TracePoll(Trace trace);
extern void TraceAccess(Arena arena, Seg seg, AccessSet mode);
extern Res TraceFix(ScanState ss, Ref *refIO);
extern void TraceSegGreyen(Arena arena, Seg seg, TraceSet ts);
extern void TraceSetSummary(Arena arena, Seg seg, RefSet summary);
extern Size TraceGreyEstimate(Arena arena, RefSet refSet);
/* Equivalent to impl.h.mps MPS_SCAN_BEGIN */
@ -439,6 +433,8 @@ extern void ArenaFree(Arena arena, void *base, Size size);
#define ArenaTrace(arena, ti) (&(arena)->trace[ti])
#define ArenaZoneShift(arena) ((arena)->zoneShift)
#define ArenaAlign(arena) ((arena)->alignment)
#define ArenaGreyRing(arena, rank) \
(&arena->greyRing[rank])
extern Size ArenaReserved(Arena arena);
extern Size ArenaCommitted(Arena arena);
@ -467,6 +463,9 @@ extern Res SegPrefExpress(SegPref pref, SegPrefKind kind, void *p);
extern Bool SegCheck(Seg seg);
extern void SegInit(Seg seg, Pool pool);
extern void SegFinish(Seg seg);
extern void SegSetGrey(Seg seg, TraceSet grey);
extern void SegSetSummary(Seg seg, RefSet summary);
extern void SegSetRankSet(Seg seg, RankSet rankSet);
#define SegPool(seg) ((seg)->_pool)
#define SegSingle(seg) ((seg)->_single)
@ -481,17 +480,15 @@ extern void SegFinish(Seg seg);
#define SegBuffer(seg) ((seg)->_buffer)
#define SegPoolRing(seg) (&(seg)->_poolRing)
#define SegOfPoolRing(node) RING_ELT(Seg, _poolRing, node)
#define SegGreyRing(seg) (&(seg)->_greyRing)
#define SegOfGreyRing(node) RING_ELT(Seg, _greyRing, node)
#define SegSetPool(seg, pool) ((void)((seg)->_pool = (pool)))
#define SegSetSingle(seg, s) ((void)((seg)->_single = (s)))
#define SegSetRankSet(seg, rs) ((void)((seg)->_rankSet = (rs)))
#define SegSetPM(seg, mode) ((void)((seg)->_pm = (mode)))
#define SegSetSM(seg, mode) ((void)((seg)->_sm = (mode)))
#define SegSetDepth(seg, d) ((void)((seg)->_depth = (d)))
#define SegSetP(seg, pp) ((void)((seg)->_p = (pp)))
#define SegSetGrey(seg, ts) ((void)((seg)->_grey = (ts)))
#define SegSetWhite(seg, ts) ((void)((seg)->_white = (ts)))
#define SegSetSummary(seg, rs) ((void)((seg)->_summary = (rs)))
#define SegSetBuffer(seg, b) ((void)((seg)->_buffer = (b)))


@ -1,6 +1,6 @@
/* impl.h.mpmst: MEMORY POOL MANAGER DATA STRUCTURES
*
* $HopeName: MMsrc!mpmst.h(trunk.30) $
* $HopeName: MMsrc!mpmst.h(trunk.31) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* .readership: MM developers.
@ -187,6 +187,7 @@ typedef struct MVStruct { /* MV pool outer structure */
typedef struct SegStruct { /* segment structure */
Pool _pool; /* MUST BE FIRST (design.mps.seg.field.pool) */
RingStruct _poolRing; /* link in list of segs in pool */
RingStruct _greyRing; /* link in list of grey segs */
void *_p; /* pointer for use of owning pool */
Buffer _buffer; /* non-NULL if seg is buffered */
RefSet _summary; /* summary of references out of seg */
@ -445,7 +446,6 @@ typedef struct TraceStruct {
Arena arena; /* owning arena */
Action action; /* the action that launched the trace */
RefSet white; /* superset of refs in white set */
RankSet grey; /* ranks for which grey segs (may) exist */
TraceState state; /* current state of trace */
Size interval; /* polling interval */
} TraceStruct;
@ -552,6 +552,7 @@ typedef struct ArenaStruct {
TraceSet flippedTraces; /* set of running and flipped traces */
TraceStruct trace[TRACE_MAX]; /* trace structures. See
design.mps.trace.intance.limit */
RingStruct greyRing[RankMAX]; /* ring of grey segments at each rank */
/* location dependency fields (impl.c.ld) */
Epoch epoch; /* design.mps.arena.ld.epoch */


@ -1,6 +1,6 @@
/* impl.c.pool: POOL IMPLEMENTATION
*
* $HopeName: MMsrc!pool.c(trunk.33) $
* $HopeName: MMsrc!pool.c(trunk.34) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* This is the implementation of the generic pool interface. The
@ -12,7 +12,7 @@
#include "mpm.h"
SRCID(pool, "$HopeName: MMsrc!pool.c(trunk.33) $");
SRCID(pool, "$HopeName: MMsrc!pool.c(trunk.34) $");
Bool PoolClassCheck(PoolClass class)
@ -410,58 +410,6 @@ Arena (PoolArena)(Pool pool)
}
/* PoolSegAlloc -- allocate a segment in a pool
*
* @@@@ There's no need for this routine. The segment could be
* attached in SegInit.
*/
Res PoolSegAlloc(Seg *segReturn, SegPref pref, Pool pool, Size size)
{
Res res;
Seg seg;
Arena arena;
AVER(segReturn != NULL);
AVERT(Pool, pool);
AVERT(SegPref, pref);
arena = PoolArena(pool);
AVER(SizeIsAligned(size, ArenaAlign(arena)));
res = SegAlloc(&seg, pref, arena, size, pool);
if(res != ResOK) return res;
RingAppend(&pool->segRing, SegPoolRing(seg));
*segReturn = seg;
return ResOK;
}
/* PoolSegFree -- free a segment from a pool
*
* @@@@ There's no need for this routine. The segment could be
* detached in SegFinish.
*/
void PoolSegFree(Pool pool, Seg seg)
{
Arena arena;
AVERT(Pool, pool);
AVERT(Seg, seg);
AVER(SegPool(seg) == pool);
arena = PoolArena(pool);
ShieldFlush(arena); /* See impl.c.shield.shield.flush */
RingRemove(SegPoolRing(seg));
SegFree(arena, seg);
}
Bool PoolOfAddr(Pool *poolReturn, Arena arena, Addr addr)
{
Seg seg;
@ -700,16 +648,8 @@ void PoolTrivGrey(Pool pool, Trace trace, Seg seg)
/* @@@@ The trivial grey method probably shouldn't exclude */
/* the white segments, since they might also contain grey objects. */
/* It's probably also the Tracer's responsibility to raise the */
/* shield. */
/* @@@@ This should be calculated by comparing colour */
/* with the mutator colour. For the moment we assume */
/* a read-barrier collector. */
if(!TraceSetIsMember(SegWhite(seg), trace->ti)) {
SegGrey(seg) = TraceSetAdd(SegGrey(seg), trace->ti);
ShieldRaise(trace->arena, seg, AccessREAD);
}
if(!TraceSetIsMember(SegWhite(seg), trace->ti))
SegSetGrey(seg, TraceSetSingle(trace->ti));
}
Res PoolNoScan(ScanState ss, Pool pool, Seg seg)


@ -1,6 +1,6 @@
/* impl.c.poolams: AUTOMATIC MARK & SWEEP POOL CLASS
*
* $HopeName: MMsrc!poolams.c(trunk.2) $
* $HopeName: MMsrc!poolams.c(trunk.3) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* NOTES
@ -19,7 +19,7 @@
#include "mpm.h"
#include "mpscams.h"
SRCID(poolams, "$HopeName: MMsrc!poolams.c(trunk.2) $");
SRCID(poolams, "$HopeName: MMsrc!poolams.c(trunk.3) $");
/* These two BT utility functions should be in the BT module.
* See design.mps.poolams.bt.utilities */
@ -94,13 +94,13 @@ static Bool AMSCheck(AMS ams);
/* design.mps.poolams.addr-index.slow */
#define AMSGroupSpace(group) PoolSpace(AMSPool((group)->ams))
#define AMSGroupArena(group) PoolArena(AMSPool((group)->ams))
#define AMSGrains(ams,size) ((size) >> (ams)->grainShift)
#define AMSGroupBase(group) SegBase(AMSGroupSpace(group), (group)->seg)
#define AMSGroupBase(group) SegBase(AMSGroupArena(group), (group)->seg)
#define AMSGroupLimit(group) SegLimit(AMSGroupSpace(group), (group)->seg)
#define AMSGroupLimit(group) SegLimit(AMSGroupArena(group), (group)->seg)
#define AMSGroupShift(group) ((group)->ams->grainShift)
@ -125,10 +125,10 @@ static Bool AMSGroupCheck(AMSGroup group)
/* do the grains check both ways, to avoid rounding and overflow errors */
CHECKL(group->grains ==
(SegSize(PoolSpace(AMSPool(group->ams)), group->seg) >>
(SegSize(PoolArena(AMSPool(group->ams)), group->seg) >>
group->ams->grainShift));
CHECKL((group->grains << group->ams->grainShift) ==
SegSize(PoolSpace(AMSPool(group->ams)), group->seg));
SegSize(PoolArena(AMSPool(group->ams)), group->seg));
if (SegWhite(group->seg) != TraceSetEMPTY) {
CHECKL(TraceSetSingle(SegWhite(group->seg)));
@ -194,17 +194,17 @@ static int AMSGrainColour(AMSGroup group, Index index)
/* AMSBTCreate -- allocate a BT from the control pool */
static Res AMSBTCreate(BT *btReturn, Space space, Count length)
static Res AMSBTCreate(BT *btReturn, Arena arena, Count length)
{
Res res;
BT bt;
void *p;
AVER(btReturn != NULL);
AVERT(Space, space);
AVERT(Arena, arena);
AVER(length > 0);
res = ArenaAlloc(&p, space, BTSize(length));
res = ArenaAlloc(&p, arena, BTSize(length));
if(res != ResOK)
return res;
bt = (BT)p;
@ -217,13 +217,13 @@ static Res AMSBTCreate(BT *btReturn, Space space, Count length)
/* AMSBTDestroy -- free a BT to the control pool */
static void AMSBTDestroy(BT bt, Space space, Count length)
static void AMSBTDestroy(BT bt, Arena arena, Count length)
{
AVER(bt != NULL);
AVERT(Space, space);
AVERT(Arena, arena);
AVER(length > 0);
ArenaFree(space, bt, BTSize(length));
ArenaFree(arena, bt, BTSize(length));
}
static Res AMSGroupCreate(AMSGroup *groupReturn, Pool pool, Size size,
@ -232,7 +232,7 @@ static Res AMSGroupCreate(AMSGroup *groupReturn, Pool pool, Size size,
AMSGroup group; /* the group */
AMS ams;
Res res;
Space space;
Arena arena;
Seg seg;
void *p;
@ -244,18 +244,18 @@ static Res AMSGroupCreate(AMSGroup *groupReturn, Pool pool, Size size,
ams = PoolPoolAMS(pool);
AVERT(AMS,ams);
space = PoolSpace(pool);
arena = PoolArena(pool);
size = SizeAlignUp(size, ArenaAlign(space));
size = SizeAlignUp(size, ArenaAlign(arena));
if (size == 0)
return ResMEMORY;
res = ArenaAlloc(&p, space, (Size)sizeof(AMSGroupStruct));
res = ArenaAlloc(&p, arena, (Size)sizeof(AMSGroupStruct));
if (res != ResOK)
goto failGroup;
group = (AMSGroup)p;
res = PoolSegAlloc(&seg, SegPrefDefault(), pool, size);
res = SegAlloc(&seg, SegPrefDefault(), arena, size, pool);
if (res != ResOK)
goto failSeg;
@ -268,15 +268,15 @@ static Res AMSGroupCreate(AMSGroup *groupReturn, Pool pool, Size size,
group->grains = size >> ams->grainShift;
group->marked = FALSE; /* design.mps.poolams.marked.unused */
res = AMSBTCreate(&group->allocTable, space, group->grains);
res = AMSBTCreate(&group->allocTable, arena, group->grains);
if (res != ResOK)
goto failAlloc;
res = AMSBTCreate(&group->markTable, space, group->grains);
res = AMSBTCreate(&group->markTable, arena, group->grains);
if (res != ResOK)
goto failMark;
res = AMSBTCreate(&group->scanTable, space, group->grains);
res = AMSBTCreate(&group->scanTable, arena, group->grains);
if (res != ResOK)
goto failScan;
@ -292,13 +292,13 @@ static Res AMSGroupCreate(AMSGroup *groupReturn, Pool pool, Size size,
return ResOK;
failScan:
AMSBTDestroy(group->markTable, space, group->grains);
AMSBTDestroy(group->markTable, arena, group->grains);
failMark:
AMSBTDestroy(group->allocTable, space, group->grains);
AMSBTDestroy(group->allocTable, arena, group->grains);
failAlloc:
PoolSegFree(pool, seg);
SegFree(arena, seg);
failSeg:
ArenaFree(space, group, (Size)sizeof(AMSGroupStruct));
ArenaFree(arena, group, (Size)sizeof(AMSGroupStruct));
failGroup:
return res;
}
@ -306,26 +306,26 @@ failGroup:
static void AMSGroupDestroy(AMSGroup group)
{
AMS ams;
Space space;
Arena arena;
AVERT(AMSGroup, group);
ams = group->ams;
AVERT(AMS, ams);
space = PoolSpace(AMSPool(ams));
AVERT(Space, space);
arena = PoolArena(AMSPool(ams));
AVERT(Arena, arena);
AVER(ams->size >= SegSize(space, group->seg));
AVER(ams->size >= SegSize(arena, group->seg));
ams->size -= SegSize(space, group->seg);
ams->size -= SegSize(arena, group->seg);
ams->lastReclaimed = ams->size;
group->sig = SigInvalid;
AMSBTDestroy(group->allocTable, space, group->grains);
AMSBTDestroy(group->markTable, space, group->grains);
AMSBTDestroy(group->scanTable, space, group->grains);
PoolSegFree(AMSPool(ams), group->seg);
ArenaFree(space, group, (Size)sizeof(AMSGroupStruct));
AMSBTDestroy(group->allocTable, arena, group->grains);
AMSBTDestroy(group->markTable, arena, group->grains);
AMSBTDestroy(group->scanTable, arena, group->grains);
SegFree(arena, group->seg);
ArenaFree(arena, group, (Size)sizeof(AMSGroupStruct));
}
static Res AMSInit(Pool pool, va_list arg)
@ -574,7 +574,7 @@ static Res AMSCondemn(Pool pool, Trace trace, Seg seg, Action action)
}
static Res AMSScanGroupOnce(ScanState ss, AMS ams, AMSGroup group,
Seg seg, Space space, Bool scanAllObjects)
Seg seg, Arena arena, Bool scanAllObjects)
{
Res res;
Format format;
@ -582,11 +582,11 @@ static Res AMSScanGroupOnce(ScanState ss, AMS ams, AMSGroup group,
Addr p;
Addr limit;
limit = SegLimit(space, seg);
limit = SegLimit(arena, seg);
format = ams->format;
alignment = AMSPool(ams)->alignment;
p = SegBase(space, seg);
p = SegBase(arena, seg);
while (p < limit) {
Addr next;
Buffer buffer = SegBuffer(seg);
@ -626,7 +626,7 @@ static Res AMSScan(ScanState ss, Pool pool, Seg seg)
{
Res res;
AMS ams;
Space space;
Arena arena;
AMSGroup group;
Bool scanOnce;
Bool scanAllObjects;
@ -638,7 +638,7 @@ static Res AMSScan(ScanState ss, Pool pool, Seg seg)
AVERT(Pool, pool);
ams = PoolPoolAMS(pool);
AVERT(AMS, ams);
space = PoolSpace(pool);
arena = PoolArena(pool);
AVER(SegCheck(seg));
group = AMSSegGroup(seg);
@ -655,7 +655,7 @@ static Res AMSScan(ScanState ss, Pool pool, Seg seg)
Bool wasMarked = group->marked; /* for checking */
group->marked = FALSE; /* for checking */
res = AMSScanGroupOnce(ss, ams, group, seg, space, scanAllObjects);
res = AMSScanGroupOnce(ss, ams, group, seg, arena, scanAllObjects);
AVER(!group->marked);
group->marked = wasMarked; /* restore marked flag */
if (res != ResOK)
@ -666,7 +666,7 @@ static Res AMSScan(ScanState ss, Pool pool, Seg seg)
AVER(group->marked);
do { /* design.mps.poolams.marked.scan */
group->marked = FALSE;
res = AMSScanGroupOnce(ss, ams, group, seg, space, scanAllObjects);
res = AMSScanGroupOnce(ss, ams, group, seg, arena, scanAllObjects);
if (res != ResOK) {
group->marked = TRUE; /* design.mps.poolams.marked.scan.fail */
return res;
@ -692,7 +692,7 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
{
AMS ams;
AMSGroup group;
Space space;
Arena arena;
Index i;
Ref ref;
int colour;
@ -708,7 +708,7 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
group = AMSSegGroup(seg);
AVERT(AMSGroup, group);
space = PoolSpace(pool);
arena = PoolArena(pool);
ref = *refIO;
i = AMSAddrIndex(group, ref);
@ -739,7 +739,8 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
/* design.mps.poolams.fix.to-black */
if (RefSetInter(SegSummary(seg), ss->white) != RefSetEMPTY) {
TraceSegGreyen(space, seg, ss->traces); /* turn this segment grey */
/* turn this segment grey */
SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces));
group->marked = TRUE; /* design.mps.poolams.marked.fix */
} else {
BTSet(group->scanTable, i); /* turn this object black */
@ -758,7 +759,7 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
{
AMS ams;
AMSGroup group;
Space space;
Arena arena;
Format format;
Addr p;
Addr limit;
@ -773,11 +774,11 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
AVER(group->marked == FALSE); /* design.mps.poolams.marked.reclaim */
space = PoolSpace(pool);
limit = SegLimit(space, seg);
arena = PoolArena(pool);
limit = SegLimit(arena, seg);
format = ams->format;
buffer = SegBuffer(seg);
p = SegBase(space, seg);
p = SegBase(arena, seg);
anySurvivors = FALSE;
while (p < limit) {


@ -1,6 +1,6 @@
/* impl.c.poolawl: AUTOMATIC WEAK LINKED POOL CLASS
*
* $HopeName: MMsrc!poolawl.c(trunk.13) $
* $HopeName: MMsrc!poolawl.c(trunk.14) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* READERSHIP
@ -16,7 +16,7 @@
#include "mpm.h"
#include "mpscawl.h"
SRCID(poolawl, "$HopeName: MMsrc!poolawl.c(trunk.13) $");
SRCID(poolawl, "$HopeName: MMsrc!poolawl.c(trunk.14) $");
#define AWLSig ((Sig)0x519b7a37) /* SIGPooLAWL */
@ -81,7 +81,7 @@ static void AWLGroupDestroy(AWLGroup group)
segGrains = SegSize(arena, seg) >> awl->alignShift;
AVER(segGrains == group->grains);
tableSize = BTSize(segGrains);
PoolSegFree(pool, seg);
SegFree(arena, seg);
ArenaFree(arena, group->alloc, tableSize);
ArenaFree(arena, group->scanned, tableSize);
ArenaFree(arena, group->mark, tableSize);
@ -118,7 +118,7 @@ static Res AWLGroupCreate(AWLGroup *groupReturn,
if(size == 0) {
return ResMEMORY;
}
res = PoolSegAlloc(&seg, SegPrefDefault(), pool, size);
res = SegAlloc(&seg, SegPrefDefault(), arena, size, pool);
if(res != ResOK)
goto failSegAlloc;
res = ArenaAlloc(&v, arena, sizeof *group);
@ -143,8 +143,8 @@ static Res AWLGroupCreate(AWLGroup *groupReturn,
BTResRange(group->mark, 0, bits);
BTResRange(group->scanned, 0, bits);
BTResRange(group->alloc, 0, bits);
SegSetSummary(seg, RefSetUNIV);
SegSetRankSet(seg, BufferRankSet(buffer));
SegSetSummary(seg, RefSetUNIV);
SegSetP(seg, group);
group->seg = seg;
group->sig = AWLGroupSig;
@ -159,7 +159,7 @@ failArenaAllocScanned:
failArenaAllocMark:
ArenaFree(arena, group, sizeof *group);
failArenaAlloc0:
PoolSegFree(pool, seg);
SegFree(arena, seg);
failSegAlloc:
return res;
}
@ -379,7 +379,6 @@ static void AWLGrey(Pool pool, Trace trace, Seg seg)
AVERT(AWLGroup, group);
SegSetGrey(seg, TraceSetAdd(SegGrey(seg), trace->ti));
ShieldRaise(trace->arena, seg, AccessREAD);
BTSetRange(group->mark, 0, group->grains);
BTResRange(group->scanned, 0, group->grains);
}
@ -483,7 +482,7 @@ notFinished:
b = SegOfAddr(&dependentSeg, arena, dependentObj);
if(b == TRUE) {
ShieldExpose(arena, dependentSeg);
TraceSetSummary(arena, dependentSeg, RefSetUNIV);
SegSetSummary(dependentSeg, RefSetUNIV);
} else {
dependent = FALSE;
}
@ -550,7 +549,7 @@ static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
*refIO = (Ref)0;
} else {
BTSet(group->mark, i);
TraceSegGreyen(arena, seg, ss->traces);
SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces));
}
}
break;


@ -1,6 +1,6 @@
/* impl.c.poolmfs: MANUAL FIXED SMALL UNIT POOL
*
* $HopeName: MMsrc!poolmfs.c(trunk.20) $
* $HopeName: MMsrc!poolmfs.c(trunk.21) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* This is the implementation of the MFS pool class.
@ -35,7 +35,7 @@
#include "mpm.h"
#include "poolmfs.h"
SRCID(poolmfs, "$HopeName: MMsrc!poolmfs.c(trunk.20) $");
SRCID(poolmfs, "$HopeName: MMsrc!poolmfs.c(trunk.21) $");
/* == Round up ==
@ -123,7 +123,7 @@ static void MFSFinish(Pool pool)
seg = mfs->segList;
while(seg != NULL) {
Seg nextSeg = (Seg)SegP(seg); /* .seg.chain */
PoolSegFree(pool, seg);
SegFree(PoolArena(pool), seg);
seg = nextSeg;
}
@ -166,7 +166,7 @@ static Res MFSAlloc(Addr *pReturn, Pool pool, Size size)
arena = PoolArena(pool);
/* Create a new segment and attach it to the pool. */
res = PoolSegAlloc(&seg, SegPrefDefault(), pool, mfs->extendBy);
res = SegAlloc(&seg, SegPrefDefault(), arena, mfs->extendBy, pool);
if(res != ResOK)
return res;


@ -2,7 +2,7 @@
*
* MANUAL RANK GUARDIAN POOL
*
* $HopeName: MMsrc!poolmrg.c(trunk.11) $
* $HopeName: MMsrc!poolmrg.c(trunk.12) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* READERSHIP
@ -28,7 +28,7 @@
#include "mpm.h"
#include "poolmrg.h"
SRCID(poolmrg, "$HopeName: MMsrc!poolmrg.c(trunk.11) $");
SRCID(poolmrg, "$HopeName: MMsrc!poolmrg.c(trunk.12) $");
#define MRGSig ((Sig)0x519369B0) /* SIGnature MRG POol */
@ -48,32 +48,32 @@ typedef struct MRGStruct {
static Pool MRGPool(MRG mrg);
/* design.mps.poolmrg.guardian.assoc */
static Index indexOfRefPart(Addr a, Space space)
static Index indexOfRefPart(Addr a, Arena arena)
{
Seg seg;
Bool b;
Addr base;
Addr *pbase, *pa;
b = SegOfAddr(&seg, space, a);
b = SegOfAddr(&seg, arena, a);
AVER(b);
base = SegBase(space, seg);
base = SegBase(arena, seg);
pbase = (Addr *)base;
pa = (Addr *)a;
return pa - pbase;
}
/* design.mps.poolmrg.guardian.assoc */
static Index indexOfLinkPart(Addr a, Space space)
static Index indexOfLinkPart(Addr a, Arena arena)
{
Seg seg;
Bool b;
Addr base ;
RingStruct *pbase, *pa;
b = SegOfAddr(&seg, space, a);
b = SegOfAddr(&seg, arena, a);
AVER(b);
base = SegBase(space, seg);
base = SegBase(arena, seg);
pbase = (RingStruct *)base;
pa = (RingStruct *)a;
return pa - pbase;
@ -91,13 +91,13 @@ typedef MRGGroupStruct *MRGGroup;
static void MRGGroupDestroy(MRGGroup group, MRG mrg)
{
Pool pool;
Arena arena;
pool = MRGPool(mrg);
arena = PoolArena(MRGPool(mrg));
RingRemove(&group->group);
PoolSegFree(pool, group->refseg);
PoolSegFree(pool, group->linkseg);
ArenaFree(PoolSpace(pool), group, (Size)sizeof(MRGGroupStruct));
SegFree(arena, group->refseg);
SegFree(arena, group->linkseg);
ArenaFree(arena, group, (Size)sizeof(MRGGroupStruct));
}
static Res MRGGroupCreate(MRGGroup *groupReturn, MRG mrg)
@ -110,25 +110,25 @@ static Res MRGGroupCreate(MRGGroup *groupReturn, MRG mrg)
Seg linkseg;
Seg refseg;
Size linksegsize;
Space space;
Arena arena;
Addr *refpart;
Word i, guardians;
void *v;
pool = MRGPool(mrg);
space = PoolSpace(pool);
res = ArenaAlloc(&v, space, (Size)sizeof(MRGGroupStruct));
arena = PoolArena(pool);
res = ArenaAlloc(&v, arena, (Size)sizeof(MRGGroupStruct));
if(res != ResOK)
goto failArenaAlloc;
group = v;
res = PoolSegAlloc(&refseg, SegPrefDefault(), pool, mrg->extendBy);
res = SegAlloc(&refseg, SegPrefDefault(), arena, mrg->extendBy, pool);
if(res != ResOK)
goto failRefSegAlloc;
guardians = mrg->extendBy / sizeof(Addr); /* per seg */
linksegsize = guardians * sizeof(RingStruct);
linksegsize = SizeAlignUp(linksegsize, ArenaAlign(space));
res = PoolSegAlloc(&linkseg, SegPrefDefault(), pool, linksegsize);
linksegsize = SizeAlignUp(linksegsize, ArenaAlign(arena));
res = SegAlloc(&linkseg, SegPrefDefault(), arena, linksegsize, pool);
if(res != ResOK)
goto failLinkSegAlloc;
@ -137,17 +137,17 @@ static Res MRGGroupCreate(MRGGroup *groupReturn, MRG mrg)
/* The ref part of each guardian is cleared. */
AVER(guardians > 0);
base = SegBase(space, linkseg);
base = SegBase(arena, linkseg);
linkpart = (RingStruct *)base;
refpart = (Addr *)SegBase(space, refseg);
refpart = (Addr *)SegBase(arena, refseg);
for(i=0; i<guardians; ++i) {
RingInit(&linkpart[i]);
RingAppend(&mrg->free, &linkpart[i]);
refpart[i] = 0;
}
AVER((Addr)(&linkpart[i]) <= SegLimit(space, linkseg));
AVER((Addr)(&refpart[i]) <= SegLimit(space, refseg));
AVER((Addr)(&linkpart[i]) <= SegLimit(arena, linkseg));
AVER((Addr)(&refpart[i]) <= SegLimit(arena, refseg));
SegSetRankSet(refseg, RankSetSingle(RankFINAL)); /* design.mps.seg.field.rankSet.start */
SegSetSummary(refseg, RefSetUNIV); /* design.mps.seg.field.summary.start */
@ -162,9 +162,9 @@ static Res MRGGroupCreate(MRGGroup *groupReturn, MRG mrg)
return ResOK;
failLinkSegAlloc:
PoolSegFree(pool, refseg);
SegFree(arena, refseg);
failRefSegAlloc:
ArenaFree(space, group, (Size)sizeof(MRGGroupStruct));
ArenaFree(arena, group, (Size)sizeof(MRGGroupStruct));
failArenaAlloc:
return res;
}
@ -173,15 +173,15 @@ static Res MRGGroupScan(ScanState ss, MRGGroup group, MRG mrg)
{
Addr base;
Res res;
Space space;
Arena arena;
Addr *refpart;
Word guardians, i;
space = PoolSpace(MRGPool(mrg));
arena = PoolArena(MRGPool(mrg));
guardians = mrg->extendBy / sizeof(Addr); /* per seg */
AVER(guardians > 0);
base = SegBase(space, group->refseg);
base = SegBase(arena, group->refseg);
refpart = (Addr *)base;
TRACE_SCAN_BEGIN(ss) {
for(i=0; i<guardians; ++i) {
@ -192,7 +192,7 @@ static Res MRGGroupScan(ScanState ss, MRGGroup group, MRG mrg)
}
if(ss->rank == RankFINAL && !ss->wasMarked) { /* .improve.rank */
RingStruct *linkpart =
(RingStruct *)SegBase(space, group->linkseg);
(RingStruct *)SegBase(arena, group->linkseg);
RingRemove(&linkpart[i]);
RingAppend(&mrg->exit, &linkpart[i]);
}
@ -215,7 +215,7 @@ static Res MRGInit(Pool pool, va_list args)
RingInit(&mrg->exit);
RingInit(&mrg->free);
RingInit(&mrg->group);
mrg->extendBy = ArenaAlign(PoolSpace(pool));
mrg->extendBy = ArenaAlign(PoolArena(pool));
mrg->sig = MRGSig;
AVERT(MRG, mrg);
@ -261,7 +261,7 @@ static Res MRGAlloc(Addr *pReturn, Pool pool, Size size)
Res res;
Ring f;
Seg seg;
Space space;
Arena arena;
AVERT(Pool, pool);
mrg = PoolPoolMRG(pool);
@ -270,7 +270,7 @@ static Res MRGAlloc(Addr *pReturn, Pool pool, Size size)
AVER(pReturn != NULL);
AVER(size == sizeof(Addr)); /* design.mps.poolmrg.alloc.one-size */
space = PoolSpace(pool);
arena = PoolArena(pool);
f = RingNext(&mrg->free);
@ -288,11 +288,11 @@ static Res MRGAlloc(Addr *pReturn, Pool pool, Size size)
/* design.mps.poolmrg.alloc.pop */
RingRemove(f);
RingAppend(&mrg->entry, f);
gi = indexOfLinkPart((Addr)f, space);
b = SegOfAddr(&seg, space, (Addr)f);
gi = indexOfLinkPart((Addr)f, arena);
b = SegOfAddr(&seg, arena, (Addr)f);
AVER(b);
group = SegP(seg);
refpart = (Addr *)SegBase(space, group->refseg);
refpart = (Addr *)SegBase(arena, group->refseg);
/* design.mps.poolmrg.guardian.ref.alloc */
*pReturn = (Addr)(&refpart[gi]);
@ -303,7 +303,7 @@ static void MRGFree(Pool pool, Addr old, Size size)
{
MRG mrg;
Index gi;
Space space;
Arena arena;
Seg seg;
MRGGroup group;
Bool b;
@ -316,14 +316,14 @@ static void MRGFree(Pool pool, Addr old, Size size)
AVER(old != (Addr)0);
AVER(size == sizeof(Addr));
space = PoolSpace(pool);
b = SegOfAddr(&seg, space, old);
arena = PoolArena(pool);
b = SegOfAddr(&seg, arena, old);
AVER(b);
group = SegP(seg);
linkpart = (RingStruct *)SegBase(space, group->linkseg);
linkpart = (RingStruct *)SegBase(arena, group->linkseg);
/* design.mps.poolmrg.guardian.ref.free */
gi = indexOfRefPart(old, space);
gi = indexOfRefPart(old, arena);
AVERT(Ring, &linkpart[gi]);
RingRemove(&linkpart[gi]);
@ -335,7 +335,7 @@ static Res MRGDescribe(Pool pool, mps_lib_FILE *stream)
{
MRG mrg;
Ring r;
Space space;
Arena arena;
Bool b;
MRGGroup group;
Index gi;
@ -347,16 +347,16 @@ static Res MRGDescribe(Pool pool, mps_lib_FILE *stream)
AVERT(MRG, mrg);
/* Cannot check stream */
space = PoolSpace(pool);
arena = PoolArena(pool);
WriteF(stream, " extendBy $W\n", mrg->extendBy, NULL);
WriteF(stream, " Entry queue:\n", NULL);
RING_FOR(r, &mrg->entry) {
b = SegOfAddr(&seg, space, (Addr)r);
b = SegOfAddr(&seg, arena, (Addr)r);
AVER(b);
group = SegP(seg);
refpart = (Addr *)SegBase(space, group->refseg);
gi = indexOfLinkPart((Addr)r, space);
refpart = (Addr *)SegBase(arena, group->refseg);
gi = indexOfLinkPart((Addr)r, arena);
WriteF(stream,
" at $A ref $A\n",
(WriteFA)&refpart[gi], (WriteFA)refpart[gi],
@ -364,11 +364,11 @@ static Res MRGDescribe(Pool pool, mps_lib_FILE *stream)
}
WriteF(stream, " Exit queue:\n", NULL);
RING_FOR(r, &mrg->exit) {
b = SegOfAddr(&seg, space, (Addr)r);
b = SegOfAddr(&seg, arena, (Addr)r);
AVER(b);
group = SegP(seg);
refpart = (Addr *)SegBase(space, group->refseg);
gi = indexOfLinkPart((Addr)r, space);
refpart = (Addr *)SegBase(arena, group->refseg);
gi = indexOfLinkPart((Addr)r, arena);
WriteF(stream,
" at $A ref $A\n",
(WriteFA)&refpart[gi], (WriteFA)refpart[gi],
@ -438,7 +438,7 @@ PoolClass PoolClassMRG(void)
*/
static Bool MRGCheck(MRG mrg)
{
Space space;
Arena arena;
CHECKS(MRG, mrg);
CHECKD(Pool, &mrg->poolStruct);
@ -447,7 +447,7 @@ static Bool MRGCheck(MRG mrg)
CHECKL(RingCheck(&mrg->exit));
CHECKL(RingCheck(&mrg->free));
CHECKL(RingCheck(&mrg->group));
space = PoolSpace(&mrg->poolStruct); /* .check.norecurse */
CHECKL(mrg->extendBy == ArenaAlign(space));
arena = PoolArena(&mrg->poolStruct); /* .check.norecurse */
CHECKL(mrg->extendBy == ArenaAlign(arena));
return TRUE;
}


@ -1,6 +1,6 @@
/* impl.c.poolmv: MANUAL VARIABLE POOL
*
* $HopeName: MMsrc!poolmv.c(trunk.21) $
* $HopeName: MMsrc!poolmv.c(trunk.22) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* **** RESTRICTION: This pool may not allocate from the arena control
@ -37,7 +37,7 @@
#include "poolmfs.h"
#include "mpscmv.h"
SRCID(poolmv, "$HopeName: MMsrc!poolmv.c(trunk.21) $");
SRCID(poolmv, "$HopeName: MMsrc!poolmv.c(trunk.22) $");
#define BLOCKPOOL(mv) (MFSPool(&(mv)->blockPoolStruct))
@ -131,8 +131,8 @@ static Bool MVSpanCheck(MVSpan span)
/* This is just defined this way. It shouldn't change. */
CHECKL(span->limit.next == NULL);
/* The sentinels should mark the ends of the segment. */
CHECKL(span->base.base == SegBase(PoolSpace(MVPool(span->mv)), span->seg));
CHECKL(span->limit.limit == SegLimit(PoolSpace(MVPool(span->mv)), span->seg));
CHECKL(span->base.base == SegBase(PoolArena(MVPool(span->mv)), span->seg));
CHECKL(span->limit.limit == SegLimit(PoolArena(MVPool(span->mv)), span->seg));
/* The sentinels mustn't overlap. */
CHECKL(span->base.limit <= span->limit.base);
/* The remaining space can't be more than the gap between the sentinels. */
@ -145,7 +145,7 @@ static Res MVInit(Pool pool, va_list arg)
{
Size extendBy, avgSize, maxSize, blockExtendBy, spanExtendBy;
MV mv;
Space space;
Arena arena;
Res res;
extendBy = va_arg(arg, Size);
@ -159,7 +159,7 @@ static Res MVInit(Pool pool, va_list arg)
AVER(extendBy <= maxSize);
mv = PoolPoolMV(pool);
space = PoolSpace(pool);
arena = PoolArena(pool);
/* At 100% fragmentation we will need one block descriptor for every other */
/* allocated block, or (extendBy/avgSize)/2 descriptors. See note 1. */
@ -169,7 +169,7 @@ static Res MVInit(Pool pool, va_list arg)
}
res = PoolInit(&mv->blockPoolStruct.poolStruct,
space, PoolClassMFS(),
arena, PoolClassMFS(),
blockExtendBy, sizeof(MVBlockStruct));
if(res != ResOK)
return res;
@ -177,7 +177,7 @@ static Res MVInit(Pool pool, va_list arg)
spanExtendBy = sizeof(MVSpanStruct) * (maxSize/extendBy);
res = PoolInit(&mv->spanPoolStruct.poolStruct,
space, PoolClassMFS(),
arena, PoolClassMFS(),
spanExtendBy, sizeof(MVSpanStruct));
if(res != ResOK)
return res;
@ -213,7 +213,7 @@ static void MVFinish(Pool pool)
RING_FOR(node, spans) {
span = RING_ELT(MVSpan, spans, node);
AVERT(MVSpan, span);
PoolSegFree(pool, span->seg);
SegFree(PoolArena(pool), span->seg);
}
mv->sig = SigInvalid;
@ -370,7 +370,7 @@ static Res MVAlloc(Addr *pReturn, Pool pool, Size size)
{
Res res;
MVSpan span;
Space space;
Arena arena;
MV mv;
Size segSize;
Ring spans, node = NULL; /* gcc whinge stop */
@ -414,13 +414,13 @@ static Res MVAlloc(Addr *pReturn, Pool pool, Size size)
else
segSize = size;
space = PoolSpace(pool);
segSize = SizeAlignUp(segSize, ArenaAlign(space));
arena = PoolArena(pool);
segSize = SizeAlignUp(segSize, ArenaAlign(arena));
res = PoolSegAlloc(&span->seg, SegPrefDefault(), pool, segSize);
res = SegAlloc(&span->seg, SegPrefDefault(), arena, segSize, pool);
if(res != ResOK) { /* try again with a segment big enough for this object */
segSize = SizeAlignUp(size, ArenaAlign(space));
res = PoolSegAlloc(&span->seg, SegPrefDefault(), pool, segSize);
segSize = SizeAlignUp(size, ArenaAlign(arena));
res = SegAlloc(&span->seg, SegPrefDefault(), arena, segSize, pool);
if (res != ResOK) {
PoolFree(SPANPOOL(mv), (Addr)span, sizeof(MVSpanStruct));
return res;
@ -430,8 +430,8 @@ static Res MVAlloc(Addr *pReturn, Pool pool, Size size)
span->mv = mv;
SegSetP(span->seg, (void *)span);
RingInit(&span->spans);
span->base.base = span->base.limit = SegBase(space, span->seg);
span->limit.base = span->limit.limit = SegLimit(space, span->seg);
span->base.base = span->base.limit = SegBase(arena, span->seg);
span->limit.base = span->limit.limit = SegLimit(arena, span->seg);
span->space = AddrOffset(span->base.limit, span->limit.base);
span->limit.next = NULL;
span->base.next = &span->limit;
@ -474,7 +474,7 @@ static void MVFree(Pool pool, Addr old, Size size)
/* Map the pointer onto the segment which contains it, and thence */
/* onto the span. */
b = SegOfAddr(&seg, PoolSpace(pool), old);
b = SegOfAddr(&seg, PoolArena(pool), old);
AVER(b);
span = (MVSpan)SegP(seg);
AVERT(MVSpan, span);
@ -497,7 +497,7 @@ static void MVFree(Pool pool, Addr old, Size size)
/* both blocks are the trivial sentinel blocks */
AVER(span->base.limit == span->base.base);
AVER(span->limit.limit == span->limit.base);
PoolSegFree(pool, span->seg);
SegFree(PoolArena(pool), span->seg);
RingRemove(&span->spans);
PoolFree(SPANPOOL(mv), (Addr)span, sizeof(MVSpanStruct));
}
@ -675,7 +675,7 @@ size_t mps_mv_size(mps_pool_t mps_pool)
Pool pool;
MV mv;
MVSpan span;
Space space;
Arena arena;
Size f = 0;
Ring spans, node = NULL; /* gcc whinge stop */
@ -684,13 +684,13 @@ size_t mps_mv_size(mps_pool_t mps_pool)
AVERT(Pool, pool);
mv = PoolPoolMV(pool);
AVERT(MV, mv);
space = PoolSpace(pool);
arena = PoolArena(pool);
spans = &mv->spans;
RING_FOR(node, spans) {
span = RING_ELT(MVSpan, spans, node);
AVERT(MVSpan, span);
f += SegSize(space, span->seg);
f += SegSize(arena, span->seg);
}
return (size_t)f;


@ -1,6 +1,6 @@
/* impl.c.seg: SEGMENTS
*
* $HopeName: MMsrc!seg.c(trunk.4) $
* $HopeName: MMsrc!seg.c(trunk.5) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* .design: The design for this module is design.mps.seg.
@ -16,40 +16,60 @@
#include "mpm.h"
SRCID(seg, "$HopeName: MMsrc!seg.c(trunk.4) $");
SRCID(seg, "$HopeName: MMsrc!seg.c(trunk.5) $");
/* SegCheck -- check the integrity of a segment */
Bool SegCheck(Seg seg)
{
CHECKU(Pool, SegPool(seg));
CHECKL(TraceSetCheck(SegWhite(seg)));
CHECKL(TraceSetCheck(SegGrey(seg)));
if(SegBuffer(seg) != NULL) {
CHECKU(Buffer, SegBuffer(seg));
CHECKU(Pool, seg->_pool);
CHECKL(TraceSetCheck(seg->_white));
CHECKL(TraceSetCheck(seg->_grey));
if(seg->_buffer != NULL) {
CHECKU(Buffer, seg->_buffer);
/* design.mps.seg.field.buffer.owner */
CHECKL(BufferPool(SegBuffer(seg)) == SegPool(seg));
CHECKL(BufferPool(seg->_buffer) == seg->_pool);
}
CHECKL(RingCheck(SegPoolRing(seg)));
CHECKL(RankSetCheck(SegRankSet(seg)));
if(SegRankSet(seg) == RankSetEMPTY) {
CHECKL(RingCheck(&seg->_poolRing));
/* The segment must belong to some pool, so it should be on a */
/* pool's segment ring. (Actually, this isn't true just after */
/* the segment is initialized.) */
/* CHECKL(RingNext(&seg->_poolRing) != &seg->_poolRing); */
/* The segment should be on a grey ring if and only if it is grey. */
CHECKL(RingCheck(&seg->_greyRing));
CHECKL((seg->_grey == TraceSetEMPTY) ==
RingIsSingle(&seg->_greyRing));
CHECKL(RankSetCheck(seg->_rankSet));
if(seg->_rankSet == RankSetEMPTY) {
/* design.mps.seg.field.rankSet.empty: If there are no refs */
/* in the segment then it cannot contain black or grey refs. */
CHECKL(SegGrey(seg) == TraceSetEMPTY);
CHECKL(SegSummary(seg) == RefSetEMPTY);
CHECKL(SegSM(seg) == AccessSetEMPTY);
CHECKL(SegPM(seg) == AccessSetEMPTY);
CHECKL(seg->_grey == TraceSetEMPTY);
CHECKL(seg->_summary == RefSetEMPTY);
CHECKL(seg->_sm == AccessSetEMPTY);
CHECKL(seg->_pm == AccessSetEMPTY);
} else {
/* design.mps.seg.field.rankSet.single: The Tracer only permits */
/* one rank per segment [ref?] so this field is either empty or a */
/* singleton. */
CHECKL(RankSetIsSingle(SegRankSet(seg)));
/* .check.wb: If summary isn't universal then it must be Write shielded */
CHECKL(SegSummary(seg) == RefSetUNIV || (SegSM(seg) & AccessWRITE));
CHECKL(RankSetIsSingle(seg->_rankSet));
/* Can't check barrier invariants because SegCheck is called */
/* when raising or lowering the barrier. */
/* .check.wb: If summary isn't universal then it must be */
/* write shielded. */
/* CHECKL(seg->_summary == RefSetUNIV || (seg->_sm & AccessWRITE)); */
/* @@@@ What can be checked about the read barrier? */
}
/* "pm", "sm", and "depth" not checked. See .check.shield. */
CHECKL(BoolCheck(SegSingle(seg)));
CHECKL(BoolCheck(seg->_single));
return TRUE;
}
@ -58,20 +78,26 @@ Bool SegCheck(Seg seg)
void SegInit(Seg seg, Pool pool)
{
SegSetPool(seg, pool);
SegSetP(seg, NULL);
SegSetRankSet(seg, RankSetEMPTY);
SegSetWhite(seg, TraceSetEMPTY);
SegSetGrey(seg, TraceSetEMPTY);
SegSetSummary(seg, RefSetEMPTY);
SegSetBuffer(seg, NULL);
RingInit(SegPoolRing(seg));
SegSetPM(seg, AccessSetEMPTY);
SegSetSM(seg, AccessSetEMPTY);
SegSetDepth(seg, 0);
SegSetSingle(seg, FALSE);
AVER(seg != NULL);
AVERT(Pool, pool);
seg->_pool = pool;
seg->_p = NULL;
seg->_rankSet = RankSetEMPTY;
seg->_white = TraceSetEMPTY;
seg->_grey = TraceSetEMPTY;
seg->_summary = RefSetEMPTY;
seg->_buffer = NULL;
RingInit(&seg->_poolRing);
RingInit(&seg->_greyRing);
seg->_pm = AccessSetEMPTY;
seg->_sm = AccessSetEMPTY;
seg->_depth = 0;
seg->_single = FALSE;
AVERT(Seg, seg);
RingAppend(&pool->segRing, SegPoolRing(seg));
}
@ -83,10 +109,150 @@ void SegFinish(Seg seg)
/* Check that the segment is not exposed, or in the shield */
/* cache (see impl.c.shield.def.depth). */
AVER(SegDepth(seg) == 0);
AVER(seg->_depth == 0);
/* Don't leave a dangling buffer allocating into hyperspace. */
AVER(SegBuffer(seg) == NULL);
AVER(seg->_buffer == NULL);
RingFinish(SegPoolRing(seg));
/* See impl.c.shield.shield.flush */
ShieldFlush(PoolArena(seg->_pool));
RingRemove(SegPoolRing(seg));
/* Detach the segment from the grey list if it is grey. It is OK */
/* to delete a grey segment provided the objects in it have been */
/* proven to be unreachable by another trace. */
if(seg->_grey != TraceSetEMPTY)
RingRemove(&seg->_greyRing);
RingFinish(&seg->_poolRing);
RingFinish(&seg->_greyRing);
}
/* SegSetSummary -- change the summary on a segment
*
* In fact, we only need to raise the write barrier if the
* segment contains references, and its summary is strictly smaller
* than the summary of the unprotectable data (i.e. the mutator).
* We don't maintain such a summary, assuming that the mutator can
* access all references, so its summary is RefSetUNIV.
*/
void SegSetSummary(Seg seg, RefSet summary)
{
RefSet oldSummary;
Arena arena;
AVERT(Seg, seg);
arena = PoolArena(seg->_pool);
oldSummary = seg->_summary;
seg->_summary = summary;
AVER(seg->_rankSet != RankSetEMPTY);
/* Note: !RefSetSuper is a test for a strict subset */
if(!RefSetSuper(summary, RefSetUNIV)) {
if(RefSetSuper(oldSummary, RefSetUNIV))
ShieldRaise(arena, seg, AccessWRITE);
} else {
if(!RefSetSuper(oldSummary, RefSetUNIV))
ShieldLower(arena, seg, AccessWRITE);
}
}
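
Read together with .check.wb in SegCheck above, the rule being preserved here can be put as a predicate. A minimal sketch, assuming only the accessor macros from impl.h.mpm; RefSetSuper(s, RefSetUNIV) is the code's test for s being the universal summary, and the helper name is hypothetical:

/* Illustrative only: while a segment holds references, a summary smaller */
/* than RefSetUNIV must be defended by the write barrier, otherwise the   */
/* mutator could write a reference that falsifies it.                     */
static Bool segSummaryIsProtected(Seg seg)
{
  if(SegRankSet(seg) == RankSetEMPTY)    /* no references: nothing to defend */
    return TRUE;
  return RefSetSuper(SegSummary(seg), RefSetUNIV)  /* universal summary, or  */
      || (SegSM(seg) & AccessWRITE) != 0;          /* write barrier raised   */
}
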
/* SegSetGrey -- change the greyness of a segment
*
* Sets the segment greyness to the trace set ts and adjusts
* the shielding on the segment appropriately.
*/
void SegSetGrey(Seg seg, TraceSet grey)
{
Arena arena;
TraceSet oldGrey, flippedTraces;
Rank rank;
AVERT(Seg, seg);
AVER(TraceSetCheck(grey));
AVER(seg->_rankSet != RankSetEMPTY);
arena = PoolArena(seg->_pool);
oldGrey = seg->_grey;
seg->_grey = grey;
/* If the segment is now grey and wasn't before, add it to the */
/* appropriate grey list so that TraceFindGrey can locate it */
/* quickly later. If it is no longer grey and was before, */
/* remove it from the list. */
if(oldGrey == TraceSetEMPTY) {
if(grey != TraceSetEMPTY) {
AVER(RankSetIsSingle(seg->_rankSet));
for(rank = 0; rank < RankMAX; ++rank)
if(RankSetIsMember(seg->_rankSet, rank)) {
RingInsert(ArenaGreyRing(arena, rank), &seg->_greyRing);
break;
}
AVER(rank != RankMAX); /* there should've been a match */
}
} else {
if(grey == TraceSetEMPTY)
RingRemove(&seg->_greyRing);
}
/* The read barrier is raised when the segment is grey for */
/* some _flipped_ trace, i.e. is grey for a trace for which */
/* the mutator is black. */
flippedTraces = arena->flippedTraces;
if(TraceSetInter(oldGrey, flippedTraces) == TraceSetEMPTY) {
if(TraceSetInter(grey, flippedTraces) != TraceSetEMPTY)
ShieldRaise(arena, seg, AccessREAD);
} else {
if(TraceSetInter(grey, flippedTraces) == TraceSetEMPTY)
ShieldLower(arena, seg, AccessREAD);
}
EVENT_PPU(SegSetGrey, arena, seg, grey);
}
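
The read-barrier side has a similarly compact statement; a sketch under the same assumptions (accessor macros from impl.h.mpm, flippedTraces from the arena), with a hypothetical helper name:

/* Illustrative only: once a trace has flipped, the mutator is black for  */
/* it and must not read grey segments, or it could obtain white           */
/* references.  So the read barrier is needed exactly when the segment is */
/* grey for some flipped trace -- the condition SegSetGrey and TraceFlip  */
/* test before raising or lowering AccessREAD.                            */
static Bool segNeedsReadBarrier(Arena arena, Seg seg)
{
  return TraceSetInter(SegGrey(seg), arena->flippedTraces) != TraceSetEMPTY;
}
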
/* SegSetRankSet -- set the rank set of a segment
*
* If the rank set is made non-empty then the segment's summary is
* now a subset of the mutator's (which is assumed to be RefSetUNIV)
* so the write barrier must be imposed on the segment. If the
* rank set is made empty then there are no longer any references
* on the segment so the barrier is removed.
*
* The caller must set the summary to empty before setting the rank
* set to empty. The caller must set the rank set to non-empty before
* setting the summary to non-empty.
*/
void SegSetRankSet(Seg seg, RankSet rankSet)
{
RankSet oldRankSet;
Arena arena;
AVERT(Seg, seg);
AVER(RankSetCheck(rankSet));
AVER(rankSet == RankSetEMPTY || RankSetIsSingle(rankSet));
arena = PoolArena(seg->_pool);
oldRankSet = seg->_rankSet;
seg->_rankSet = rankSet;
if(oldRankSet == RankSetEMPTY) {
if(rankSet != RankSetEMPTY) {
AVER(seg->_summary == RefSetEMPTY);
ShieldRaise(arena, seg, AccessWRITE);
}
} else {
if(rankSet == RankSetEMPTY) {
AVER(seg->_summary == RefSetEMPTY);
ShieldLower(arena, seg, AccessWRITE);
}
}
}
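
The ordering requirement in the comment above is the one followed by the poolmrg change earlier in this diff (rank set first, then summary). A sketch of the two call sequences it implies; the wrapper function names are hypothetical, only the Seg calls come from the change:

/* Illustrative only: giving a fresh segment references, as poolmrg does  */
/* for its ref segments (RankFINAL there).                                */
static void segGainRefsExample(Seg seg)
{
  SegSetRankSet(seg, RankSetSingle(RankFINAL)); /* rank first: raises the  */
                                                /* write barrier while the */
                                                /* summary is still EMPTY  */
  SegSetSummary(seg, RefSetUNIV);               /* then widen the summary; */
                                                /* the barrier drops again */
}

/* Illustrative only: the reverse order the comment requires when the     */
/* segment is to hold no more references.                                 */
static void segLoseRefsExample(Seg seg)
{
  SegSetSummary(seg, RefSetEMPTY);              /* shrink the summary first */
                                                /* (this re-raises WRITE)   */
  SegSetRankSet(seg, RankSetEMPTY);             /* then clear the rank set, */
                                                /* lowering the barrier     */
}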


@ -1,12 +1,12 @@
/* impl.c.trace: GENERIC TRACER IMPLEMENTATION
*
* $HopeName: MMsrc!trace.c(trunk.37) $
* $HopeName: MMsrc!trace.c(trunk.38) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*/
#include "mpm.h"
SRCID(trace, "$HopeName: MMsrc!trace.c(trunk.37) $");
SRCID(trace, "$HopeName: MMsrc!trace.c(trunk.38) $");
/* ScanStateCheck -- check consistency of a ScanState object */
@ -59,7 +59,6 @@ Bool TraceCheck(Trace trace)
CHECKL(TraceIdCheck(trace->ti));
CHECKL(trace == &trace->arena->trace[trace->ti]);
CHECKL(TraceSetIsMember(trace->arena->busyTraces, trace->ti));
CHECKL(RankSetCheck(trace->grey));
/* Can't check trace->white -- not in O(1) anyway. */
/* Use trace->state to check more invariants. */
switch(trace->state) {
@ -268,10 +267,6 @@ found:
trace->ti = ti;
trace->state = TraceINIT;
trace->interval = (Size)4096; /* @@@@ should be progress control */
/* We conservatively assume that there may be grey segments at all */
/* ranks when we create the trace. (almost certainly we could do */
/* better) */
trace->grey = RankSetUNIV;
trace->sig = TraceSig;
AVERT(Trace, trace);
@ -308,10 +303,6 @@ void TraceDestroy(Trace trace)
{
AVERT(Trace, trace);
AVER(trace->state == TraceFINISHED);
#if 0
/* removed AVER for now as it is not true for the first trace */
AVER(trace->grey == RankSetEMPTY);
#endif
PoolTraceEnd(trace->action->pool, trace, trace->action);
@ -324,38 +315,6 @@ void TraceDestroy(Trace trace)
}
/* TraceSetGreyen -- turn a segment more grey
*
* Adds the trace set ts to the greyness of the segment and adjusts
* the shielding on the segment appropriately. (If it causes the
* segment to become grey for a flipped trace the shield is raised.)
* @@@@ Why does it seem to be write and a read barrier?
*/
void TraceSegGreyen(Arena arena, Seg seg, TraceSet ts)
{
TraceSet grey;
AVERT(Arena, arena);
AVERT(Seg, seg);
AVER(TraceSetCheck(ts));
grey = SegGrey(seg);
grey = TraceSetUnion(grey, ts);
if(grey != SegGrey(seg)) {
/* Currently we assume that there is only one trace. */
/* This makes it simpler to greyen each trace. */
AVER(ts == 1); /* @@@@ Hack */
ArenaTrace(arena, 0)->grey =
RankSetUnion(ArenaTrace(arena, 0)->grey, SegRankSet(seg));
if(TraceSetInter(grey, arena->flippedTraces) != TraceSetEMPTY)
ShieldRaise(arena, seg, AccessREAD);
}
SegSetGrey(seg, grey);
EVENT_PPU(TraceSegGreyen, arena, seg, ts);
}
/* TraceFlipBuffers -- flip all buffers in the arena */
static void TraceFlipBuffers(Arena arena)
@ -390,36 +349,6 @@ static void TraceFlipBuffers(Arena arena)
}
/* TraceSetSummary -- change the summary on a segment
*
* The order of setting summary and lowering shield is important.
* This code preserves the invariant that the segment is write-
* shielded whenever the summary is not universal.
* See impl.c.seg.check.wb.
*
* @@@@ In fact, we only need to raise the write barrier if the
* summary is strictly smaller than the summary of the unprotectable
* data (i.e. the mutator). We don't maintain such a summary at the
* moment, and assume that the mutator's summary is RefSetUNIV.
*/
void TraceSetSummary(Arena arena, Seg seg, RefSet summary)
{
AVERT(Arena, arena);
AVERT(Seg, seg);
if(summary == RefSetUNIV) {
SegSetSummary(seg, summary); /* NB summary == RefSetUNIV */
if(SegSM(seg) & AccessWRITE)
ShieldLower(arena, seg, AccessWRITE);
} else {
if(!(SegSM(seg) & AccessWRITE))
ShieldRaise(arena, seg, AccessWRITE);
SegSetSummary(seg, summary);
}
}
/* TraceFlip -- blacken the mutator */
static Res TraceFlip(Trace trace)
@ -428,6 +357,7 @@ static Res TraceFlip(Trace trace)
Ring node;
Arena arena;
ScanStateStruct ss;
Rank rank;
Res res;
AVERT(Trace, trace);
@ -436,6 +366,7 @@ static Res TraceFlip(Trace trace)
ShieldSuspend(arena);
AVER(trace->state == TraceUNFLIPPED);
AVER(!TraceSetIsMember(arena->flippedTraces, trace->ti));
EVENT_PP(TraceFlipBegin, trace, arena);
@ -448,13 +379,6 @@ static Res TraceFlip(Trace trace)
/* necessarily move. */
LDAge(arena, trace->white);
/* The trace is marked as flipped here, apparently prematurely, */
/* so that TraceSegGreyen will DTRT when things are scanned below. */
/* @@@@ This isn't right. When flippedTraces is changed _all_ */
/* grey segments should have their shield modes fixed up anyway. */
trace->state = TraceFLIPPED;
arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace->ti);
/* At the moment we must scan all roots, because we don't have */
/* a mechanism for shielding them. There can't be any weak or */
/* final roots either, since we must protect these in order to */
@ -497,6 +421,28 @@ static Res TraceFlip(Trace trace)
ss.sig = SigInvalid; /* just in case */
/* Now that the mutator is black we must prevent it from reading */
/* grey objects so that it can't obtain white pointers. This is */
/* achieved by read protecting all segments containing objects */
/* which are grey for any of the flipped traces. */
for(rank = 0; rank < RankMAX; ++rank)
RING_FOR(node, ArenaGreyRing(arena, rank)) {
Seg seg = SegOfGreyRing(node);
if(TraceSetInter(SegGrey(seg),
arena->flippedTraces) == TraceSetEMPTY &&
TraceSetIsMember(SegGrey(seg), trace->ti))
ShieldRaise(arena, seg, AccessREAD);
}
/* @@@@ When write barrier collection is implemented, this is where */
/* write protection should be removed for all segments which are */
/* no longer blacker than the mutator. Possibly this can be done */
/* lazily as they are touched. */
/* Mark the trace as flipped. */
trace->state = TraceFLIPPED;
arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace->ti);
EVENT_PP(TraceFlipEnd, trace, arena);
ShieldResume(arena);
@ -530,8 +476,14 @@ static void TraceReclaim(Trace trace)
PoolReclaim(SegPool(seg), trace, seg);
/* If the segment still exists, it should no longer be white. */
AVER(!(SegOfAddr(&seg, arena, base) &&
TraceSetIsMember(SegWhite(seg), trace->ti)));
/* Note that the seg returned by this SegOfAddr may not be */
/* the same as the one above, but in that case it's new and */
/* still shouldn't be white for this trace. */
{
Seg nonWhiteSeg;
AVER(!(SegOfAddr(&nonWhiteSeg, arena, base) &&
TraceSetIsMember(SegWhite(nonWhiteSeg), trace->ti)));
}
}
} while(SegNext(&seg, arena, base));
}
@ -540,7 +492,7 @@ static void TraceReclaim(Trace trace)
}
/* FindGrey -- find a grey segment
/* traceFindGrey -- find a grey segment
*
* This function finds a segment which is grey for any of the traces
* in ts and which does not have a higher rank than any other such
* segment.
*
*
* This is equivalent to choosing a grey node from the grey set
* of a partition.
*
* @@@@ This must be optimised by using better data structures at
* the cost of some bookkeeping elsewhere, esp. during fix.
*/
static Bool FindGrey(Seg *segReturn, Rank *rankReturn,
Arena arena, TraceId ti)
static Bool traceFindGrey(Seg *segReturn, Rank *rankReturn,
Arena arena, TraceId ti)
{
Rank rank;
Trace trace;
Seg seg;
Ring node;
AVER(segReturn != NULL);
AVERT(Arena, arena);
@ -567,24 +516,20 @@ static Bool FindGrey(Seg *segReturn, Rank *rankReturn,
trace = ArenaTrace(arena, ti);
for(rank = 0; rank < RankMAX; ++rank) {
if(RankSetIsMember(trace->grey, rank)) {
if(SegFirst(&seg, arena)) {
Addr base;
do {
base = SegBase(arena, seg);
if(RankSetIsMember(SegRankSet(seg), rank) &&
TraceSetIsMember(SegGrey(seg), ti)) {
*segReturn = seg;
*rankReturn = rank;
return TRUE;
}
} while(SegNext(&seg, arena, base));
RING_FOR(node, ArenaGreyRing(arena, rank)) {
Seg seg = SegOfGreyRing(node);
AVERT(Seg, seg);
AVER(SegGrey(seg) != TraceSetEMPTY);
AVER(RankSetIsMember(SegRankSet(seg), rank));
if(TraceSetIsMember(SegGrey(seg), ti)) {
*segReturn = seg;
*rankReturn = rank;
return TRUE;
}
trace->grey = RankSetDel(trace->grey, rank);
}
}
AVER(trace->grey == RankSetEMPTY);
/* There are no grey segments for this trace. */
return FALSE;
}
@ -632,10 +577,8 @@ static Res TraceScan(TraceSet ts, Rank rank,
ShieldExpose(arena, seg);
res = PoolScan(&ss, SegPool(seg), seg);
if(res != ResOK) {
ShieldCover(arena, seg);
return res;
}
if(res != ResOK)
goto failScan;
/* .scan.post-condition: */
/* The summary of reference seens by scan (ss.summary) is a subset */
@ -657,23 +600,21 @@ static Res TraceScan(TraceSet ts, Rank rank,
/* .fix.fixed.all */
AVER(RefSetSub(ss.summary, SegSummary(seg)));
TraceSetSummary(arena, seg,
TraceSetUnion(ss.fixed,
TraceSetDiff(ss.summary, ss.white)));
SegSetSummary(seg, TraceSetUnion(ss.fixed,
TraceSetDiff(ss.summary, ss.white)));
ss.sig = SigInvalid; /* just in case */
/* The segment has been scanned, so remove the greyness from it. */
SegSetGrey(seg, TraceSetDiff(SegGrey(seg), ts));
/* If the segment is no longer grey for any flipped trace it */
/* doesn't need to be behind the read barrier. */
if(TraceSetInter(SegGrey(seg), arena->flippedTraces) == TraceSetEMPTY)
ShieldLower(arena, seg, AccessREAD);
/* Cover the segment again, now it's been scanned. */
ShieldCover(arena, seg);
return ResOK;
failScan:
ShieldCover(arena, seg);
return res;
}
@ -720,7 +661,7 @@ void TraceAccess(Arena arena, Seg seg, AccessSet mode)
/* because the latter may set the summary and raise the write barrier. */
if((mode & SegSM(seg) & AccessWRITE) != 0) /* write barrier? */
TraceSetSummary(arena, seg, RefSetUNIV);
SegSetSummary(seg, RefSetUNIV);
/* The segment must now be accessible. */
AVER((mode & SegSM(seg)) == AccessSetEMPTY);
@ -739,7 +680,7 @@ static Res TraceRun(Trace trace)
arena = trace->arena;
if(FindGrey(&seg, &rank, arena, trace->ti)) {
if(traceFindGrey(&seg, &rank, arena, trace->ti)) {
AVER((SegPool(seg)->class->attr & AttrSCAN) != 0);
res = TraceScan(TraceSetSingle(trace->ti), rank,
arena, seg);