
New pool method PoolSegPoolGen gets the pool generation for a segment. (This allows the segment whiten and reclaim methods not to need to know the pool class.)

Copied from Perforce
 Change: 193084
 ServerID: perforce.ravenbrook.com
Gareth Rees 2017-03-31 13:28:25 +01:00
parent d8a5c865ba
commit 2b3fbbb8d3
12 changed files with 170 additions and 123 deletions
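
For orientation, here is a minimal, self-contained C sketch of the dispatch pattern the hunks below introduce: the pool class gains a segPoolGen method, generic code calls it through the PoolSegPoolGen() dispatcher, and each concrete class (AMC, AMS, AWL, LO) returns the generation its segment belongs to. The types and the toy class in this sketch are simplified stand-ins, not the real MPS structures.

/* Minimal stand-alone sketch of the new dispatch pattern. All types here
 * are simplified stand-ins for the real MPS structures. */

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct PoolGenStruct { int serial; } PoolGenStruct, *PoolGen;
typedef struct PoolStruct Pool;
typedef struct SegStruct { Pool *pool; } Seg;

/* Class method: get the pool generation for a segment. */
typedef PoolGen (*PoolSegPoolGenMethod)(Pool *pool, Seg *seg);

struct PoolStruct {
  PoolSegPoolGenMethod segPoolGen; /* installed by the concrete pool class */
  PoolGenStruct pgen;              /* this toy pool has a single generation */
};

/* Generic dispatcher (cf. PoolSegPoolGen in the pool.c hunk below). */
static PoolGen PoolSegPoolGen(Pool *pool, Seg *seg)
{
  assert(pool != NULL && seg != NULL);
  assert(seg->pool == pool);       /* the segment must belong to this pool */
  return pool->segPoolGen(pool, seg);
}

/* Concrete method (cf. amcSegPoolGen, amsSegPoolGen, awlSegPoolGen and
 * loSegPoolGen below): map a segment to its generation. */
static PoolGen toySegPoolGen(Pool *pool, Seg *seg)
{
  (void)seg;                       /* one generation for the whole toy pool */
  return &pool->pgen;
}

int main(void)
{
  Pool pool = { toySegPoolGen, { 1 } };
  Seg seg = { &pool };
  /* A segment method (whiten, reclaim) no longer downcasts the pool;
   * it just asks for the generation: */
  PoolGen pgen = PoolSegPoolGen(&pool, &seg);
  printf("segment is in generation %d\n", pgen->serial);
  return 0;
}

The payoff is visible in the amsSegWhiten, amsSegReclaim, awlSegWhiten, awlSegReclaim, loSegWhiten and loSegReclaim hunks: they now obtain the generation with PoolGen pgen = PoolSegPoolGen(pool, seg) instead of downcasting the pool via MustBeA(AMSPool, pool) and its AWL/LO counterparts.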

View file

@ -52,8 +52,6 @@ typedef struct GenDescStruct {
/* PoolGen -- descriptor of a generation in a pool */
typedef struct PoolGenStruct *PoolGen;
#define PoolGenSig ((Sig)0x519B009E) /* SIGnature POOl GEn */
typedef struct PoolGenStruct {

View file

@ -225,6 +225,7 @@ extern void PoolDestroy(Pool pool);
extern BufferClass PoolDefaultBufferClass(Pool pool);
extern Res PoolAlloc(Addr *pReturn, Pool pool, Size size);
extern void PoolFree(Pool pool, Addr old, Size size);
extern PoolGen PoolSegPoolGen(Pool pool, Seg seg);
extern Res PoolTraceBegin(Pool pool, Trace trace);
extern void PoolFreeWalk(Pool pool, FreeBlockVisitor f, void *p);
extern Size PoolTotalSize(Pool pool);
@ -236,6 +237,7 @@ extern Res PoolNoAlloc(Addr *pReturn, Pool pool, Size size);
extern Res PoolTrivAlloc(Addr *pReturn, Pool pool, Size size);
extern void PoolNoFree(Pool pool, Addr old, Size size);
extern void PoolTrivFree(Pool pool, Addr old, Size size);
extern PoolGen PoolNoSegPoolGen(Pool pool, Seg seg);
extern Res PoolNoBufferFill(Addr *baseReturn, Addr *limitReturn,
Pool pool, Buffer buffer, Size size);
extern Res PoolTrivBufferFill(Addr *baseReturn, Addr *limitReturn,

View file

@ -57,6 +57,7 @@ typedef struct mps_pool_class_s {
PoolInitMethod init; /* initialize the pool descriptor */
PoolAllocMethod alloc; /* allocate memory from pool */
PoolFreeMethod free; /* free memory to pool */
PoolSegPoolGenMethod segPoolGen; /* get pool generation of segment */
PoolBufferFillMethod bufferFill; /* out-of-line reserve */
PoolBufferEmptyMethod bufferEmpty; /* out-of-line commit */
PoolRampBeginMethod rampBegin;/* begin a ramp pattern */

View file

@ -105,6 +105,7 @@ typedef struct LandClassStruct *LandClass; /* <design/land/> */
typedef unsigned FindDelete; /* <design/land/> */
typedef struct ShieldStruct *Shield; /* design.mps.shield */
typedef struct HistoryStruct *History; /* design.mps.arena.ld */
typedef struct PoolGenStruct *PoolGen; /* <design/strategy/> */
/* Arena*Method -- see <code/mpmst.h#ArenaClassStruct> */
@ -197,6 +198,7 @@ typedef void (*PoolVarargsMethod)(ArgStruct args[], va_list varargs);
typedef Res (*PoolInitMethod)(Pool pool, Arena arena, PoolClass klass, ArgList args);
typedef Res (*PoolAllocMethod)(Addr *pReturn, Pool pool, Size size);
typedef void (*PoolFreeMethod)(Pool pool, Addr old, Size size);
typedef PoolGen (*PoolSegPoolGenMethod)(Pool pool, Seg seg);
typedef Res (*PoolBufferFillMethod)(Addr *baseReturn, Addr *limitReturn,
Pool pool, Buffer buffer, Size size);
typedef void (*PoolBufferEmptyMethod)(Pool pool, Buffer buffer,

View file

@ -45,6 +45,7 @@ Bool PoolClassCheck(PoolClass klass)
CHECKL(FUNCHECK(klass->init));
CHECKL(FUNCHECK(klass->alloc));
CHECKL(FUNCHECK(klass->free));
CHECKL(FUNCHECK(klass->segPoolGen));
CHECKL(FUNCHECK(klass->bufferFill));
CHECKL(FUNCHECK(klass->bufferEmpty));
CHECKL(FUNCHECK(klass->rampBegin));
@ -259,6 +260,17 @@ void PoolFree(Pool pool, Addr old, Size size)
}
/* PoolSegPoolGen -- get pool generation for a segment */
PoolGen PoolSegPoolGen(Pool pool, Seg seg)
{
AVERT(Pool, pool);
AVERT(Seg, seg);
AVER(pool == SegPool(seg));
return Method(Pool, pool, segPoolGen)(pool, seg);
}
/* PoolFreeWalk -- walk free blocks in this pool
*
* PoolFreeWalk is not required to find all free blocks.

View file

@ -167,6 +167,7 @@ DEFINE_CLASS(Pool, AbstractPool, klass)
klass->rampEnd = PoolNoRampEnd;
klass->framePush = PoolNoFramePush;
klass->framePop = PoolNoFramePop;
klass->segPoolGen = PoolNoSegPoolGen;
klass->freewalk = PoolTrivFreeWalk;
klass->bufferClass = PoolNoBufferClass;
klass->debugMixin = PoolNoDebugMixin;
@ -236,6 +237,14 @@ void PoolTrivFree(Pool pool, Addr old, Size size)
NOOP; /* trivial free has no effect */
}
PoolGen PoolNoSegPoolGen(Pool pool, Seg seg)
{
AVERT(Pool, pool);
AVERT(Seg, seg);
AVER(pool == SegPool(seg));
NOTREACHED;
return NULL;
}
Res PoolNoBufferFill(Addr *baseReturn, Addr *limitReturn,
Pool pool, Buffer buffer, Size size)

View file

@ -1132,6 +1132,17 @@ static void AMCRampEnd(Pool pool, Buffer buf)
}
/* amcSegPoolGen -- get pool generation for a segment */
static PoolGen amcSegPoolGen(Pool pool, Seg seg)
{
amcSeg amcseg = MustBeA(amcSeg, seg);
AVERT(Pool, pool);
AVER(pool == SegPool(seg));
return &amcseg->gen->pgen;
}
/* amcSegWhiten -- condemn the segment for the trace
*
* If the segment has a mutator buffer on it, we nail the buffer,
@ -2020,6 +2031,7 @@ DEFINE_CLASS(Pool, AMCZPool, klass)
klass->bufferEmpty = AMCBufferEmpty;
klass->rampBegin = AMCRampBegin;
klass->rampEnd = AMCRampEnd;
klass->segPoolGen = amcSegPoolGen;
klass->bufferClass = amcBufClassGet;
klass->totalSize = AMCTotalSize;
klass->freeSize = AMCFreeSize;

View file

@ -54,13 +54,14 @@ typedef struct AMSDebugStruct *AMSDebug;
Bool AMSSegCheck(AMSSeg amsseg)
{
Seg seg = AMSSeg2Seg(amsseg);
Seg seg = MustBeA(Seg, amsseg);
Pool pool = SegPool(seg);
CHECKS(AMSSeg, amsseg);
CHECKD(GCSeg, &amsseg->gcSegStruct);
CHECKU(AMS, amsseg->ams);
CHECKL(AMSPool(amsseg->ams) == SegPool(seg));
CHECKL(amsseg->grains == AMSGrains(amsseg->ams, SegSize(seg)));
CHECKL(amsseg->grains == PoolSizeGrains(pool, SegSize(seg)));
CHECKL(amsseg->grains > 0);
CHECKL(amsseg->grains == amsseg->freeGrains + amsseg->bufferedGrains
+ amsseg->oldGrains + amsseg->newGrains);
@ -113,11 +114,13 @@ void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockVisitor f, void *p)
next, amsseg->grains, 1);
if (!found)
break;
(*f)(AMS_INDEX_ADDR(seg, base), AMS_INDEX_ADDR(seg, limit), pool, p);
(*f)(PoolAddrOfIndex(SegBase(seg), pool, base),
PoolAddrOfIndex(SegBase(seg), pool, limit), pool, p);
next = limit + 1;
}
} else if (amsseg->firstFree < amsseg->grains)
(*f)(AMS_INDEX_ADDR(seg, amsseg->firstFree), SegLimit(seg), pool, p);
(*f)(PoolAddrOfIndex(SegBase(seg), pool, amsseg->firstFree),
SegLimit(seg), pool, p);
}
@ -242,7 +245,7 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
arena = PoolArena(pool);
/* no useful checks for base and size */
amsseg->grains = size >> ams->grainShift;
amsseg->grains = PoolSizeGrains(pool, size);
amsseg->freeGrains = amsseg->grains;
amsseg->bufferedGrains = (Count)0;
amsseg->newGrains = (Count)0;
@ -324,6 +327,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
{
Count loGrains, hiGrains, allGrains;
AMSSeg amsseg, amssegHi;
Pool pool;
Arena arena;
AMS ams;
BT allocTable, nongreyTable, nonwhiteTable; /* .table-names */
@ -336,15 +340,16 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
AVERT(AMSSeg, amsseg);
AVERT(AMSSeg, amssegHi);
/* other parameters are checked by next-method */
arena = PoolArena(SegPool(seg));
ams = PoolAMS(SegPool(seg));
pool = SegPool(seg);
arena = PoolArena(pool);
ams = PoolAMS(pool);
loGrains = amsseg->grains;
hiGrains = amssegHi->grains;
allGrains = loGrains + hiGrains;
/* checks for .grain-align */
AVER(allGrains == AddrOffset(base, limit) >> ams->grainShift);
AVER(allGrains == PoolSizeGrains(pool, AddrOffset(base, limit)));
/* checks for .empty */
AVER(amssegHi->freeGrains == hiGrains);
AVER(!amssegHi->marksChanged);
@ -405,6 +410,7 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
{
Count loGrains, hiGrains, allGrains;
AMSSeg amsseg, amssegHi;
Pool pool;
Arena arena;
AMS ams;
BT allocTableLo, nongreyTableLo, nonwhiteTableLo; /* .table-names */
@ -417,11 +423,12 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
amssegHi = Seg2AMSSeg(segHi);
AVERT(AMSSeg, amsseg);
/* other parameters are checked by next-method */
arena = PoolArena(SegPool(seg));
ams = PoolAMS(SegPool(seg));
pool = SegPool(seg);
arena = PoolArena(pool);
ams = PoolAMS(pool);
loGrains = AMSGrains(ams, AddrOffset(base, mid));
hiGrains = AMSGrains(ams, AddrOffset(mid, limit));
loGrains = PoolSizeGrains(pool, AddrOffset(base, mid));
hiGrains = PoolSizeGrains(pool, AddrOffset(mid, limit));
allGrains = loGrains + hiGrains;
/* checks for .grain-align */
@ -504,7 +511,9 @@ failCreateTablesLo:
#define WRITE_BUFFER_LIMIT(i, accessor, code) \
BEGIN \
if (hasBuffer && (i) == AMS_ADDR_INDEX(seg, accessor(buffer))) { \
if (hasBuffer && \
(i) == PoolIndexOfAddr(SegBase(seg), SegPool(seg), accessor(buffer))) \
{ \
Res _res = WriteF(stream, 0, code, NULL); \
if (_res != ResOK) return _res; \
} \
@ -710,6 +719,7 @@ failSize:
static void AMSSegsDestroy(AMS ams)
{
Pool pool = AMSPool(ams);
Ring ring, node, next; /* for iterating over the segments */
ring = PoolSegRing(AMSPool(ams));
@ -722,9 +732,9 @@ static void AMSSegsDestroy(AMS ams)
AVER(amsseg->bufferedGrains == 0);
AMSSegFreeCheck(amsseg);
PoolGenFree(ams->pgen, seg,
AMSGrainsSize(ams, amsseg->freeGrains),
AMSGrainsSize(ams, amsseg->oldGrains),
AMSGrainsSize(ams, amsseg->newGrains),
PoolGrainsSize(pool, amsseg->freeGrains),
PoolGrainsSize(pool, amsseg->oldGrains),
PoolGrainsSize(pool, amsseg->newGrains),
FALSE);
}
}
@ -798,7 +808,6 @@ static Res AMSInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
AVER(pool->format != NULL);
pool->alignment = pool->format->alignment;
pool->alignShift = SizeLog2(pool->alignment);
ams->grainShift = SizeLog2(PoolAlignment(pool));
/* .ambiguous.noshare: If the pool is required to support ambiguous */
/* references, the alloc and white tables cannot be shared. */
ams->shareAllocTable = !supportAmbiguous;
@ -860,7 +869,7 @@ void AMSFinish(Inst inst)
static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn,
Seg seg, Size size)
{
AMS ams;
Pool pool;
AMSSeg amsseg;
Size grains;
Bool canAlloc; /* can we allocate in this segment? */
@ -871,13 +880,12 @@ static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn,
/* seg has already been checked, in AMSBufferFill. */
amsseg = Seg2AMSSeg(seg);
ams = amsseg->ams;
AVERT(AMS, ams);
pool = SegPool(seg);
AVER(size > 0);
AVER(SizeIsAligned(size, PoolAlignment(AMSPool(ams))));
AVER(SizeIsAligned(size, PoolAlignment(pool)));
grains = AMSGrains(ams, size);
grains = PoolSizeGrains(pool, size);
AVER(grains > 0);
if (grains > amsseg->grains)
return FALSE;
@ -947,7 +955,7 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
seg = SegOfPoolRing(node);
amsseg = Seg2AMSSeg(seg);
AVERT_CRITICAL(AMSSeg, amsseg);
if (amsseg->freeGrains >= AMSGrains(ams, size)) {
if (amsseg->freeGrains >= PoolSizeGrains(pool, size)) {
if (SegRankSet(seg) == rankSet
&& !SegHasBuffer(seg)
/* Can't use a white or grey segment, see d.m.p.fill.colour. */
@ -969,7 +977,8 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
found:
AVER(b);
baseAddr = AMS_INDEX_ADDR(seg, base); limitAddr = AMS_INDEX_ADDR(seg, limit);
baseAddr = PoolAddrOfIndex(SegBase(seg), pool, base);
limitAddr = PoolAddrOfIndex(SegBase(seg), pool, limit);
DebugPoolFreeCheck(pool, baseAddr, limitAddr);
allocatedSize = AddrOffset(baseAddr, limitAddr);
@ -1008,8 +1017,8 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
amsseg = Seg2AMSSeg(seg);
AVERT(AMSSeg, amsseg);
initIndex = AMS_ADDR_INDEX(seg, init);
limitIndex = AMS_ADDR_INDEX(seg, limit);
initIndex = PoolIndexOfAddr(SegBase(seg), pool, init);
limitIndex = PoolIndexOfAddr(SegBase(seg), pool, limit);
AVER(initIndex <= limitIndex);
if (init < limit) {
@ -1056,8 +1065,18 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
amsseg->freeGrains += unusedGrains;
amsseg->bufferedGrains = 0;
amsseg->newGrains += usedGrains;
PoolGenAccountForEmpty(ams->pgen, AMSGrainsSize(ams, usedGrains),
AMSGrainsSize(ams, unusedGrains), FALSE);
PoolGenAccountForEmpty(ams->pgen, PoolGrainsSize(pool, usedGrains),
PoolGrainsSize(pool, unusedGrains), FALSE);
}
/* amsSegPoolGen -- get pool generation for an AMS segment */
static PoolGen amsSegPoolGen(Pool pool, Seg seg)
{
AMS ams = MustBeA(AMSPool, pool);
AVERT(Seg, seg);
return ams->pgen;
}
@ -1085,7 +1104,7 @@ static Res amsSegWhiten(Seg seg, Trace trace)
Count agedGrains, uncondemnedGrains;
AMSSeg amsseg = MustBeA(AMSSeg, seg);
Pool pool = SegPool(seg);
AMS ams = MustBeA(AMSPool, pool);
PoolGen pgen = PoolSegPoolGen(pool, seg);
AVERT(Trace, trace);
@ -1104,7 +1123,7 @@ static Res amsSegWhiten(Seg seg, Trace trace)
}
/* Start using allocTable as the white table, if so configured. */
if (ams->shareAllocTable) {
if (amsseg->ams->shareAllocTable) {
if (amsseg->allocTableInUse) {
/* During the collection, it can't use allocTable for AMS_ALLOCED, so */
/* make it use firstFree. */
@ -1118,8 +1137,8 @@ static Res amsSegWhiten(Seg seg, Trace trace)
if (SegBuffer(&buffer, seg)) { /* <design/poolams/#condemn.buffer> */
Index scanLimitIndex, limitIndex;
scanLimitIndex = AMS_ADDR_INDEX(seg, BufferScanLimit(buffer));
limitIndex = AMS_ADDR_INDEX(seg, BufferLimit(buffer));
scanLimitIndex = PoolIndexOfAddr(SegBase(seg), pool, BufferScanLimit(buffer));
limitIndex = PoolIndexOfAddr(SegBase(seg), pool, BufferLimit(buffer));
amsSegRangeWhiten(seg, 0, scanLimitIndex);
if (scanLimitIndex < limitIndex)
@ -1135,8 +1154,8 @@ static Res amsSegWhiten(Seg seg, Trace trace)
/* The unused part of the buffer remains buffered: the rest becomes old. */
AVER(amsseg->bufferedGrains >= uncondemnedGrains);
agedGrains = amsseg->bufferedGrains - uncondemnedGrains;
PoolGenAccountForAge(ams->pgen, AMSGrainsSize(ams, agedGrains),
AMSGrainsSize(ams, amsseg->newGrains), FALSE);
PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains),
PoolGrainsSize(pool, amsseg->newGrains), FALSE);
amsseg->oldGrains += agedGrains + amsseg->newGrains;
amsseg->bufferedGrains = uncondemnedGrains;
amsseg->newGrains = 0;
@ -1144,8 +1163,8 @@ static Res amsSegWhiten(Seg seg, Trace trace)
amsseg->ambiguousFixes = FALSE;
if (amsseg->oldGrains > 0) {
GenDescCondemned(ams->pgen->gen, trace,
AMSGrainsSize(ams, amsseg->oldGrains));
GenDescCondemned(pgen->gen, trace,
PoolGrainsSize(pool, amsseg->oldGrains));
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
} else {
amsseg->colourTablesInUse = FALSE;
@ -1177,7 +1196,7 @@ typedef Res (*AMSObjectFunction)(
static Res semSegIterate(Seg seg, AMSObjectFunction f, void *closure)
{
Res res;
AMS ams;
Pool pool;
AMSSeg amsseg;
Format format;
Align alignment;
@ -1192,15 +1211,15 @@ static Res semSegIterate(Seg seg, AMSObjectFunction f, void *closure)
amsseg = Seg2AMSSeg(seg);
AVERT(AMSSeg, amsseg);
ams = amsseg->ams;
AVERT(AMS, ams);
format = AMSPool(ams)->format;
pool = SegPool(seg);
AVERT(Pool, pool);
format = pool->format;
AVERT(Format, format);
alignment = PoolAlignment(AMSPool(ams));
alignment = PoolAlignment(pool);
/* If we're using the alloc table as a white table, we can't use it to */
/* determine where there are objects. */
AVER(!(ams->shareAllocTable && amsseg->colourTablesInUse));
AVER(!(amsseg->ams->shareAllocTable && amsseg->colourTablesInUse));
p = SegBase(seg);
limit = SegLimit(seg);
@ -1216,7 +1235,7 @@ static Res semSegIterate(Seg seg, AMSObjectFunction f, void *closure)
|| (p < BufferScanLimit(buffer))
|| (p >= BufferLimit(buffer))); /* not in the buffer */
i = AMS_ADDR_INDEX(seg, p);
i = PoolIndexOfAddr(SegBase(seg), pool, p);
if (!AMS_ALLOCED(seg, i)) { /* no object here */
if (amsseg->allocTableInUse) {
Index dummy, nextIndex;
@ -1227,7 +1246,7 @@ static Res semSegIterate(Seg seg, AMSObjectFunction f, void *closure)
i, amsseg->grains, 1);
AVER(more);
AVER(dummy == i);
next = AMS_INDEX_ADDR(seg, nextIndex);
next = PoolAddrOfIndex(SegBase(seg), pool, nextIndex);
} else {
/* If there's no allocTable, this is the free block at the end. */
next = limit;
@ -1293,7 +1312,7 @@ static Res amsScanObject(Seg seg, Index i, Addr p, Addr next, void *clos)
if (res != ResOK)
return res;
if (!closure->scanAllObjects) {
Index j = AMS_ADDR_INDEX(seg, next);
Index j = PoolIndexOfAddr(SegBase(seg), SegPool(seg), next);
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
AMS_GREY_BLACKEN(seg, i);
if (i+1 < j)
@ -1364,7 +1383,7 @@ static Res amsSegScan(Bool *totalReturn, Seg seg, ScanState ss)
&& AMSFindGrey(&i, &j, seg, j, amsseg->grains)) {
Addr clientP, clientNext;
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
p = AMS_INDEX_ADDR(seg, i);
p = PoolAddrOfIndex(SegBase(seg), pool, i);
clientP = AddrAdd(p, format->headerSize);
if (format->skip != NULL) {
clientNext = (*format->skip)(clientP);
@ -1373,7 +1392,7 @@ static Res amsSegScan(Bool *totalReturn, Seg seg, ScanState ss)
clientNext = AddrAdd(clientP, alignment);
next = AddrAdd(p, alignment);
}
j = AMS_ADDR_INDEX(seg, next);
j = PoolIndexOfAddr(SegBase(seg), pool, next);
res = FormatScan(format, ss, clientP, clientNext);
if (res != ResOK) {
/* <design/poolams/#marked.scan.fail> */
@ -1436,7 +1455,7 @@ static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO)
return ResOK;
}
i = AMS_ADDR_INDEX(seg, base);
i = PoolIndexOfAddr(SegBase(seg), pool, base);
AVER_CRITICAL(i < amsseg->grains);
AVER_CRITICAL(!AMS_IS_INVALID_COLOUR(seg, i));
@ -1476,7 +1495,7 @@ static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO)
next = AddrSub(clientNext, format->headerSize);
/* Part of the object might be grey, because of ambiguous */
/* fixes, but that's OK, because scan will ignore that. */
AMS_RANGE_WHITE_BLACKEN(seg, i, AMS_ADDR_INDEX(seg, next));
AMS_RANGE_WHITE_BLACKEN(seg, i, PoolIndexOfAddr(SegBase(seg), pool, next));
} else { /* turn it grey */
AMS_WHITE_GREYEN(seg, i);
SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces));
@ -1501,10 +1520,10 @@ static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO)
static Res amsSegBlackenObject(Seg seg, Index i, Addr p, Addr next, void *clos)
{
UNUSED(p);
AVER(clos == NULL);
AVER(clos == UNUSED_POINTER);
/* Do what amsScanObject does, minus the scanning. */
if (AMS_IS_GREY(seg, i)) {
Index j = AMS_ADDR_INDEX(seg, next);
Index j = PoolIndexOfAddr(SegBase(seg), SegPool(seg), next);
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
AMS_GREY_BLACKEN(seg, i);
if (i+1 < j)
@ -1526,7 +1545,7 @@ static void amsSegBlacken(Seg seg, TraceSet traceSet)
AVERT(AMSSeg, amsseg);
AVER(amsseg->marksChanged); /* there must be something grey */
amsseg->marksChanged = FALSE;
res = semSegIterate(seg, amsSegBlackenObject, NULL);
res = semSegIterate(seg, amsSegBlackenObject, UNUSED_POINTER);
AVER(res == ResOK);
}
}
@ -1538,7 +1557,7 @@ static void amsSegReclaim(Seg seg, Trace trace)
{
AMSSeg amsseg = MustBeA(AMSSeg, seg);
Pool pool = SegPool(seg);
AMS ams = MustBeA(AMSPool, pool);
PoolGen pgen = PoolSegPoolGen(pool, seg);
Count nowFree, grains, reclaimedGrains;
Size preservedInPlaceSize;
PoolDebugMixin debug;
@ -1557,7 +1576,8 @@ static void amsSegReclaim(Seg seg, Trace trace)
while(j < grains && AMS_FIND_WHITE_RANGE(&i, &j, seg, j, grains)) {
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
DebugPoolFreeSplat(pool, AMS_INDEX_ADDR(seg, i), AMS_INDEX_ADDR(seg, j));
DebugPoolFreeSplat(pool, PoolAddrOfIndex(SegBase(seg), pool, i),
PoolAddrOfIndex(SegBase(seg), pool, j));
++j; /* we know next grain is not white */
}
}
@ -1571,7 +1591,7 @@ static void amsSegReclaim(Seg seg, Trace trace)
|| BTIsResRange(amsseg->nonwhiteTable,
amsseg->firstFree, grains));
} else {
if (ams->shareAllocTable) {
if (amsseg->ams->shareAllocTable) {
/* Stop using allocTable as the white table. */
amsseg->allocTableInUse = TRUE;
} else {
@ -1584,11 +1604,11 @@ static void amsSegReclaim(Seg seg, Trace trace)
AVER(amsseg->oldGrains >= reclaimedGrains);
amsseg->oldGrains -= reclaimedGrains;
amsseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(ams->pgen, AMSGrainsSize(ams, reclaimedGrains), FALSE);
STATISTIC(trace->reclaimSize += AMSGrainsSize(ams, reclaimedGrains));
PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains));
/* preservedInPlaceCount is updated on fix */
preservedInPlaceSize = AMSGrainsSize(ams, amsseg->oldGrains);
GenDescSurvived(ams->pgen->gen, trace, 0, preservedInPlaceSize);
preservedInPlaceSize = PoolGrainsSize(pool, amsseg->oldGrains);
GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize);
/* Ensure consistency of segment even if are just about to free it */
amsseg->colourTablesInUse = FALSE;
@ -1597,10 +1617,10 @@ static void amsSegReclaim(Seg seg, Trace trace)
if (amsseg->freeGrains == grains && !SegHasBuffer(seg)) {
/* No survivors */
AVER(amsseg->bufferedGrains == 0);
PoolGenFree(ams->pgen, seg,
AMSGrainsSize(ams, amsseg->freeGrains),
AMSGrainsSize(ams, amsseg->oldGrains),
AMSGrainsSize(ams, amsseg->newGrains),
PoolGenFree(pgen, seg,
PoolGrainsSize(pool, amsseg->freeGrains),
PoolGrainsSize(pool, amsseg->oldGrains),
PoolGrainsSize(pool, amsseg->newGrains),
FALSE);
}
}
@ -1640,7 +1660,7 @@ static void amsSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
/* either before the buffer, or after it, never in it */
AVER(object < BufferGetInit(buffer) || BufferLimit(buffer) <= object);
}
i = AMS_ADDR_INDEX(seg, object);
i = PoolIndexOfAddr(SegBase(seg), pool, object);
if (!AMS_ALLOCED(seg, i)) {
/* This grain is free */
object = AddrAdd(object, PoolAlignment(pool));
@ -1724,12 +1744,6 @@ static Res AMSDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
if (res != ResOK)
return res;
res = WriteF(stream, depth + 2,
"grain shift $U\n", (WriteFU)ams->grainShift,
NULL);
if (res != ResOK)
return res;
res = WriteF(stream, depth + 2,
"segments: * black + grey - white . alloc ! bad\n"
"buffers: [ base < scan limit | init > alloc ] limit\n",
@ -1764,6 +1778,7 @@ DEFINE_CLASS(Pool, AMSPool, klass)
klass->bufferClass = RankBufClassGet;
klass->bufferFill = AMSBufferFill;
klass->bufferEmpty = AMSBufferEmpty;
klass->segPoolGen = amsSegPoolGen;
klass->freewalk = AMSFreeWalk;
klass->totalSize = AMSTotalSize;
klass->freeSize = AMSFreeSize;
@ -1822,7 +1837,6 @@ Bool AMSCheck(AMS ams)
CHECKC(AMSPool, ams);
CHECKD(Pool, AMSPool(ams));
CHECKL(IsA(AMSPool, ams));
CHECKL(PoolAlignment(AMSPool(ams)) == AMSGrainsSize(ams, (Size)1));
CHECKL(PoolAlignment(AMSPool(ams)) == AMSPool(ams)->format->alignment);
if (ams->pgen != NULL) {
CHECKL(ams->pgen == &ams->pgenStruct);

View file

@ -41,7 +41,6 @@ typedef Res (*AMSSegSizePolicyFunction)(Size *sizeReturn,
typedef struct AMSStruct {
PoolStruct poolStruct; /* generic pool structure */
Shift grainShift; /* log2 of grain size */
PoolGenStruct pgenStruct; /* generation representing the pool */
PoolGen pgen; /* NULL or pointer to pgenStruct field */
Size size; /* total segment size of the pool */
@ -83,22 +82,6 @@ typedef struct AMSSegStruct {
#define AMSPool(ams) (&(ams)->poolStruct)
/* macros for abstracting index/address computations */
/* <design/poolams/#addr-index.slow> */
/* only use when size is a multiple of the grain size */
#define AMSGrains(ams, size) ((size) >> (ams)->grainShift)
#define AMSGrainsSize(ams, grains) ((grains) << (ams)->grainShift)
#define AMSSegShift(seg) (Seg2AMSSeg(seg)->ams->grainShift)
#define AMS_ADDR_INDEX(seg, addr) \
((Index)(AddrOffset(SegBase(seg), addr) >> AMSSegShift(seg)))
#define AMS_INDEX_ADDR(seg, index) \
AddrAdd(SegBase(seg), (Size)(index) << AMSSegShift(seg))
/* colour ops */
#define AMS_IS_WHITE(seg, index) \

View file

@ -714,6 +714,16 @@ static void AWLBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
}
/* awlSegPoolGen -- get pool generation for an AWL segment */
static PoolGen awlSegPoolGen(Pool pool, Seg seg)
{
AWL awl = MustBeA(AWLPool, pool);
AVERT(Seg, seg);
return awl->pgen;
}
/* awlSegWhiten -- segment condemning method */
/* awlSegRangeWhiten -- helper function that works on a range.
@ -734,7 +744,7 @@ static Res awlSegWhiten(Seg seg, Trace trace)
{
AWLSeg awlseg = MustBeA(AWLSeg, seg);
Pool pool = SegPool(seg);
AWL awl = MustBeA(AWLPool, pool);
PoolGen pgen = PoolSegPoolGen(pool, seg);
Buffer buffer;
Count agedGrains, uncondemnedGrains;
@ -768,14 +778,14 @@ static Res awlSegWhiten(Seg seg, Trace trace)
/* The unused part of the buffer remains buffered: the rest becomes old. */
AVER(awlseg->bufferedGrains >= uncondemnedGrains);
agedGrains = awlseg->bufferedGrains - uncondemnedGrains;
PoolGenAccountForAge(awl->pgen, PoolGrainsSize(pool, agedGrains),
PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains),
PoolGrainsSize(pool, awlseg->newGrains), FALSE);
awlseg->oldGrains += agedGrains + awlseg->newGrains;
awlseg->bufferedGrains = uncondemnedGrains;
awlseg->newGrains = 0;
if (awlseg->oldGrains > 0) {
GenDescCondemned(awl->pgen->gen, trace,
GenDescCondemned(pgen->gen, trace,
PoolGrainsSize(pool, awlseg->oldGrains));
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
}
@ -1047,7 +1057,7 @@ static void awlSegReclaim(Seg seg, Trace trace)
{
AWLSeg awlseg = MustBeA(AWLSeg, seg);
Pool pool = SegPool(seg);
AWL awl = MustBeA(AWLPool, pool);
PoolGen pgen = PoolSegPoolGen(pool, seg);
Addr base = SegBase(seg);
Buffer buffer;
Bool hasBuffer = SegBuffer(&buffer, seg);
@ -1101,17 +1111,17 @@ static void awlSegReclaim(Seg seg, Trace trace)
AVER(awlseg->oldGrains >= reclaimedGrains);
awlseg->oldGrains -= reclaimedGrains;
awlseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(awl->pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains));
STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount);
GenDescSurvived(awl->pgen->gen, trace, 0, preservedInPlaceSize);
GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize);
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
if (awlseg->freeGrains == awlseg->grains && !hasBuffer) {
/* No survivors */
AVER(awlseg->bufferedGrains == 0);
PoolGenFree(awl->pgen, seg,
PoolGenFree(pgen, seg,
PoolGrainsSize(pool, awlseg->freeGrains),
PoolGrainsSize(pool, awlseg->oldGrains),
PoolGrainsSize(pool, awlseg->newGrains),
@ -1245,6 +1255,7 @@ DEFINE_CLASS(Pool, AWLPool, klass)
klass->bufferClass = RankBufClassGet;
klass->bufferFill = AWLBufferFill;
klass->bufferEmpty = AWLBufferEmpty;
klass->segPoolGen = awlSegPoolGen;
klass->totalSize = AWLTotalSize;
klass->freeSize = AWLFreeSize;
AVERT(PoolClass, klass);

View file

@ -288,7 +288,7 @@ static void loSegReclaim(Seg seg, Trace trace)
Count reclaimedGrains = (Count)0;
LOSeg loseg = MustBeA(LOSeg, seg);
Pool pool = SegPool(seg);
LO lo = MustBeA(LOPool, pool);
PoolGen pgen = PoolSegPoolGen(pool, seg);
Format format = NULL; /* supress "may be used uninitialized" warning */
Count preservedInPlaceCount = (Count)0;
Size preservedInPlaceSize = (Size)0;
@ -354,16 +354,16 @@ static void loSegReclaim(Seg seg, Trace trace)
AVER(loseg->oldGrains >= reclaimedGrains);
loseg->oldGrains -= reclaimedGrains;
loseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(lo->pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains));
STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount);
GenDescSurvived(lo->pgen->gen, trace, 0, preservedInPlaceSize);
GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize);
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
if (!marked) {
AVER(loseg->bufferedGrains == 0);
PoolGenFree(lo->pgen, seg,
PoolGenFree(pgen, seg,
PoolGrainsSize(pool, loseg->freeGrains),
PoolGrainsSize(pool, loseg->oldGrains),
PoolGrainsSize(pool, loseg->newGrains),
@ -639,13 +639,23 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
}
/* loSegPoolGen -- get pool generation for an LO segment */
static PoolGen loSegPoolGen(Pool pool, Seg seg)
{
LO lo = MustBeA(LOPool, pool);
AVERT(Seg, seg);
return lo->pgen;
}
/* loSegWhiten -- whiten a segment */
static Res loSegWhiten(Seg seg, Trace trace)
{
LOSeg loseg = MustBeA(LOSeg, seg);
Pool pool = SegPool(seg);
LO lo = MustBeA(LOPool, pool);
PoolGen pgen = PoolSegPoolGen(pool, seg);
Buffer buffer;
Count grains, agedGrains, uncondemnedGrains;
@ -672,14 +682,14 @@ static Res loSegWhiten(Seg seg, Trace trace)
/* The unused part of the buffer remains buffered: the rest becomes old. */
AVER(loseg->bufferedGrains >= uncondemnedGrains);
agedGrains = loseg->bufferedGrains - uncondemnedGrains;
PoolGenAccountForAge(lo->pgen, PoolGrainsSize(pool, agedGrains),
PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains),
PoolGrainsSize(pool, loseg->newGrains), FALSE);
loseg->oldGrains += agedGrains + loseg->newGrains;
loseg->bufferedGrains = uncondemnedGrains;
loseg->newGrains = 0;
if (loseg->oldGrains > 0) {
GenDescCondemned(lo->pgen->gen, trace,
GenDescCondemned(pgen->gen, trace,
PoolGrainsSize(pool, loseg->oldGrains));
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
}
@ -774,6 +784,7 @@ DEFINE_CLASS(Pool, LOPool, klass)
klass->init = LOInit;
klass->bufferFill = LOBufferFill;
klass->bufferEmpty = LOBufferEmpty;
klass->segPoolGen = loSegPoolGen;
klass->totalSize = LOTotalSize;
klass->freeSize = LOFreeSize;
AVERT(PoolClass, klass);

View file

@ -381,23 +381,14 @@ static Bool AMSSegIsFree(Seg seg)
static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit)
{
AMSSeg amsseg;
AMS ams;
Count bgrain, lgrain;
Addr sbase;
AVERT(Seg, seg);
amsseg = Seg2AMSSeg(seg);
sbase = SegBase(seg);
ams = PoolAMS(SegPool(seg));
bgrain = AMSGrains(ams, AddrOffset(sbase, base));
lgrain = AMSGrains(ams, AddrOffset(sbase, limit));
AMSSeg amsseg = MustBeA(AMSSeg, seg);
Index baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
if (amsseg->allocTableInUse) {
return BTIsResRange(amsseg->allocTable, bgrain, lgrain);
Index limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
return BTIsResRange(amsseg->allocTable, baseIndex, limitIndex);
} else {
return amsseg->firstFree <= bgrain;
return amsseg->firstFree <= baseIndex;
}
}
@ -416,8 +407,8 @@ static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
amsseg = Seg2AMSSeg(seg);
baseIndex = AMS_ADDR_INDEX(seg, base);
limitIndex = AMS_ADDR_INDEX(seg, limit);
baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
if (amsseg->allocTableInUse) {
/* check that it's allocated */
@ -441,7 +432,8 @@ static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
AVER(amsseg->bufferedGrains >= unallocatedGrains);
amsseg->freeGrains += unallocatedGrains;
amsseg->bufferedGrains -= unallocatedGrains;
PoolGenAccountForEmpty(ams->pgen, 0, AMSGrainsSize(ams, unallocatedGrains),
PoolGenAccountForEmpty(ams->pgen, 0,
PoolGrainsSize(AMSPool(ams), unallocatedGrains),
FALSE);
}
@ -460,8 +452,8 @@ static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
amsseg = Seg2AMSSeg(seg);
baseIndex = AMS_ADDR_INDEX(seg, base);
limitIndex = AMS_ADDR_INDEX(seg, limit);
baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
if (amsseg->allocTableInUse) {
/* check that it's not allocated */