
Catch-up merge from master sources @186481 to branch/2014-04-15/mvffnoseg.

Copied from Perforce
 Change: 186484
 ServerID: perforce.ravenbrook.com
Commit 365e431f21 by Gareth Rees, 2014-06-11 12:42:58 +01:00
56 changed files with 1564 additions and 1036 deletions

View file

@ -105,7 +105,7 @@ static mps_addr_t make(void)
/* test -- the actual stress test */
static mps_pool_debug_option_s freecheckOptions =
{ NULL, 0, (const void *)"Dead", 4 };
{ NULL, 0, "Dead", 4 };
static void test_pool(mps_class_t pool_class, mps_arg_s args[],
mps_bool_t haveAmbiguous)

View file

@ -43,28 +43,25 @@ static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size)
/* stress -- create a pool of the requested type and allocate in it */
static mps_res_t stress(mps_class_t class, size_t (*size)(size_t i),
mps_arena_t arena, ...)
static mps_res_t stress(mps_arena_t arena, mps_align_t align,
size_t (*size)(size_t i, mps_align_t align),
const char *name, mps_class_t class, mps_arg_s args[])
{
mps_res_t res = MPS_RES_OK;
mps_pool_t pool;
mps_ap_t ap;
va_list arg;
size_t i, k;
int *ps[testSetSIZE];
size_t ss[testSetSIZE];
va_start(arg, arena);
res = mps_pool_create_v(&pool, arena, class, arg);
va_end(arg);
if (res != MPS_RES_OK)
return res;
printf("stress %s\n", name);
die(mps_pool_create_k(&pool, arena, class, args), "pool_create");
die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate");
/* allocate a load of objects */
for (i=0; i<testSetSIZE; ++i) {
ss[i] = (*size)(i);
ss[i] = (*size)(i, align);
res = make((mps_addr_t *)&ps[i], ap, ss[i]);
if (res != MPS_RES_OK)
@ -96,7 +93,7 @@ static mps_res_t stress(mps_class_t class, size_t (*size)(size_t i),
}
/* allocate some new objects */
for (i=testSetSIZE/2; i<testSetSIZE; ++i) {
ss[i] = (*size)(i);
ss[i] = (*size)(i, align);
res = make((mps_addr_t *)&ps[i], ap, ss[i]);
if (res != MPS_RES_OK)
goto allocFail;
@ -111,63 +108,72 @@ allocFail:
}
/* randomSizeAligned -- produce sizes both large and small,
* aligned by platform alignment */
/* randomSizeAligned -- produce sizes both large and small, aligned to
* align.
*/
static size_t randomSizeAligned(size_t i)
static size_t randomSizeAligned(size_t i, mps_align_t align)
{
size_t maxSize = 2 * 160 * 0x2000;
/* Reduce by a factor of 2 every 10 cycles. Total allocation about 40 MB. */
return alignUp(rnd() % max((maxSize >> (i / 10)), 2) + 1, MPS_PF_ALIGN);
return alignUp(rnd() % max((maxSize >> (i / 10)), 2) + 1, align);
}
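For reference, here is a minimal standalone sketch of the round-up step used above, assuming the usual power-of-two behaviour of the test library's alignUp (the helper name align_up below is made up for illustration):

#include <stdio.h>
#include <stddef.h>

/* align_up -- round size up to a power-of-two alignment (sketch of
 * what the test library's alignUp is assumed to do) */
static size_t align_up(size_t size, size_t align)
{
  return (size + align - 1) & ~(align - 1);
}

int main(void)
{
  /* e.g. with align = 16, a raw size of 37 rounds up to 48 */
  printf("%lu\n", (unsigned long)align_up(37, 16));
  return 0;
}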
static mps_pool_debug_option_s bothOptions = {
/* .fence_template = */ (const void *)"postpostpostpost",
/* .fence_size = */ MPS_PF_ALIGN,
/* .free_template = */ (const void *)"DEAD",
/* .fence_template = */ "post",
/* .fence_size = */ 4,
/* .free_template = */ "DEAD",
/* .free_size = */ 4
};
static mps_pool_debug_option_s fenceOptions = {
/* .fence_template = */ (const void *)"\0XXX ''\"\"'' XXX\0",
/* .fence_size = */ 16,
/* .fence_template = */ "123456789abcdef",
/* .fence_size = */ 15,
/* .free_template = */ NULL,
/* .free_size = */ 0
};
/* testInArena -- test all the pool classes in the given arena */
static void testInArena(mps_arena_t arena, mps_pool_debug_option_s *options)
{
mps_res_t res;
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE);
die(stress(arena, align, randomSizeAligned, "MVFF", mps_class_mvff(), args),
"stress MVFF");
} MPS_ARGS_END(args);
/* IWBN to test MVFFDebug, but the MPS doesn't support debugging APs, */
/* yet (MV Debug works here, because it fakes it through PoolAlloc). */
printf("MVFF\n");
res = stress(mps_class_mvff(), randomSizeAligned, arena,
(size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE);
if (res == MPS_RES_COMMIT_LIMIT) return;
die(res, "stress MVFF");
printf("MV debug\n");
res = stress(mps_class_mv_debug(), randomSizeAligned, arena,
options, (size_t)65536, (size_t)32, (size_t)65536);
if (res == MPS_RES_COMMIT_LIMIT) return;
die(res, "stress MV debug");
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, align, randomSizeAligned, "MV", mps_class_mv(), args),
"stress MV");
} MPS_ARGS_END(args);
printf("MV\n");
res = stress(mps_class_mv(), randomSizeAligned, arena,
(size_t)65536, (size_t)32, (size_t)65536);
if (res == MPS_RES_COMMIT_LIMIT) return;
die(res, "stress MV");
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options);
die(stress(arena, align, randomSizeAligned, "MV debug",
mps_class_mv_debug(), args),
"stress MV debug");
} MPS_ARGS_END(args);
printf("MVT\n");
res = stress(mps_class_mvt(), randomSizeAligned, arena,
(size_t)8, (size_t)32, (size_t)65536, (mps_word_t)4,
(mps_word_t)50);
if (res == MPS_RES_COMMIT_LIMIT) return;
die(res, "stress MVT");
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, align, randomSizeAligned, "MVT", mps_class_mvt(), args),
"stress MVT");
} MPS_ARGS_END(args);
}

View file

@ -204,8 +204,6 @@ static Res BufferInit(Buffer buffer, BufferClass class,
AVER(buffer != NULL);
AVERT(BufferClass, class);
AVERT(Pool, pool);
/* The PoolClass should support buffer protocols */
AVER(PoolHasAttr(pool, AttrBUF));
arena = PoolArena(pool);
/* Initialize the buffer. See <code/mpmst.h> for a definition of */
@ -382,8 +380,6 @@ void BufferFinish(Buffer buffer)
pool = BufferPool(buffer);
/* The PoolClass should support buffer protocols */
AVER(PoolHasAttr(pool, AttrBUF));
AVER(BufferIsReady(buffer));
/* <design/alloc-frame/#lw-frame.sync.trip> */

View file

@ -1,7 +1,7 @@
/* chain.h: GENERATION CHAINS
*
* $Id$
* Copyright (c) 2001 Ravenbrook Limited. See end of file for license.
* Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
*/
#ifndef chain_h
@ -31,7 +31,6 @@ typedef struct GenDescStruct {
ZoneSet zones; /* zoneset for this generation */
Size capacity; /* capacity in kB */
double mortality;
double proflow; /* predicted proportion of survivors promoted */
RingStruct locusRing; /* Ring of all PoolGen's in this GenDesc (locus) */
} GenDescStruct;
@ -48,14 +47,15 @@ typedef struct PoolGenStruct {
GenDesc gen; /* generation this belongs to */
/* link in ring of all PoolGen's in this GenDesc (locus) */
RingStruct genRing;
Size totalSize; /* total size of segs in gen in this pool */
Size newSize; /* size allocated since last GC */
/* newSize when TraceCreate was called. This is used in the
* TraceStartPoolGen event emitted at the start of a trace; at that
* time, newSize has already been diminished by Whiten so we can't
* use that value. TODO: This will not work well with multiple
* traces. */
Size newSizeAtCreate;
/* Accounting of memory in this generation for this pool */
STATISTIC_DECL(Size segs); /* number of segments */
Size totalSize; /* total (sum of segment sizes) */
STATISTIC_DECL(Size freeSize); /* unused (free or lost to fragmentation) */
Size newSize; /* allocated since last collection */
STATISTIC_DECL(Size oldSize); /* allocated prior to last collection */
Size newDeferredSize; /* new (but deferred) */
STATISTIC_DECL(Size oldDeferredSize); /* old (but deferred) */
} PoolGenStruct;
@ -90,13 +90,22 @@ extern Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool);
extern void PoolGenFinish(PoolGen pgen);
extern Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass class,
Size size, Bool withReservoirPermit, ArgList args);
extern void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize,
Size newSize, Bool deferred);
extern void PoolGenAccountForFill(PoolGen pgen, Size size, Bool deferred);
extern void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred);
extern void PoolGenAccountForAge(PoolGen pgen, Size aged, Bool deferred);
extern void PoolGenAccountForReclaim(PoolGen pgen, Size reclaimed, Bool deferred);
extern void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize);
extern void PoolGenAccountForSegSplit(PoolGen pgen);
extern void PoolGenAccountForSegMerge(PoolGen pgen);
#endif /* chain_h */
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2001-2002 Ravenbrook Limited <http://www.ravenbrook.com/>.
* Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*

View file

@ -157,7 +157,7 @@ $(PFM)\$(VARIETY)\cvmicv.exe: $(PFM)\$(VARIETY)\cvmicv.obj \
$(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
$(PFM)\$(VARIETY)\djbench.exe: $(PFM)\$(VARIETY)\djbench.obj \
$(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(TESTTHROBJ)
$(TESTLIBOBJ) $(TESTTHROBJ)
$(PFM)\$(VARIETY)\exposet0.exe: $(PFM)\$(VARIETY)\exposet0.obj \
$(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
@ -175,7 +175,7 @@ $(PFM)\$(VARIETY)\fotest.exe: $(PFM)\$(VARIETY)\fotest.obj \
$(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ)
$(PFM)\$(VARIETY)\gcbench.exe: $(PFM)\$(VARIETY)\gcbench.obj \
$(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ)
$(FMTTESTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ)
$(PFM)\$(VARIETY)\landtest.exe: $(PFM)\$(VARIETY)\landtest.obj \
$(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ)

View file

@ -359,6 +359,7 @@
/* Pool MV Configuration -- see <code/poolmv.c> */
#define MV_ALIGN_DEFAULT MPS_PF_ALIGN
#define MV_EXTEND_BY_DEFAULT ((Size)65536)
#define MV_AVG_SIZE_DEFAULT ((Size)32)
#define MV_MAX_SIZE_DEFAULT ((Size)65536)

View file

@ -123,10 +123,14 @@ Bool PoolDebugOptionsCheck(PoolDebugOptions opt)
ARG_DEFINE_KEY(pool_debug_options, PoolDebugOptions);
static PoolDebugOptionsStruct debugPoolOptionsDefault = {
"POST", 4, "DEAD", 4,
};
static Res DebugPoolInit(Pool pool, ArgList args)
{
Res res;
PoolDebugOptions options;
PoolDebugOptions options = &debugPoolOptionsDefault;
PoolDebugMixin debug;
TagInitMethod tagInit;
Size tagSize;
@ -134,10 +138,8 @@ static Res DebugPoolInit(Pool pool, ArgList args)
AVERT(Pool, pool);
/* TODO: Split this structure into separate keyword arguments,
now that we can support them. */
ArgRequire(&arg, args, MPS_KEY_POOL_DEBUG_OPTIONS);
options = (PoolDebugOptions)arg.val.pool_debug_options;
if (ArgPick(&arg, args, MPS_KEY_POOL_DEBUG_OPTIONS))
options = (PoolDebugOptions)arg.val.pool_debug_options;
AVERT(PoolDebugOptions, options);
@ -158,10 +160,6 @@ static Res DebugPoolInit(Pool pool, ArgList args)
/* into Addr memory, to avoid breaking <design/type/#addr.use>. */
debug->fenceSize = options->fenceSize;
if (debug->fenceSize != 0) {
if (debug->fenceSize % PoolAlignment(pool) != 0) {
res = ResPARAM;
goto alignFail;
}
/* Fenceposting turns on tagging */
if (tagInit == NULL) {
tagSize = 0;
@ -176,10 +174,6 @@ static Res DebugPoolInit(Pool pool, ArgList args)
/* into Addr memory, to avoid breaking <design/type#addr.use>. */
debug->freeSize = options->freeSize;
if (debug->freeSize != 0) {
if (PoolAlignment(pool) % debug->freeSize != 0) {
res = ResPARAM;
goto alignFail;
}
debug->freeTemplate = options->freeTemplate;
}
@ -208,7 +202,6 @@ static Res DebugPoolInit(Pool pool, ArgList args)
return ResOK;
tagFail:
alignFail:
SuperclassOfPool(pool)->finish(pool);
AVER(res != ResOK);
return res;
@ -234,39 +227,150 @@ static void DebugPoolFinish(Pool pool)
}
/* freeSplat -- splat free block with splat pattern
/* patternIterate -- call visitor for occurrences of pattern between
* base and limit
*
* If base is in a segment, the whole block has to be in it.
* pattern is an arbitrary pattern that's size bytes long.
*
* Imagine that the entirety of memory were covered by contiguous
* copies of pattern starting at address 0. Then call visitor for each
* copy (or part) of pattern that lies between base and limit. In each
* call, target is the address of the copy or part (where base <=
* target < limit); source is the corresponding byte of the pattern
* (where pattern <= source < pattern + size); and size is the length
* of the copy or part.
*/
typedef Bool (*patternVisitor)(Addr target, ReadonlyAddr source, Size size);
static Bool patternIterate(ReadonlyAddr pattern, Size size,
Addr base, Addr limit, patternVisitor visitor)
{
Addr p;
AVER(pattern != NULL);
AVER(0 < size);
AVER(base != NULL);
AVER(base <= limit);
p = base;
while (p < limit) {
Addr end = AddrAdd(p, size);
Addr rounded = AddrRoundUp(p, size);
Size offset = (Word)p % size;
if (end < p || rounded < p) {
/* Address range overflow */
break;
} else if (p == rounded && end <= limit) {
/* Room for a whole copy */
if (!(*visitor)(p, pattern, size))
return FALSE;
p = end;
} else if (p < rounded && rounded <= end && rounded <= limit) {
/* Copy up to rounded */
if (!(*visitor)(p, ReadonlyAddrAdd(pattern, offset),
AddrOffset(p, rounded)))
return FALSE;
p = rounded;
} else {
/* Copy up to limit */
AVER(limit <= end && (p == rounded || limit <= rounded));
if (!(*visitor)(p, ReadonlyAddrAdd(pattern, offset),
AddrOffset(p, limit)))
return FALSE;
p = limit;
}
}
return TRUE;
}
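As an illustration (a standalone sketch, not part of this change), the chunks that this iteration visits can be reproduced with plain integer arithmetic; target and source here are offsets rather than MPS Addr values:

#include <stdio.h>
#include <stddef.h>

/* tile -- print the (target, source offset, length) chunks that the
 * iteration above would visit for a pattern of the given size over
 * the range [base, limit), treating addresses as plain offsets */
static void tile(size_t size, size_t base, size_t limit)
{
  size_t p = base;
  while (p < limit) {
    size_t offset = p % size;    /* position within the pattern */
    size_t end;
    if (offset == 0 && p + size <= limit)
      end = p + size;            /* room for a whole copy */
    else if (offset != 0 && p + (size - offset) <= limit)
      end = p + (size - offset); /* partial copy up to the next boundary */
    else
      end = limit;               /* final partial copy */
    printf("target=%lu source=%lu len=%lu\n",
           (unsigned long)p, (unsigned long)offset, (unsigned long)(end - p));
    p = end;
  }
}

int main(void)
{
  tile(4, 6, 17); /* a 4-byte pattern over [6, 17) */
  return 0;
}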
/* patternCopy -- copy pattern to fill a range
*
* Fill the range of addresses from base (inclusive) to limit
* (exclusive) with copies of pattern (which is size bytes long).
*/
static Bool patternCopyVisitor(Addr target, ReadonlyAddr source, Size size)
{
(void)AddrCopy(target, source, size);
return TRUE;
}
static void patternCopy(ReadonlyAddr pattern, Size size, Addr base, Addr limit)
{
(void)patternIterate(pattern, size, base, limit, patternCopyVisitor);
}
/* patternCheck -- check pattern against a range
*
* Compare the range of addresses from base (inclusive) to limit
* (exclusive) with copies of pattern (which is size bytes long). The
* copies of pattern must be arranged so that fresh copies start at
* aligned addresses wherever possible.
*/
static Bool patternCheckVisitor(Addr target, ReadonlyAddr source, Size size)
{
return AddrComp(target, source, size) == 0;
}
static Bool patternCheck(ReadonlyAddr pattern, Size size, Addr base, Addr limit)
{
return patternIterate(pattern, size, base, limit, patternCheckVisitor);
}
/* debugPoolSegIterate -- iterate over a range of segments in an arena
*
* Expects to be called on a range corresponding to objects within a
* single pool.
*
* NOTE: This relies on pools consistently using segments
* contiguously.
*/
static void debugPoolSegIterate(Arena arena, Addr base, Addr limit,
void (*visitor)(Arena, Seg))
{
Seg seg;
if (SegOfAddr(&seg, arena, base)) {
do {
base = SegLimit(seg);
(*visitor)(arena, seg);
} while (base < limit && SegOfAddr(&seg, arena, base));
AVER(base >= limit); /* shouldn't run out of segments */
}
}
static void debugPoolShieldExpose(Arena arena, Seg seg)
{
ShieldExpose(arena, seg);
}
static void debugPoolShieldCover(Arena arena, Seg seg)
{
ShieldCover(arena, seg);
}
/* freeSplat -- splat free block with splat pattern */
static void freeSplat(PoolDebugMixin debug, Pool pool, Addr base, Addr limit)
{
Addr p, next;
Size freeSize = debug->freeSize;
Arena arena;
Seg seg = NULL; /* suppress "may be used uninitialized" */
Bool inSeg;
AVER(base < limit);
/* If the block is in a segment, make sure any shield is up. */
/* If the block is in one or more segments, make sure the segments
are exposed so that we can overwrite the block with the pattern. */
arena = PoolArena(pool);
inSeg = SegOfAddr(&seg, arena, base);
if (inSeg) {
AVER(limit <= SegLimit(seg));
ShieldExpose(arena, seg);
}
/* Write as many copies of the template as fit in the block. */
for (p = base, next = AddrAdd(p, freeSize);
next <= limit && p < next /* watch out for overflow in next */;
p = next, next = AddrAdd(next, freeSize))
(void)AddrCopy(p, debug->freeTemplate, freeSize);
/* Fill the tail of the block with a partial copy of the template. */
if (next > limit || next < p)
(void)AddrCopy(p, debug->freeTemplate, AddrOffset(p, limit));
if (inSeg) {
ShieldCover(arena, seg);
}
debugPoolSegIterate(arena, base, limit, debugPoolShieldExpose);
patternCopy(debug->freeTemplate, debug->freeSize, base, limit);
debugPoolSegIterate(arena, base, limit, debugPoolShieldCover);
}
@ -274,41 +378,17 @@ static void freeSplat(PoolDebugMixin debug, Pool pool, Addr base, Addr limit)
static Bool freeCheck(PoolDebugMixin debug, Pool pool, Addr base, Addr limit)
{
Addr p, next;
Size freeSize = debug->freeSize;
Res res;
Bool res;
Arena arena;
Seg seg = NULL; /* suppress "may be used uninitialized" */
Bool inSeg;
AVER(base < limit);
/* If the block is in a segment, make sure any shield is up. */
/* If the block is in one or more segments, make sure the segments
are exposed so we can read the pattern. */
arena = PoolArena(pool);
inSeg = SegOfAddr(&seg, arena, base);
if (inSeg) {
AVER(limit <= SegLimit(seg));
ShieldExpose(arena, seg);
}
/* Compare this to the AddrCopys in freeSplat. */
/* Check the complete copies of the template in the block. */
for (p = base, next = AddrAdd(p, freeSize);
next <= limit && p < next /* watch out for overflow in next */;
p = next, next = AddrAdd(next, freeSize))
if (AddrComp(p, debug->freeTemplate, freeSize) != 0) {
res = FALSE; goto done;
}
/* Check the partial copy of the template at the tail of the block. */
if (next > limit || next < p)
if (AddrComp(p, debug->freeTemplate, AddrOffset(p, limit)) != 0) {
res = FALSE; goto done;
}
res = TRUE;
done:
if (inSeg) {
ShieldCover(arena, seg);
}
debugPoolSegIterate(arena, base, limit, debugPoolShieldExpose);
res = patternCheck(debug->freeTemplate, debug->freeSize, base, limit);
debugPoolSegIterate(arena, base, limit, debugPoolShieldCover);
return res;
}
@ -354,63 +434,75 @@ static void freeCheckFree(PoolDebugMixin debug,
* start fp client object slop end fp
*
* slop is the extra allocation from rounding up the client request to
* the pool's alignment. The fenceposting code does this, so there's a
* better chance of the end fencepost being flush with the next object
* (can't be guaranteed, since the underlying pool could have allocated
* an even larger block). The alignment slop is filled from the
* fencepost template as well (as much as fits, .fence.size guarantees
* the template is larger).
* the pool's alignment. The fenceposting code adds this slop so that
* there's a better chance of the end fencepost being flush with the
* next object (though it can't be guaranteed, since the underlying
* pool could have allocated an even larger block). The alignment slop
* is filled from the fencepost template as well.
*
* Keep in sync with fenceCheck.
*/
static Res fenceAlloc(Addr *aReturn, PoolDebugMixin debug, Pool pool,
Size size, Bool withReservoir)
{
Res res;
Addr new, clientNew;
Size alignedSize;
Addr obj, startFence, clientNew, clientLimit, limit;
Size alignedFenceSize, alignedSize;
AVER(aReturn != NULL);
AVERT(PoolDebugMixin, debug);
AVERT(Pool, pool);
alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool));
alignedSize = SizeAlignUp(size, PoolAlignment(pool));
res = freeCheckAlloc(&new, debug, pool, alignedSize + 2*debug->fenceSize,
res = freeCheckAlloc(&obj, debug, pool,
alignedSize + 2 * alignedFenceSize,
withReservoir);
if (res != ResOK)
return res;
clientNew = AddrAdd(new, debug->fenceSize);
startFence = obj;
clientNew = AddrAdd(startFence, alignedFenceSize);
clientLimit = AddrAdd(clientNew, size);
limit = AddrAdd(clientNew, alignedSize + alignedFenceSize);
/* @@@@ shields? */
/* start fencepost */
(void)AddrCopy(new, debug->fenceTemplate, debug->fenceSize);
/* alignment slop */
(void)AddrCopy(AddrAdd(clientNew, size),
debug->fenceTemplate, alignedSize - size);
/* end fencepost */
(void)AddrCopy(AddrAdd(clientNew, alignedSize),
debug->fenceTemplate, debug->fenceSize);
patternCopy(debug->fenceTemplate, debug->fenceSize, startFence, clientNew);
patternCopy(debug->fenceTemplate, debug->fenceSize, clientLimit, limit);
*aReturn = clientNew;
return res;
return ResOK;
}
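A worked example of the offsets computed above (a standalone sketch with assumed numbers, not MPS code): with a pool alignment of 8, a 4-byte fencepost template, and a client request of 13 bytes, the block is laid out as follows.

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(s, a) (((s) + (a) - 1) & ~(size_t)((a) - 1))

int main(void)
{
  size_t alignment = 8, fenceSize = 4, size = 13;            /* assumed values */
  size_t alignedFenceSize = ALIGN_UP(fenceSize, alignment);  /* 8 */
  size_t alignedSize = ALIGN_UP(size, alignment);            /* 16 */
  size_t startFence = 0;                             /* block base, as an offset */
  size_t clientNew = startFence + alignedFenceSize;  /* 8: address given to client */
  size_t clientLimit = clientNew + size;             /* 21: end of client data */
  size_t limit = clientNew + alignedSize + alignedFenceSize; /* 32: block end */
  /* [startFence, clientNew) and [clientLimit, limit) are filled with
   * the fencepost pattern; the 3 bytes of alignment slop at 21..24
   * become part of the end fencepost. */
  printf("start fence [%lu,%lu) client [%lu,%lu) end fence [%lu,%lu)\n",
         (unsigned long)startFence, (unsigned long)clientNew,
         (unsigned long)clientNew, (unsigned long)clientLimit,
         (unsigned long)clientLimit, (unsigned long)limit);
  return 0;
}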
/* fenceCheck -- check fences of an object */
/* fenceCheck -- check fences of an object
*
* Keep in sync with fenceAlloc.
*/
static Bool fenceCheck(PoolDebugMixin debug, Pool pool, Addr obj, Size size)
{
Size alignedSize;
Addr startFence, clientNew, clientLimit, limit;
Size alignedFenceSize, alignedSize;
AVERT_CRITICAL(PoolDebugMixin, debug);
AVERT_CRITICAL(Pool, pool);
/* Can't check obj */
alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool));
alignedSize = SizeAlignUp(size, PoolAlignment(pool));
startFence = AddrSub(obj, alignedFenceSize);
clientNew = obj;
clientLimit = AddrAdd(clientNew, size);
limit = AddrAdd(clientNew, alignedSize + alignedFenceSize);
/* @@@@ shields? */
/* Compare this to the AddrCopys in fenceAlloc */
return (AddrComp(AddrSub(obj, debug->fenceSize), debug->fenceTemplate,
debug->fenceSize) == 0
&& AddrComp(AddrAdd(obj, size), debug->fenceTemplate,
alignedSize - size) == 0
&& AddrComp(AddrAdd(obj, alignedSize), debug->fenceTemplate,
debug->fenceSize) == 0);
return patternCheck(debug->fenceTemplate, debug->fenceSize,
startFence, clientNew)
&& patternCheck(debug->fenceTemplate, debug->fenceSize,
clientLimit, limit);
}
@ -419,13 +511,14 @@ static Bool fenceCheck(PoolDebugMixin debug, Pool pool, Addr obj, Size size)
static void fenceFree(PoolDebugMixin debug,
Pool pool, Addr old, Size size)
{
Size alignedSize;
Size alignedFenceSize, alignedSize;
ASSERT(fenceCheck(debug, pool, old, size), "fencepost check on free");
alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool));
alignedSize = SizeAlignUp(size, PoolAlignment(pool));
freeCheckFree(debug, pool, AddrSub(old, debug->fenceSize),
alignedSize + 2*debug->fenceSize);
freeCheckFree(debug, pool, AddrSub(old, alignedFenceSize),
alignedSize + 2 * alignedFenceSize);
}

View file

@ -26,9 +26,9 @@ typedef void (*TagInitMethod)(void* tag, va_list args);
*/
typedef struct PoolDebugOptionsStruct {
void* fenceTemplate;
const void *fenceTemplate;
Size fenceSize;
void* freeTemplate;
const void *freeTemplate;
Size freeSize;
/* TagInitMethod tagInit; */
/* Size tagSize; */
@ -43,9 +43,9 @@ typedef PoolDebugOptionsStruct *PoolDebugOptions;
typedef struct PoolDebugMixinStruct {
Sig sig;
Addr fenceTemplate;
const struct AddrStruct *fenceTemplate;
Size fenceSize;
Addr freeTemplate;
const struct AddrStruct *freeTemplate;
Size freeSize;
TagInitMethod tagInit;
Size tagSize;

View file

@ -187,7 +187,7 @@
EVENT(X, VMCompact , 0x0079, TRUE, Arena) \
EVENT(X, amcScanNailed , 0x0080, TRUE, Seg) \
EVENT(X, AMCTraceEnd , 0x0081, TRUE, Trace) \
EVENT(X, TraceStartPoolGen , 0x0082, TRUE, Trace) \
EVENT(X, TraceCreatePoolGen , 0x0082, TRUE, Trace) \
/* new events for performance analysis of large heaps. */ \
EVENT(X, TraceCondemnZones , 0x0083, TRUE, Trace) \
EVENT(X, ArenaGenZoneAdd , 0x0084, TRUE, Arena) \
@ -713,17 +713,18 @@
PARAM(X, 19, W, pRL) \
PARAM(X, 20, W, pRLr)
#define EVENT_TraceStartPoolGen_PARAMS(PARAM, X) \
PARAM(X, 0, P, chain) /* chain (or NULL for topGen) */ \
PARAM(X, 1, B, top) /* 1 for topGen, 0 otherwise */ \
PARAM(X, 2, W, index) /* index of generation in the chain */ \
PARAM(X, 3, P, gendesc) /* generation description */ \
PARAM(X, 4, W, capacity) /* capacity of generation */ \
PARAM(X, 5, D, mortality) /* mortality of generation */ \
PARAM(X, 6, W, zone) /* zone set of generation */ \
PARAM(X, 7, P, pool) /* pool */ \
PARAM(X, 8, W, totalSize) /* total size of pool gen */ \
PARAM(X, 9, W, newSizeAtCreate) /* new size of pool gen at trace create */
#define EVENT_TraceCreatePoolGen_PARAMS(PARAM, X) \
PARAM(X, 0, P, gendesc) /* generation description */ \
PARAM(X, 1, W, capacity) /* capacity of generation */ \
PARAM(X, 2, D, mortality) /* mortality of generation */ \
PARAM(X, 3, W, zone) /* zone set of generation */ \
PARAM(X, 4, P, pool) /* pool */ \
PARAM(X, 5, W, totalSize) /* total size of pool gen */ \
PARAM(X, 6, W, freeSize) /* free size of pool gen */ \
PARAM(X, 7, W, newSize) /* new size of pool gen */ \
PARAM(X, 8, W, oldSize) /* old size of pool gen */ \
PARAM(X, 9, W, newDeferredSize) /* new size (deferred) of pool gen */ \
PARAM(X, 10, W, oldDeferredSize) /* old size (deferred) of pool gen */
#define EVENT_TraceCondemnZones_PARAMS(PARAM, X) \
PARAM(X, 0, P, trace) /* the trace */ \

View file

@ -255,8 +255,8 @@ static void test_mode(int mode, mps_arena_t arena, mps_chain_t chain)
test_pool(mode, arena, chain, mps_class_amc());
test_pool(mode, arena, chain, mps_class_amcz());
test_pool(mode, arena, chain, mps_class_ams());
/* test_pool(mode, arena, chain, mps_class_lo()); TODO: job003773 */
/* test_pool(mode, arena, chain, mps_class_awl()); TODO: job003772 */
test_pool(mode, arena, chain, mps_class_awl());
test_pool(mode, arena, chain, mps_class_lo());
}

View file

@ -43,7 +43,7 @@ extern Land _mps_mvt_cbs(Pool);
/* "OOM" pool class -- dummy alloc/free pool class whose alloc()
* method always fails. */
* method always fails and whose free method does nothing. */
static Res oomAlloc(Addr *pReturn, Pool pool, Size size,
Bool withReservoirPermit)
@ -65,8 +65,9 @@ static Res oomAlloc(Addr *pReturn, Pool pool, Size size,
extern PoolClass OOMPoolClassGet(void);
DEFINE_POOL_CLASS(OOMPoolClass, this)
{
INHERIT_CLASS(this, AbstractAllocFreePoolClass);
INHERIT_CLASS(this, AbstractPoolClass);
this->alloc = oomAlloc;
this->free = PoolTrivFree;
this->size = sizeof(PoolStruct);
AVERT(PoolClass, this);
}

View file

@ -41,13 +41,6 @@ typedef union FreelistBlockUnion {
#define freelistEND ((FreelistBlock)0)
/* freelistMinimumAlignment -- the minimum allowed alignment for the
* address ranges in a free list: see <design/freelist/#impl.grain.align>
*/
#define freelistMinimumAlignment ((Align)sizeof(FreelistBlock))
/* FreelistTag -- return the tag of word */
#define FreelistTag(word) ((word) & 1)
@ -181,7 +174,7 @@ Bool FreelistCheck(Freelist fl)
land = FreelistLand(fl);
CHECKD(Land, land);
/* See <design/freelist/#impl.grain.align> */
CHECKL(AlignIsAligned(freelistAlignment(fl), freelistMinimumAlignment));
CHECKL(AlignIsAligned(freelistAlignment(fl), FreelistMinimumAlignment));
CHECKL((fl->list == freelistEND) == (fl->listSize == 0));
CHECKL((fl->list == freelistEND) == (fl->size == 0));
CHECKL(SizeIsAligned(fl->size, freelistAlignment(fl)));
@ -203,7 +196,7 @@ static Res freelistInit(Land land, ArgList args)
return res;
/* See <design/freelist/#impl.grain> */
AVER(AlignIsAligned(LandAlignment(land), freelistMinimumAlignment));
AVER(AlignIsAligned(LandAlignment(land), FreelistMinimumAlignment));
fl = freelistOfLand(land);
fl->list = freelistEND;
@ -448,7 +441,7 @@ static Bool freelistIterate(Land land, LandVisitor visitor,
void *closureP, Size closureS)
{
Freelist fl;
FreelistBlock cur;
FreelistBlock cur, next;
AVERT(Land, land);
fl = freelistOfLand(land);
@ -456,9 +449,12 @@ static Bool freelistIterate(Land land, LandVisitor visitor,
AVER(FUNCHECK(visitor));
/* closureP and closureS are arbitrary */
for (cur = fl->list; cur != freelistEND; cur = FreelistBlockNext(cur)) {
for (cur = fl->list; cur != freelistEND; cur = next) {
RangeStruct range;
Bool cont;
/* .next.first: Take next before calling the visitor, in case the
* visitor touches the block. */
next = FreelistBlockNext(cur);
RangeInit(&range, FreelistBlockBase(cur), FreelistBlockLimit(fl, cur));
cont = (*visitor)(land, &range, closureP, closureS);
if (!cont)
@ -486,20 +482,21 @@ static Bool freelistIterateAndDelete(Land land, LandDeleteVisitor visitor,
Bool delete = FALSE;
RangeStruct range;
Bool cont;
Size size;
next = FreelistBlockNext(cur); /* See .next.first. */
size = FreelistBlockSize(fl, cur);
RangeInit(&range, FreelistBlockBase(cur), FreelistBlockLimit(fl, cur));
cont = (*visitor)(&delete, land, &range, closureP, closureS);
next = FreelistBlockNext(cur);
if (delete) {
Size size = FreelistBlockSize(fl, cur);
freelistBlockSetPrevNext(fl, prev, next, -1);
AVER(fl->size >= size);
fl->size -= size;
} else {
prev = cur;
}
cur = next;
if (!cont)
return FALSE;
cur = next;
}
return TRUE;
}

View file

@ -1,7 +1,7 @@
/* freelist.h: FREE LIST ALLOCATOR INTERFACE
*
* $Id$
* Copyright (c) 2013 Ravenbrook Limited. See end of file for license.
* Copyright (c) 2013-2014 Ravenbrook Limited. See end of file for license.
*
* .source: <design/freelist/>.
*/
@ -17,6 +17,9 @@ typedef struct FreelistStruct *Freelist;
extern Bool FreelistCheck(Freelist freelist);
/* See <design/freelist/#impl.grain.align> */
#define FreelistMinimumAlignment ((Align)sizeof(FreelistBlock))
extern LandClass FreelistLandClassGet(void);
#endif /* freelist.h */
@ -24,7 +27,7 @@ extern LandClass FreelistLandClassGet(void);
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2013 Ravenbrook Limited <http://www.ravenbrook.com/>.
* Copyright (C) 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*

View file

@ -6,7 +6,8 @@
* DESIGN
*
* See <design/arenavm/> and <design/locus/> for basic locus stuff.
* See <design/trace/> for chains.
* See <design/trace/> for chains. See <design/strategy/> for the
* collection strategy.
*/
#include "chain.h"
@ -88,8 +89,6 @@ static Bool GenDescCheck(GenDesc gen)
/* nothing to check for capacity */
CHECKL(gen->mortality >= 0.0);
CHECKL(gen->mortality <= 1.0);
CHECKL(gen->proflow >= 0.0);
CHECKL(gen->proflow <= 1.0);
CHECKD_NOSIG(Ring, &gen->locusRing);
return TRUE;
}
@ -157,9 +156,9 @@ Res ChainCreate(Chain *chainReturn, Arena arena, size_t genCount,
gens[i].zones = ZoneSetEMPTY;
gens[i].capacity = params[i].capacity;
gens[i].mortality = params[i].mortality;
gens[i].proflow = 1.0; /* @@@@ temporary */
RingInit(&gens[i].locusRing);
gens[i].sig = GenDescSig;
AVERT(GenDesc, &gens[i]);
}
res = ControlAlloc(&p, arena, sizeof(ChainStruct), FALSE);
@ -251,7 +250,9 @@ GenDesc ChainGen(Chain chain, Index gen)
}
/* PoolGenAlloc -- allocate a segment in a pool generation */
/* PoolGenAlloc -- allocate a segment in a pool generation and update
* accounting
*/
Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass class, Size size,
Bool withReservoirPermit, ArgList args)
@ -293,6 +294,12 @@ Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass class, Size size,
EVENT3(ArenaGenZoneAdd, arena, gen, moreZones);
}
size = SegSize(seg);
pgen->totalSize += size;
STATISTIC_STAT ({
++ pgen->segs;
pgen->freeSize += size;
});
*segReturn = seg;
return ResOK;
}
@ -418,8 +425,13 @@ Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool)
pgen->pool = pool;
pgen->gen = gen;
RingInit(&pgen->genRing);
pgen->totalSize = (Size)0;
pgen->newSize = (Size)0;
STATISTIC(pgen->segs = 0);
pgen->totalSize = 0;
STATISTIC(pgen->freeSize = 0);
pgen->newSize = 0;
STATISTIC(pgen->oldSize = 0);
pgen->newDeferredSize = 0;
STATISTIC(pgen->oldDeferredSize = 0);
pgen->sig = PoolGenSig;
AVERT(PoolGen, pgen);
@ -433,6 +445,15 @@ Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool)
void PoolGenFinish(PoolGen pgen)
{
AVERT(PoolGen, pgen);
AVER(pgen->totalSize == 0);
AVER(pgen->newSize == 0);
AVER(pgen->newDeferredSize == 0);
STATISTIC_STAT ({
AVER(pgen->segs == 0);
AVER(pgen->freeSize == 0);
AVER(pgen->oldSize == 0);
AVER(pgen->oldDeferredSize == 0);
});
pgen->sig = SigInvalid;
RingRemove(&pgen->genRing);
@ -448,11 +469,202 @@ Bool PoolGenCheck(PoolGen pgen)
CHECKU(Pool, pgen->pool);
CHECKU(GenDesc, pgen->gen);
CHECKD_NOSIG(Ring, &pgen->genRing);
CHECKL(pgen->newSize <= pgen->totalSize);
STATISTIC_STAT ({
CHECKL((pgen->totalSize == 0) == (pgen->segs == 0));
CHECKL(pgen->totalSize >= pgen->segs * ArenaAlign(PoolArena(pgen->pool)));
CHECKL(pgen->totalSize == pgen->freeSize + pgen->newSize + pgen->oldSize
+ pgen->newDeferredSize + pgen->oldDeferredSize);
});
return TRUE;
}
/* PoolGenAccountForFill -- accounting for allocation
*
* Call this when the pool allocates memory to the client program via
* BufferFill. The deferred flag indicates whether the accounting of
* this memory (for the purpose of scheduling collections) should be
* deferred until later.
*
* See <design/strategy/#accounting.op.fill>
*/
void PoolGenAccountForFill(PoolGen pgen, Size size, Bool deferred)
{
AVERT(PoolGen, pgen);
AVERT(Bool, deferred);
STATISTIC_STAT ({
AVER(pgen->freeSize >= size);
pgen->freeSize -= size;
});
if (deferred)
pgen->newDeferredSize += size;
else
pgen->newSize += size;
}
/* PoolGenAccountForEmpty -- accounting for emptying a buffer
*
* Call this when the client program returns memory (that was never
* condemned) to the pool via BufferEmpty. The deferred flag is as for
* PoolGenAccountForFill.
*
* See <design/strategy/#accounting.op.empty>
*/
void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred)
{
AVERT(PoolGen, pgen);
AVERT(Bool, deferred);
if (deferred) {
AVER(pgen->newDeferredSize >= unused);
pgen->newDeferredSize -= unused;
} else {
AVER(pgen->newSize >= unused);
pgen->newSize -= unused;
}
STATISTIC(pgen->freeSize += unused);
}
/* PoolGenAccountForAge -- accounting for condemning
*
* Call this when memory is condemned via PoolWhiten. The size
* parameter should be the amount of memory that is being condemned
* for the first time. The deferred flag is as for PoolGenAccountForFill.
*
* See <design/strategy/#accounting.op.age>
*/
void PoolGenAccountForAge(PoolGen pgen, Size size, Bool deferred)
{
AVERT(PoolGen, pgen);
if (deferred) {
AVER(pgen->newDeferredSize >= size);
pgen->newDeferredSize -= size;
STATISTIC(pgen->oldDeferredSize += size);
} else {
AVER(pgen->newSize >= size);
pgen->newSize -= size;
STATISTIC(pgen->oldSize += size);
}
}
/* PoolGenAccountForReclaim -- accounting for reclaiming
*
* Call this when reclaiming memory, passing the amount of memory that
* was reclaimed. The deferred flag is as for PoolGenAccountForFill.
*
* See <design/strategy/#accounting.op.reclaim>
*/
void PoolGenAccountForReclaim(PoolGen pgen, Size reclaimed, Bool deferred)
{
AVERT(PoolGen, pgen);
AVERT(Bool, deferred);
STATISTIC_STAT ({
if (deferred) {
AVER(pgen->oldDeferredSize >= reclaimed);
pgen->oldDeferredSize -= reclaimed;
} else {
AVER(pgen->oldSize >= reclaimed);
pgen->oldSize -= reclaimed;
}
pgen->freeSize += reclaimed;
});
}
/* PoolGenUndefer -- finish deferring accounting
*
* Call this when exiting ramp mode, passing the amount of old
* (condemned at least once) and new (never condemned) memory whose
* accounting was deferred (for example, during a ramp).
*
* See <design/strategy/#accounting.op.undefer>
*/
void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize)
{
AVERT(PoolGen, pgen);
STATISTIC_STAT ({
AVER(pgen->oldDeferredSize >= oldSize);
pgen->oldDeferredSize -= oldSize;
pgen->oldSize += oldSize;
});
AVER(pgen->newDeferredSize >= newSize);
pgen->newDeferredSize -= newSize;
pgen->newSize += newSize;
}
/* PoolGenAccountForSegSplit -- accounting for splitting a segment */
void PoolGenAccountForSegSplit(PoolGen pgen)
{
AVERT(PoolGen, pgen);
STATISTIC_STAT ({
AVER(pgen->segs >= 1); /* must be at least one segment to split */
++ pgen->segs;
});
}
/* PoolGenAccountForSegMerge -- accounting for merging a segment */
void PoolGenAccountForSegMerge(PoolGen pgen)
{
AVERT(PoolGen, pgen);
STATISTIC_STAT ({
AVER(pgen->segs >= 2); /* must be at least two segments to merge */
-- pgen->segs;
});
}
/* PoolGenFree -- free a segment and update accounting
*
* Pass the amount of memory in the segment that is accounted as free,
* old, or new, respectively. The deferred flag is as for
* PoolGenAccountForFill.
*
* See <design/strategy/#accounting.op.free>
*/
void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize,
Size newSize, Bool deferred)
{
Size size;
AVERT(PoolGen, pgen);
AVERT(Seg, seg);
size = SegSize(seg);
AVER(freeSize + oldSize + newSize == size);
/* Pretend to age and reclaim the contents of the segment to ensure
* that the entire segment is accounted as free. */
PoolGenAccountForAge(pgen, newSize, deferred);
PoolGenAccountForReclaim(pgen, oldSize + newSize, deferred);
AVER(pgen->totalSize >= size);
pgen->totalSize -= size;
STATISTIC_STAT ({
AVER(pgen->segs > 0);
-- pgen->segs;
AVER(pgen->freeSize >= size);
pgen->freeSize -= size;
});
SegFree(seg);
}
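To make the accounting concrete, here is a minimal standalone model (simplified: deferred accounting omitted, plain size_t counters instead of the PoolGenStruct fields) of how the counters move through an alloc / fill / age / reclaim sequence while preserving the invariant checked by PoolGenCheck:

#include <assert.h>
#include <stddef.h>

/* Simplified model of the per-generation accounting counters. */
typedef struct {
  size_t totalSize, freeSize, newSize, oldSize;
} GenModel;

/* Mirrors the PoolGenCheck invariant (without the deferred sizes):
 * totalSize == freeSize + newSize + oldSize */
static void check(const GenModel *g)
{
  assert(g->totalSize == g->freeSize + g->newSize + g->oldSize);
}

int main(void)
{
  GenModel g = {0, 0, 0, 0};
  g.totalSize += 4096; g.freeSize += 4096; check(&g); /* PoolGenAlloc: new segment */
  g.freeSize -= 1024;  g.newSize += 1024;  check(&g); /* PoolGenAccountForFill */
  g.newSize -= 1024;   g.oldSize += 1024;  check(&g); /* PoolGenAccountForAge */
  g.oldSize -= 768;    g.freeSize += 768;  check(&g); /* PoolGenAccountForReclaim */
  /* PoolGenFree of the whole segment would first age and reclaim the
   * remainder, then remove the segment from totalSize and freeSize. */
  return 0;
}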
/* LocusInit -- initialize the locus module */
void LocusInit(Arena arena)
@ -466,7 +678,6 @@ void LocusInit(Arena arena)
gen->zones = ZoneSetEMPTY;
gen->capacity = 0; /* unused */
gen->mortality = 0.51;
gen->proflow = 0.0;
RingInit(&gen->locusRing);
gen->sig = GenDescSig;
AVERT(GenDesc, gen);

View file

@ -156,8 +156,12 @@ typedef const struct SrcIdStruct {
*
* Use these values for unused pointer, size closure arguments and
* check them in the callback or visitor.
*
* We use PointerAdd rather than a cast to avoid "warning C4306: 'type
* cast' : conversion from 'unsigned int' to 'Pointer' of greater
* size" on platform w3i6mv.
*/
#define UNUSED_POINTER ((Pointer)0xB60405ED) /* PointeR UNUSED */
#define UNUSED_POINTER PointerAdd(0, 0xB60405ED) /* PointeR UNUSED */
#define UNUSED_SIZE ((Size)0x520405ED) /* SiZe UNUSED */
@ -178,6 +182,19 @@ typedef const struct SrcIdStruct {
((type *)(void *)((char *)(p) - offsetof(type, field)))
/* BOOLFIELD -- declare a Boolean bitfield
*
* A Boolean bitfield needs to be unsigned (not Bool), so that its
* values are 0 and 1 (not 0 and -1), in order to avoid a sign
* conversion (which would be a compiler error) when assigning TRUE to
* the field.
*
* See <design/type/#bool.bitfield>
*/
#define BOOLFIELD(name) unsigned name : 1
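A standalone illustration of the point above (not MPS code; the struct and field names are made up): a signed one-bit field stores 0 and -1, so assigning TRUE does not survive a round trip, whereas the unsigned field that BOOLFIELD declares does.

#include <stdio.h>

struct flags {
  int signedBit : 1;        /* plain int bitfield: values are 0 and -1 (typically) */
  unsigned unsignedBit : 1; /* what BOOLFIELD declares: values are 0 and 1 */
};

int main(void)
{
  struct flags f;
  f.signedBit = 1;   /* commonly reads back as -1 (and may draw a warning) */
  f.unsignedBit = 1; /* reads back as 1 */
  printf("%d %u\n", f.signedBit, f.unsignedBit);
  return 0;
}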
/* BITFIELD -- coerce a value into a bitfield
*
* This coerces value to the given width and type in a way that avoids

View file

@ -87,6 +87,9 @@ extern Addr (AddrAlignDown)(Addr addr, Align align);
#define AddrIsAligned(p, a) WordIsAligned((Word)(p), a)
#define AddrAlignUp(p, a) ((Addr)WordAlignUp((Word)(p), a))
#define AddrRoundUp(p, r) ((Addr)WordRoundUp((Word)(p), r))
#define ReadonlyAddrAdd(p, s) ((ReadonlyAddr)((const char *)(p) + (s)))
#define SizeIsAligned(s, a) WordIsAligned((Word)(s), a)
#define SizeAlignUp(s, a) ((Size)WordAlignUp((Word)(s), a))
@ -281,13 +284,11 @@ extern BufferClass PoolNoBufferClass(void);
/* Abstract Pool Classes Interface -- see <code/poolabs.c> */
extern void PoolClassMixInAllocFree(PoolClass class);
extern void PoolClassMixInBuffer(PoolClass class);
extern void PoolClassMixInScan(PoolClass class);
extern void PoolClassMixInFormat(PoolClass class);
extern void PoolClassMixInCollect(PoolClass class);
extern AbstractPoolClass AbstractPoolClassGet(void);
extern AbstractAllocFreePoolClass AbstractAllocFreePoolClassGet(void);
extern AbstractBufferPoolClass AbstractBufferPoolClassGet(void);
extern AbstractBufferPoolClass AbstractSegBufPoolClassGet(void);
extern AbstractScanPoolClass AbstractScanPoolClassGet(void);

View file

@ -25,19 +25,19 @@
/* stress -- create a pool of the requested type and allocate in it */
static mps_res_t stress(mps_class_t class, size_t (*size)(size_t i),
mps_arena_t arena, ...)
static mps_res_t stress(mps_arena_t arena, size_t (*size)(size_t i),
const char *name, mps_class_t pool_class,
mps_arg_s *args)
{
mps_res_t res;
mps_pool_t pool;
va_list arg;
size_t i, k;
int *ps[testSetSIZE];
size_t ss[testSetSIZE];
va_start(arg, arena);
res = mps_pool_create_v(&pool, arena, class, arg);
va_end(arg);
printf("%s\n", name);
res = mps_pool_create_k(&pool, arena, pool_class, args);
if (res != MPS_RES_OK)
return res;
@ -87,7 +87,7 @@ static mps_res_t stress(mps_class_t class, size_t (*size)(size_t i),
}
/* randomSize -- produce sizes both latge and small */
/* randomSize -- produce sizes both large and small */
static size_t randomSize(size_t i)
{
@ -99,7 +99,7 @@ static size_t randomSize(size_t i)
}
/* randomSize8 -- produce sizes both latge and small, 8-byte aligned */
/* randomSize8 -- produce sizes both large and small, 8-byte aligned */
static size_t randomSize8(size_t i)
{
@ -121,61 +121,90 @@ static size_t fixedSize(size_t i)
static mps_pool_debug_option_s bothOptions = {
/* .fence_template = */ (const void *)"postpostpostpost",
/* .fence_size = */ MPS_PF_ALIGN,
/* .free_template = */ (const void *)"DEAD",
/* .fence_template = */ "post",
/* .fence_size = */ 4,
/* .free_template = */ "DEAD",
/* .free_size = */ 4
};
static mps_pool_debug_option_s fenceOptions = {
/* .fence_template = */ (const void *)"\0XXX ''\"\"'' XXX\0",
/* .fence_size = */ 16,
/* .fence_template = */ "123456789abcdef",
/* .fence_size = */ 15,
/* .free_template = */ NULL,
/* .free_size = */ 0
};
/* testInArena -- test all the pool classes in the given arena */
static void testInArena(mps_arena_t arena, mps_pool_debug_option_s *options)
static void testInArena(mps_arena_class_t arena_class, mps_arg_s *arena_args,
mps_pool_debug_option_s *options)
{
/* IWBN to test MVFFDebug, but the MPS doesn't support debugging */
/* cross-segment allocation (possibly MVFF ought not to). */
printf("MVFF\n");
die(stress(mps_class_mvff(), randomSize8, arena,
(size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE),
"stress MVFF");
printf("MV debug\n");
die(stress(mps_class_mv_debug(), randomSize, arena,
options, (size_t)65536, (size_t)32, (size_t)65536),
"stress MV debug");
mps_arena_t arena;
printf("MFS\n");
fixedSizeSize = 13;
die(stress(mps_class_mfs(), fixedSize, arena, (size_t)100000, fixedSizeSize),
die(mps_arena_create_k(&arena, arena_class, arena_args),
"mps_arena_create");
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE);
die(stress(arena, randomSize8, "MVFF", mps_class_mvff(), args),
"stress MVFF");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options);
die(stress(arena, randomSize8, "MVFF debug", mps_class_mvff_debug(), args),
"stress MVFF debug");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, randomSize, "MV", mps_class_mv(), args),
"stress MV");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options);
die(stress(arena, randomSize, "MV debug", mps_class_mv_debug(), args),
"stress MV debug");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
fixedSizeSize = 1 + rnd() % 64;
MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, fixedSizeSize);
MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, 100000);
die(stress(arena, fixedSize, "MFS", mps_class_mfs(), args),
"stress MFS");
} MPS_ARGS_END(args);
printf("MV\n");
die(stress(mps_class_mv(), randomSize, arena,
(size_t)65536, (size_t)32, (size_t)65536),
"stress MV");
mps_arena_destroy(arena);
}
int main(int argc, char *argv[])
{
mps_arena_t arena;
testlib_init(argc, argv);
die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE),
"mps_arena_create");
testInArena(arena, &bothOptions);
mps_arena_destroy(arena);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
testInArena(mps_arena_class_vm(), args, &bothOptions);
} MPS_ARGS_END(args);
die(mps_arena_create(&arena, mps_arena_class_vm(), smallArenaSIZE),
"mps_arena_create");
testInArena(arena, &fenceOptions);
mps_arena_destroy(arena);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, smallArenaSIZE);
testInArena(mps_arena_class_vm(), args, &fenceOptions);
} MPS_ARGS_END(args);
printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
return 0;

View file

@ -33,6 +33,7 @@ typedef void (*Fun)(void); /* <design/type/#fun> */
typedef MPS_T_WORD Word; /* <design/type/#word> */
typedef unsigned char Byte; /* <design/type/#byte> */
typedef struct AddrStruct *Addr; /* <design/type/#addr> */
typedef const struct AddrStruct *ReadonlyAddr; /* <design/type/#readonlyaddr> */
typedef Word Size; /* <design/type/#size> */
typedef Word Count; /* <design/type/#count> */
typedef Word Index; /* <design/type/#index> */
@ -76,7 +77,6 @@ typedef struct LockStruct *Lock; /* <code/lock.c>* */
typedef struct mps_pool_s *Pool; /* <design/pool/> */
typedef struct mps_class_s *PoolClass; /* <code/poolclas.c> */
typedef PoolClass AbstractPoolClass; /* <code/poolabs.c> */
typedef PoolClass AbstractAllocFreePoolClass; /* <code/poolabs.c> */
typedef PoolClass AbstractBufferPoolClass; /* <code/poolabs.c> */
typedef PoolClass AbstractSegBufPoolClass; /* <code/poolabs.c> */
typedef PoolClass AbstractScanPoolClass; /* <code/poolabs.c> */
@ -300,22 +300,9 @@ typedef Res (*LandDescribeMethod)(Land land, mps_lib_FILE *stream);
#define RankSetEMPTY BS_EMPTY(RankSet)
#define RankSetUNIV ((RankSet)((1u << RankLIMIT) - 1))
#define AttrFMT ((Attr)(1<<0)) /* <design/type/#attr> */
#define AttrSCAN ((Attr)(1<<1))
#define AttrPM_NO_READ ((Attr)(1<<2))
#define AttrPM_NO_WRITE ((Attr)(1<<3))
#define AttrALLOC ((Attr)(1<<4))
#define AttrFREE ((Attr)(1<<5))
#define AttrBUF ((Attr)(1<<6))
#define AttrBUF_RESERVE ((Attr)(1<<7))
#define AttrBUF_ALLOC ((Attr)(1<<8))
#define AttrGC ((Attr)(1<<9))
#define AttrINCR_RB ((Attr)(1<<10))
#define AttrINCR_WB ((Attr)(1<<11))
#define AttrMOVINGGC ((Attr)(1<<12))
#define AttrMASK (AttrFMT | AttrSCAN | AttrPM_NO_READ | \
AttrPM_NO_WRITE | AttrALLOC | AttrFREE | \
AttrBUF | AttrBUF_RESERVE | AttrBUF_ALLOC | \
AttrGC | AttrINCR_RB | AttrINCR_WB | AttrMOVINGGC)
#define AttrGC ((Attr)(1<<1))
#define AttrMOVINGGC ((Attr)(1<<2))
#define AttrMASK (AttrFMT | AttrGC | AttrMOVINGGC)
/* Segment preferences */

View file

@ -326,9 +326,9 @@ typedef struct _mps_sac_s {
/* .sacc: Keep in sync with <code/sac.h>. */
typedef struct mps_sac_class_s {
size_t _block_size;
size_t _cached_count;
unsigned _frequency;
size_t mps_block_size;
size_t mps_cached_count;
unsigned mps_frequency;
} mps_sac_class_s;
#define mps_sac_classes_s mps_sac_class_s

View file

@ -9,22 +9,6 @@
#include "mps.h"
/* The mvt pool class has five extra parameters to mps_pool_create:
* mps_res_t mps_pool_create(mps_pool_t * pool, mps_arena_t arena,
* mps_class_t mvt_class,
* size_t minimum_size,
* size_t mean_size,
* size_t maximum_size,
* mps_count_t reserve_depth
* mps_count_t fragmentation_limit);
* minimum_, mean_, and maximum_size are the mimimum, mean, and
* maximum (typical) size of objects expected to be allocated in the
* pool. reserve_depth is a measure of the expected hysteresis of the
* object population. fragmentation_limit is a percentage (between 0
* and 100): if the free space managed by the pool exceeds the
* specified percentage, the pool will resort to a "first fit"
* allocation policy.
*/
extern mps_class_t mps_class_mvt(void);
/* The mvt pool class supports two extensions to the pool protocol:

View file

@ -6,14 +6,10 @@
#include <stdio.h>
#include <stdarg.h>
#include "mpstd.h"
#include <time.h>
#include "mpscmvt.h"
#include "mps.h"
typedef mps_word_t mps_count_t; /* machine word (target dep.) */
#include "mpslib.h"
#include "mpsavm.h"
#include "testlib.h"
@ -71,11 +67,11 @@ static size_t randomSize(unsigned long i)
#define TEST_SET_SIZE 1234
#define TEST_LOOPS 27
static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size)
static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size, mps_align_t align)
{
mps_res_t res;
size = alignUp(size, MPS_PF_ALIGN);
size = alignUp(size, align);
do {
MPS_RESERVE_BLOCK(res, *p, ap, size);
@ -87,8 +83,9 @@ static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size)
}
static mps_res_t stress(mps_class_t class, mps_arena_t arena,
size_t (*size)(unsigned long i), mps_arg_s args[])
static mps_res_t stress(mps_arena_t arena, mps_align_t align,
size_t (*size)(unsigned long i),
mps_class_t class, mps_arg_s args[])
{
mps_res_t res;
mps_ap_t ap;
@ -105,7 +102,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
for(i=0; i<TEST_SET_SIZE; ++i) {
ss[i] = (*size)(i);
res = make((mps_addr_t *)&ps[i], ap, ss[i]);
res = make((mps_addr_t *)&ps[i], ap, ss[i], align);
if(res != MPS_RES_OK)
ss[i] = 0;
else
@ -144,7 +141,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
/* allocate some new objects */
for(i=x; i<TEST_SET_SIZE; ++i) {
size_t s = (*size)(i);
res = make((mps_addr_t *)&ps[i], ap, s);
res = make((mps_addr_t *)&ps[i], ap, s, align);
if(res != MPS_RES_OK)
break;
ss[i] = s;
@ -166,33 +163,29 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
}
static void stress_with_arena_class(mps_arena_class_t aclass, Bool zoned)
static void test_in_arena(mps_arena_class_t arena_class, mps_arg_s *arena_args)
{
mps_arena_t arena;
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, zoned);
die(mps_arena_create_k(&arena, aclass, args),
"mps_arena_create");
} MPS_ARGS_END(args);
die(mps_arena_create_k(&arena, arena_class, arena_args),
"mps_arena_create");
size_min = MPS_PF_ALIGN;
size_mean = 42;
size_max = 8192;
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MIN_SIZE, size_min);
MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, size_mean);
MPS_ARGS_ADD(args, MPS_KEY_MAX_SIZE, size_max);
MPS_ARGS_ADD(args, MPS_KEY_MVT_RESERVE_DEPTH, TEST_SET_SIZE/2);
MPS_ARGS_ADD(args, MPS_KEY_MVT_FRAG_LIMIT, 0.3);
die(stress(mps_class_mvt(), arena, randomSize, args), "stress MVT");
die(stress(arena, align, randomSize, mps_class_mvt(), args), "stress MVT");
} MPS_ARGS_END(args);
mps_arena_destroy(arena);
return;
}
@ -200,8 +193,16 @@ int main(int argc, char *argv[])
{
testlib_init(argc, argv);
stress_with_arena_class(mps_arena_class_vm(), TRUE);
stress_with_arena_class(mps_arena_class_vm(), FALSE);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
test_in_arena(mps_arena_class_vm(), args);
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE);
test_in_arena(mps_arena_class_vm(), args);
} MPS_ARGS_END(args);
printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
return 0;

View file

@ -286,7 +286,6 @@ Res PoolAlloc(Addr *pReturn, Pool pool, Size size,
AVER(pReturn != NULL);
AVERT(Pool, pool);
AVER(PoolHasAttr(pool, AttrALLOC));
AVER(size > 0);
AVERT(Bool, withReservoirPermit);
@ -316,7 +315,6 @@ Res PoolAlloc(Addr *pReturn, Pool pool, Size size,
void PoolFree(Pool pool, Addr old, Size size)
{
AVERT(Pool, pool);
AVER(PoolHasAttr(pool, AttrFREE));
AVER(old != NULL);
/* The pool methods should check that old is in pool. */
AVER(size > 0);
@ -381,7 +379,6 @@ Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
AVER(totalReturn != NULL);
AVERT(ScanState, ss);
AVERT(Pool, pool);
AVER(PoolHasAttr(pool, AttrSCAN));
AVERT(Seg, seg);
AVER(ss->arena == pool->arena);

View file

@ -18,7 +18,6 @@
*
* .hierarchy: define the following hierarchy of abstract pool classes:
* AbstractPoolClass - implements init, finish, describe
* AbstractAllocFreePoolClass - implements alloc & free
* AbstractBufferPoolClass - implements the buffer protocol
* AbstractSegBufPoolClass - uses SegBuf buffer class
* AbstractScanPoolClass - implements basic scanning
@ -31,7 +30,6 @@ SRCID(poolabs, "$Id$");
typedef PoolClassStruct AbstractPoolClassStruct;
typedef PoolClassStruct AbstractAllocFreePoolClassStruct;
typedef PoolClassStruct AbstractBufferPoolClassStruct;
typedef PoolClassStruct AbstractSegBufPoolClassStruct;
typedef PoolClassStruct AbstractScanPoolClassStruct;
@ -49,23 +47,11 @@ typedef PoolClassStruct AbstractCollectPoolClassStruct;
*/
/* PoolClassMixInAllocFree -- mix in the protocol for Alloc / Free */
void PoolClassMixInAllocFree(PoolClass class)
{
/* Can't check class because it's not initialized yet */
class->attr |= (AttrALLOC | AttrFREE);
class->alloc = PoolTrivAlloc;
class->free = PoolTrivFree;
}
/* PoolClassMixInBuffer -- mix in the protocol for buffer reserve / commit */
void PoolClassMixInBuffer(PoolClass class)
{
/* Can't check class because it's not initialized yet */
class->attr |= AttrBUF;
class->bufferFill = PoolTrivBufferFill;
class->bufferEmpty = PoolTrivBufferEmpty;
/* By default, buffered pools treat frame operations as NOOPs */
@ -81,7 +67,6 @@ void PoolClassMixInBuffer(PoolClass class)
void PoolClassMixInScan(PoolClass class)
{
/* Can't check class because it's not initialized yet */
class->attr |= AttrSCAN;
class->access = PoolSegAccess;
class->blacken = PoolTrivBlacken;
class->grey = PoolTrivGrey;
@ -164,12 +149,6 @@ DEFINE_CLASS(AbstractPoolClass, class)
class->sig = PoolClassSig;
}
DEFINE_CLASS(AbstractAllocFreePoolClass, class)
{
INHERIT_CLASS(class, AbstractPoolClass);
PoolClassMixInAllocFree(class);
}
DEFINE_CLASS(AbstractBufferPoolClass, class)
{
INHERIT_CLASS(class, AbstractPoolClass);

View file

@ -45,7 +45,6 @@ typedef struct amcGenStruct {
PoolGenStruct pgen;
RingStruct amcRing; /* link in list of gens in pool */
Buffer forward; /* forwarding buffer */
Count segs; /* number of segs in gen */
Sig sig; /* <code/misc.h#sig> */
} amcGenStruct;
@ -72,12 +71,19 @@ enum {
/* amcSegStruct -- AMC-specific fields appended to GCSegStruct
*
* .seg-ramp-new: The "new" flag is usually true, and indicates that the
* segment has been counted towards the pool generation's newSize. It is
* set to FALSE otherwise. This is used by both ramping and hash array
* allocations. TODO: The code for this is scrappy and needs refactoring,
* and the *reasons* for setting these flags need properly documenting.
* RB 2013-07-17
* .seg.old: The "old" flag is FALSE if the segment has never been
* collected, and so its size is accounted against the pool
* generation's newSize; it is TRUE if the segment has been collected
* at least once, and so its size is accounted against the pool
* generation's oldSize.
*
* .seg.deferred: The "deferred" flag is TRUE if its size accounting
* in the pool generation has been deferred. This is set if the
* segment was created in ramping mode (and so we don't want it to
* contribute to the pool generation's newSize and so provoke a
* collection via TracePoll), and by hash array allocations (where we
* don't want the allocation to provoke a collection that makes the
* location dependency stale immediately).
*/
typedef struct amcSegStruct *amcSeg;
@ -88,7 +94,8 @@ typedef struct amcSegStruct {
GCSegStruct gcSegStruct; /* superclass fields must come first */
amcGen gen; /* generation this segment belongs to */
Nailboard board; /* nailboard for this segment or NULL if none */
Bool new; /* .seg-ramp-new */
BOOLFIELD(old); /* .seg.old */
BOOLFIELD(deferred); /* .seg.deferred */
Sig sig; /* <code/misc.h#sig> */
} amcSegStruct;
@ -106,7 +113,8 @@ static Bool amcSegCheck(amcSeg amcseg)
CHECKD(Nailboard, amcseg->board);
CHECKL(SegNailed(amcSeg2Seg(amcseg)) != TraceSetEMPTY);
}
CHECKL(BoolCheck(amcseg->new));
/* CHECKL(BoolCheck(amcseg->old)); <design/type/#bool.bitfield.check> */
/* CHECKL(BoolCheck(amcseg->deferred)); <design/type/#bool.bitfield.check> */
return TRUE;
}
@ -141,7 +149,8 @@ static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size,
amcseg->gen = amcgen;
amcseg->board = NULL;
amcseg->new = TRUE;
amcseg->old = FALSE;
amcseg->deferred = FALSE;
amcseg->sig = amcSegSig;
AVERT(amcSeg, amcseg);
@ -478,7 +487,6 @@ typedef struct AMCStruct { /* <design/poolamc/#struct> */
ATTRIBUTE_UNUSED
static Bool amcGenCheck(amcGen gen)
{
Arena arena;
AMC amc;
CHECKS(amcGen, gen);
@ -487,9 +495,7 @@ static Bool amcGenCheck(amcGen gen)
CHECKU(AMC, amc);
CHECKD(Buffer, gen->forward);
CHECKD_NOSIG(Ring, &gen->amcRing);
CHECKL((gen->pgen.totalSize == 0) == (gen->segs == 0));
arena = amc->poolStruct.arena;
CHECKL(gen->pgen.totalSize >= gen->segs * ArenaAlign(arena));
return TRUE;
}
@ -666,7 +672,6 @@ static Res amcGenCreate(amcGen *genReturn, AMC amc, GenDesc gen)
if(res != ResOK)
goto failGenInit;
RingInit(&amcgen->amcRing);
amcgen->segs = 0;
amcgen->forward = buffer;
amcgen->sig = amcGenSig;
@ -693,8 +698,6 @@ static void amcGenDestroy(amcGen gen)
Arena arena;
AVERT(amcGen, gen);
AVER(gen->segs == 0);
AVER(gen->pgen.totalSize == 0);
EVENT1(AMCGenDestroy, gen);
arena = PoolArena(amcGenPool(gen));
@ -720,7 +723,7 @@ static Res amcGenDescribe(amcGen gen, mps_lib_FILE *stream)
" amcGen $P {\n", (WriteFP)gen,
" buffer $P\n", gen->forward,
" segs $U, totalSize $U, newSize $U\n",
(WriteFU)gen->segs,
(WriteFU)gen->pgen.segs,
(WriteFU)gen->pgen.totalSize,
(WriteFU)gen->pgen.newSize,
" } amcGen\n", NULL);
@ -942,21 +945,19 @@ static void AMCFinish(Pool pool)
RING_FOR(node, &amc->genRing, nextNode) {
amcGen gen = RING_ELT(amcGen, amcRing, node);
BufferDetach(gen->forward, pool);
/* Maintain invariant < totalSize. */
gen->pgen.newSize = (Size)0;
}
ring = PoolSegRing(pool);
RING_FOR(node, ring, nextNode) {
Seg seg = SegOfPoolRing(node);
Size size;
amcGen gen = amcSegGen(seg);
--gen->segs;
size = SegSize(seg);
gen->pgen.totalSize -= size;
SegFree(seg);
amcSeg amcseg = Seg2amcSeg(seg);
AVERT(amcSeg, amcseg);
PoolGenFree(&gen->pgen, seg,
0,
amcseg->old ? SegSize(seg) : 0,
amcseg->old ? 0 : SegSize(seg),
amcseg->deferred);
}
/* Disassociate forwarding buffers from gens before they are */
@ -992,7 +993,6 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
amcGen gen;
PoolGen pgen;
amcBuf amcbuf;
Bool isRamping;
AVERT(Pool, pool);
amc = Pool2AMC(pool);
@ -1013,7 +1013,7 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
pgen = &gen->pgen;
/* Create and attach segment. The location of this segment is */
/* expressed as a generation number. We rely on the arena to */
/* expressed via the pool generation. We rely on the arena to */
/* organize locations appropriately. */
alignedSize = SizeAlignUp(size, ArenaAlign(arena));
MPS_ARGS_BEGIN(args) {
@ -1031,23 +1031,17 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
else
SegSetRankAndSummary(seg, BufferRankSet(buffer), RefSetUNIV);
/* Put the segment in the generation indicated by the buffer. */
++gen->segs;
pgen->totalSize += alignedSize;
/* If ramping, or if the buffer is intended for allocating
hash table arrays, don't count it towards newSize. */
isRamping = (amc->rampMode == RampRAMPING &&
buffer == amc->rampGen->forward &&
gen == amc->rampGen);
if (isRamping || amcbuf->forHashArrays) {
Seg2amcSeg(seg)->new = FALSE;
} else {
pgen->newSize += alignedSize;
/* If ramping, or if the buffer is intended for allocating hash
* table arrays, defer the size accounting. */
if ((amc->rampMode == RampRAMPING
&& buffer == amc->rampGen->forward
&& gen == amc->rampGen)
|| amcbuf->forHashArrays)
{
Seg2amcSeg(seg)->deferred = TRUE;
}
base = SegBase(seg);
*baseReturn = base;
if(alignedSize < AMCLargeSegPAGES * ArenaAlign(arena)) {
/* Small or Medium segment: give the buffer the entire seg. */
limit = AddrAdd(base, alignedSize);
@ -1069,6 +1063,9 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
ShieldCover(arena, seg);
}
}
PoolGenAccountForFill(pgen, SegSize(seg), Seg2amcSeg(seg)->deferred);
*baseReturn = base;
*limitReturn = limit;
return ResOK;
}
@ -1111,6 +1108,11 @@ static void AMCBufferEmpty(Pool pool, Buffer buffer,
(*pool->format->pad)(init, size);
ShieldCover(arena, seg);
}
/* The unused part of the buffer is not reused by AMC, so we pass 0
* for the unused argument. This call therefore has no effect on the
* accounting, but we call it anyway for consistency. */
PoolGenAccountForEmpty(&amcSegGen(seg)->pgen, 0, Seg2amcSeg(seg)->deferred);
}
@ -1174,16 +1176,19 @@ static void AMCRampEnd(Pool pool, Buffer buf)
NOTREACHED;
}
/* Adjust amc->rampGen->pgen.newSize: Now count all the segments */
/* in the ramp generation as new (except if they're white). */
/* Now all the segments in the ramp generation contribute to the
* pool generation's sizes. */
RING_FOR(node, PoolSegRing(pool), nextNode) {
Seg seg = SegOfPoolRing(node);
if(amcSegGen(seg) == amc->rampGen && !Seg2amcSeg(seg)->new
amcSeg amcseg = Seg2amcSeg(seg);
if(amcSegGen(seg) == amc->rampGen
&& amcseg->deferred
&& SegWhite(seg) == TraceSetEMPTY)
{
pgen->newSize += SegSize(seg);
Seg2amcSeg(seg)->new = TRUE;
PoolGenUndefer(pgen,
amcseg->old ? SegSize(seg) : 0,
amcseg->old ? 0 : SegSize(seg));
amcseg->deferred = FALSE;
}
}
}
@ -1197,14 +1202,17 @@ static void AMCRampEnd(Pool pool, Buffer buf)
*/
static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
{
Size condemned = 0;
amcGen gen;
AMC amc;
Buffer buffer;
amcSeg amcseg;
Res res;
AVERT(Pool, pool);
AVERT(Trace, trace);
AVERT(Seg, seg);
amcseg = Seg2amcSeg(seg);
buffer = SegBuffer(seg);
if(buffer != NULL) {
@ -1260,14 +1268,14 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
/* @@@@ We could subtract all the nailed grains. */
/* Relies on unsigned arithmetic wrapping round */
/* on under- and overflow (which it does). */
trace->condemned -= AddrOffset(BufferScanLimit(buffer),
BufferLimit(buffer));
condemned -= AddrOffset(BufferScanLimit(buffer), BufferLimit(buffer));
}
}
}
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
trace->condemned += SegSize(seg);
condemned += SegSize(seg);
trace->condemned += condemned;
amc = Pool2AMC(pool);
AVERT(AMC, amc);
@ -1291,9 +1299,9 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
gen = amcSegGen(seg);
AVERT(amcGen, gen);
if(Seg2amcSeg(seg)->new) {
gen->pgen.newSize -= SegSize(seg);
Seg2amcSeg(seg)->new = FALSE;
if (!amcseg->old) {
PoolGenAccountForAge(&gen->pgen, SegSize(seg), amcseg->deferred);
amcseg->old = TRUE;
}
/* Ensure we are forwarding into the right generation. */
@ -1994,9 +2002,7 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
/* We may not free a buffered seg. */
AVER(SegBuffer(seg) == NULL);
--gen->segs;
gen->pgen.totalSize -= SegSize(seg);
SegFree(seg);
PoolGenFree(&gen->pgen, seg, 0, SegSize(seg), 0, Seg2amcSeg(seg)->deferred);
} else {
/* Seg retained */
STATISTIC_STAT( {
@ -2040,7 +2046,6 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg)
{
AMC amc;
amcGen gen;
Size size;
AVERT_CRITICAL(Pool, pool);
amc = Pool2AMC(pool);
@ -2073,13 +2078,9 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg)
/* segs should have been nailed anyway). */
AVER(SegBuffer(seg) == NULL);
--gen->segs;
size = SegSize(seg);
gen->pgen.totalSize -= size;
trace->reclaimSize += SegSize(seg);
trace->reclaimSize += size;
SegFree(seg);
PoolGenFree(&gen->pgen, seg, 0, SegSize(seg), 0, Seg2amcSeg(seg)->deferred);
}
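The accounting entry points used throughout this change share a small interface; a sketch of their apparent signatures, inferred purely from the call sites in this diff (parameter names are illustrative; the authoritative declarations live in the pool generation module):

/* Inferred, illustrative prototypes -- not copied from the source: */
extern void PoolGenAccountForFill(PoolGen pgen, Size size, Bool deferred);
extern void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred);
extern void PoolGenAccountForAge(PoolGen pgen, Size size, Bool deferred);
extern void PoolGenAccountForReclaim(PoolGen pgen, Size size, Bool deferred);
extern void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize);
extern void PoolGenAccountForSegMerge(PoolGen pgen);
extern void PoolGenAccountForSegSplit(PoolGen pgen);
extern void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize,
                        Size newSize, Bool deferred);

PoolGenFree also appears to free the segment itself, since the SegFree calls it replaces are removed.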

View file

@ -55,7 +55,7 @@ Bool AMSSegCheck(AMSSeg amsseg)
CHECKL(amsseg->grains == AMSGrains(amsseg->ams, SegSize(seg)));
CHECKL(amsseg->grains > 0);
CHECKL(amsseg->grains >= amsseg->free + amsseg->newAlloc);
CHECKL(amsseg->grains == amsseg->freeGrains + amsseg->oldGrains + amsseg->newGrains);
CHECKL(BoolCheck(amsseg->allocTableInUse));
if (!amsseg->allocTableInUse)
@ -94,7 +94,7 @@ void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockStepMethod f, void *p)
pool = SegPool(AMSSeg2Seg(amsseg));
seg = AMSSeg2Seg(amsseg);
if (amsseg->free == 0)
if (amsseg->freeGrains == 0)
return;
if (amsseg->allocTableInUse) {
Index base, limit, next;
@ -107,10 +107,8 @@ void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockStepMethod f, void *p)
(*f)(AMS_INDEX_ADDR(seg, base), AMS_INDEX_ADDR(seg, limit), pool, p);
next = limit + 1;
}
} else {
if ( amsseg->firstFree < amsseg->grains )
(*f)(AMS_INDEX_ADDR(seg, amsseg->firstFree), SegLimit(seg), pool, p);
}
} else if (amsseg->firstFree < amsseg->grains)
(*f)(AMS_INDEX_ADDR(seg, amsseg->firstFree), SegLimit(seg), pool, p);
}
@ -129,7 +127,7 @@ void AMSSegFreeCheck(AMSSeg amsseg)
AVERT(AMSSeg, amsseg);
if (amsseg->free == 0)
if (amsseg->freeGrains == 0)
return;
/* If it's not a debug class, don't bother walking. */
@ -241,8 +239,9 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size,
goto failNextMethod;
amsseg->grains = size >> ams->grainShift;
amsseg->free = amsseg->grains;
amsseg->newAlloc = (Count)0;
amsseg->freeGrains = amsseg->grains;
amsseg->oldGrains = (Count)0;
amsseg->newGrains = (Count)0;
amsseg->marksChanged = FALSE; /* <design/poolams/#marked.unused> */
amsseg->ambiguousFixes = FALSE;
@ -263,7 +262,6 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size,
&amsseg->segRing);
amsseg->sig = AMSSegSig;
ams->size += size;
AVERT(AMSSeg, amsseg);
return ResOK;
@ -299,8 +297,6 @@ static void AMSSegFinish(Seg seg)
RingRemove(&amsseg->segRing);
RingFinish(&amsseg->segRing);
AVER(ams->size >= SegSize(seg));
ams->size -= SegSize(seg);
amsseg->sig = SigInvalid;
/* finish the superclass fields last */
@ -359,7 +355,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
/* checks for .grain-align */
AVER(allGrains == AddrOffset(base, limit) >> ams->grainShift);
/* checks for .empty */
AVER(amssegHi->free == hiGrains);
AVER(amssegHi->freeGrains == hiGrains);
AVER(!amssegHi->marksChanged);
/* .alloc-early */
@ -393,8 +389,9 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
MERGE_TABLES(nonwhiteTable, BTSetRange);
amsseg->grains = allGrains;
amsseg->free = amsseg->free + amssegHi->free;
amsseg->newAlloc = amsseg->newAlloc + amssegHi->newAlloc;
amsseg->freeGrains = amsseg->freeGrains + amssegHi->freeGrains;
amsseg->oldGrains = amsseg->oldGrains + amssegHi->oldGrains;
amsseg->newGrains = amsseg->newGrains + amssegHi->newGrains;
/* other fields in amsseg are unaffected */
RingRemove(&amssegHi->segRing);
@ -402,6 +399,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
amssegHi->sig = SigInvalid;
AVERT(AMSSeg, amsseg);
PoolGenAccountForSegMerge(&ams->pgen);
return ResOK;
failSuper:
@ -443,7 +441,7 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
/* checks for .grain-align */
AVER(allGrains == amsseg->grains);
/* checks for .empty */
AVER(amsseg->free >= hiGrains);
AVER(amsseg->freeGrains >= hiGrains);
if (amsseg->allocTableInUse) {
AVER(BTIsResRange(amsseg->allocTable, loGrains, allGrains));
} else {
@ -485,9 +483,11 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
amsseg->grains = loGrains;
amssegHi->grains = hiGrains;
amsseg->free -= hiGrains;
amssegHi->free = hiGrains;
amssegHi->newAlloc = (Count)0;
AVER(amsseg->freeGrains >= hiGrains);
amsseg->freeGrains -= hiGrains;
amssegHi->freeGrains = hiGrains;
amssegHi->oldGrains = (Count)0;
amssegHi->newGrains = (Count)0;
amssegHi->marksChanged = FALSE; /* <design/poolams/#marked.unused> */
amssegHi->ambiguousFixes = FALSE;
@ -505,6 +505,7 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
amssegHi->sig = AMSSegSig;
AVERT(AMSSeg, amsseg);
AVERT(AMSSeg, amssegHi);
PoolGenAccountForSegSplit(&ams->pgen);
return ResOK;
failSuper:
@ -553,6 +554,9 @@ static Res AMSSegDescribe(Seg seg, mps_lib_FILE *stream)
res = WriteF(stream,
" AMS $P\n", (WriteFP)amsseg->ams,
" grains $W\n", (WriteFW)amsseg->grains,
" freeGrains $W\n", (WriteFW)amsseg->freeGrains,
" oldGrains $W\n", (WriteFW)amsseg->oldGrains,
" newGrains $W\n", (WriteFW)amsseg->newGrains,
NULL);
if (res != ResOK) return res;
if (amsseg->allocTableInUse)
@ -731,9 +735,15 @@ static void AMSSegsDestroy(AMS ams)
ring = PoolSegRing(AMS2Pool(ams));
RING_FOR(node, ring, next) {
Seg seg = SegOfPoolRing(node);
AVER(Seg2AMSSeg(seg)->ams == ams);
AMSSegFreeCheck(Seg2AMSSeg(seg));
SegFree(seg);
AMSSeg amsseg = Seg2AMSSeg(seg);
AVERT(AMSSeg, amsseg);
AVER(amsseg->ams == ams);
AMSSegFreeCheck(amsseg);
PoolGenFree(&ams->pgen, seg,
AMSGrainsSize(ams, amsseg->freeGrains),
AMSGrainsSize(ams, amsseg->oldGrains),
AMSGrainsSize(ams, amsseg->newGrains),
FALSE);
}
}
@ -836,8 +846,6 @@ Res AMSInitInternal(AMS ams, Format format, Chain chain, unsigned gen,
ams->segsDestroy = AMSSegsDestroy;
ams->segClass = AMSSegClassGet;
ams->size = 0;
ams->sig = AMSSig;
AVERT(AMS, ams);
return ResOK;
@ -904,15 +912,17 @@ static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn,
} else {
if (amsseg->firstFree > amsseg->grains - grains)
return FALSE;
base = amsseg->firstFree; limit = amsseg->grains;
base = amsseg->firstFree;
limit = amsseg->grains;
amsseg->firstFree = limit;
}
/* We don't place buffers on white segments, so no need to adjust colour. */
AVER(!amsseg->colourTablesInUse);
amsseg->free -= limit - base;
amsseg->newAlloc += limit - base;
AVER(amsseg->freeGrains >= limit - base);
amsseg->freeGrains -= limit - base;
amsseg->newGrains += limit - base;
*baseReturn = base;
*limitReturn = limit;
return TRUE;
@ -958,12 +968,15 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
RING_FOR(node, ring, nextNode) {
AMSSeg amsseg = RING_ELT(AMSSeg, segRing, node);
AVERT_CRITICAL(AMSSeg, amsseg);
if (amsseg->free >= AMSGrains(ams, size)) {
if (amsseg->freeGrains >= AMSGrains(ams, size)) {
seg = AMSSeg2Seg(amsseg);
if (SegRankSet(seg) == rankSet && SegBuffer(seg) == NULL
if (SegRankSet(seg) == rankSet
&& SegBuffer(seg) == NULL
/* Can't use a white or grey segment, see d.m.p.fill.colour. */
&& SegWhite(seg) == TraceSetEMPTY && SegGrey(seg) == TraceSetEMPTY) {
&& SegWhite(seg) == TraceSetEMPTY
&& SegGrey(seg) == TraceSetEMPTY)
{
b = amsSegAlloc(&base, &limit, seg, size);
if (b)
goto found;
@ -983,10 +996,10 @@ found:
baseAddr = AMS_INDEX_ADDR(seg, base); limitAddr = AMS_INDEX_ADDR(seg, limit);
DebugPoolFreeCheck(pool, baseAddr, limitAddr);
allocatedSize = AddrOffset(baseAddr, limitAddr);
ams->pgen.totalSize += allocatedSize;
ams->pgen.newSize += allocatedSize;
*baseReturn = baseAddr; *limitReturn = limitAddr;
PoolGenAccountForFill(&ams->pgen, allocatedSize, FALSE);
*baseReturn = baseAddr;
*limitReturn = limitAddr;
return ResOK;
}
@ -1040,9 +1053,9 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
/* The nonwhiteTable is shared with allocTable and in use, so we
* mustn't start using allocTable. In this case we know: 1. the
* segment has been condemned (because colour tables are turned
* on in AMSCondemn); 2. the segment has not yet been reclaimed
* on in AMSWhiten); 2. the segment has not yet been reclaimed
* (because colour tables are turned off in AMSReclaim); 3. the
* unused portion of the buffer is black (see AMSCondemn). So we
* unused portion of the buffer is black (see AMSWhiten). So we
* need to whiten the unused portion of the buffer. The
* allocTable will be turned back on (if necessary) in
* AMSReclaim, when we know that the nonwhite grains are exactly
@ -1061,20 +1074,19 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
if (amsseg->colourTablesInUse)
AMS_RANGE_WHITEN(seg, initIndex, limitIndex);
amsseg->free += limitIndex - initIndex;
/* The unused portion of the buffer must be new, since it's not condemned. */
AVER(amsseg->newAlloc >= limitIndex - initIndex);
amsseg->newAlloc -= limitIndex - initIndex;
amsseg->freeGrains += limitIndex - initIndex;
/* Unused portion of the buffer must be new, since it's not condemned. */
AVER(amsseg->newGrains >= limitIndex - initIndex);
amsseg->newGrains -= limitIndex - initIndex;
size = AddrOffset(init, limit);
ams->pgen.totalSize -= size;
ams->pgen.newSize -= size;
PoolGenAccountForEmpty(&ams->pgen, size, FALSE);
}
/* amsRangeCondemn -- Condemn a part of an AMS segment
/* amsRangeWhiten -- Condemn a part of an AMS segment
* Allow calling it with base = limit, to simplify the callers.
*/
static void amsRangeCondemn(Seg seg, Index base, Index limit)
static void amsRangeWhiten(Seg seg, Index base, Index limit)
{
if (base != limit) {
AMSSeg amsseg = Seg2AMSSeg(seg);
@ -1087,9 +1099,9 @@ static void amsRangeCondemn(Seg seg, Index base, Index limit)
}
/* AMSCondemn -- the pool class segment condemning method */
/* AMSWhiten -- the pool class segment condemning method */
static Res AMSCondemn(Pool pool, Trace trace, Seg seg)
static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
{
AMS ams;
AMSSeg amsseg;
@ -1139,23 +1151,24 @@ static Res AMSCondemn(Pool pool, Trace trace, Seg seg)
scanLimitIndex = AMS_ADDR_INDEX(seg, BufferScanLimit(buffer));
limitIndex = AMS_ADDR_INDEX(seg, BufferLimit(buffer));
amsRangeCondemn(seg, 0, scanLimitIndex);
amsRangeWhiten(seg, 0, scanLimitIndex);
if (scanLimitIndex < limitIndex)
AMS_RANGE_BLACKEN(seg, scanLimitIndex, limitIndex);
amsRangeCondemn(seg, limitIndex, amsseg->grains);
amsRangeWhiten(seg, limitIndex, amsseg->grains);
/* We didn't condemn the buffer, subtract it from the count. */
uncondemned = limitIndex - scanLimitIndex;
} else { /* condemn whole seg */
amsRangeCondemn(seg, 0, amsseg->grains);
amsRangeWhiten(seg, 0, amsseg->grains);
uncondemned = (Count)0;
}
trace->condemned += SegSize(seg) - AMSGrainsSize(ams, uncondemned);
/* The unused part of the buffer is new allocation by definition. */
ams->pgen.newSize -= AMSGrainsSize(ams, amsseg->newAlloc - uncondemned);
amsseg->newAlloc = uncondemned;
/* The unused part of the buffer remains new: the rest becomes old. */
PoolGenAccountForAge(&ams->pgen, AMSGrainsSize(ams, amsseg->newGrains - uncondemned), FALSE);
amsseg->oldGrains += amsseg->newGrains - uncondemned;
amsseg->newGrains = uncondemned;
amsseg->marksChanged = FALSE; /* <design/poolams/#marked.condemn> */
amsseg->ambiguousFixes = FALSE;
trace->condemned += AMSGrainsSize(ams, amsseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
@ -1561,8 +1574,7 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
{
AMS ams;
AMSSeg amsseg;
Count nowFree, grains;
Size reclaimedSize;
Count nowFree, grains, reclaimedGrains;
PoolDebugMixin debug;
AVERT(Pool, pool);
@ -1607,21 +1619,26 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
}
}
reclaimedSize = (nowFree - amsseg->free) << ams->grainShift;
amsseg->free = nowFree;
trace->reclaimSize += reclaimedSize;
ams->pgen.totalSize -= reclaimedSize;
reclaimedGrains = nowFree - amsseg->freeGrains;
AVER(amsseg->oldGrains >= reclaimedGrains);
amsseg->oldGrains -= reclaimedGrains;
amsseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(&ams->pgen, AMSGrainsSize(ams, reclaimedGrains), FALSE);
trace->reclaimSize += AMSGrainsSize(ams, reclaimedGrains);
/* preservedInPlaceCount is updated on fix */
trace->preservedInPlaceSize += (grains - amsseg->free) << ams->grainShift;
trace->preservedInPlaceSize += AMSGrainsSize(ams, amsseg->oldGrains);
/* Ensure consistency of segment even if are just about to free it */
amsseg->colourTablesInUse = FALSE;
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
if (amsseg->free == grains && SegBuffer(seg) == NULL) {
if (amsseg->freeGrains == grains && SegBuffer(seg) == NULL)
/* No survivors */
SegFree(seg);
}
PoolGenFree(&ams->pgen, seg,
AMSGrainsSize(ams, amsseg->freeGrains),
AMSGrainsSize(ams, amsseg->oldGrains),
AMSGrainsSize(ams, amsseg->newGrains),
FALSE);
}
@ -1662,8 +1679,6 @@ static Res AMSDescribe(Pool pool, mps_lib_FILE *stream)
"AMS $P {\n", (WriteFP)ams,
" pool $P ($U)\n",
(WriteFP)pool, (WriteFU)pool->serial,
" size $W\n",
(WriteFW)ams->size,
" grain shift $U\n", (WriteFU)ams->grainShift,
NULL);
if (res != ResOK) return res;
@ -1708,7 +1723,7 @@ DEFINE_CLASS(AMSPoolClass, this)
this->bufferClass = RankBufClassGet;
this->bufferFill = AMSBufferFill;
this->bufferEmpty = AMSBufferEmpty;
this->whiten = AMSCondemn;
this->whiten = AMSWhiten;
this->blacken = AMSBlacken;
this->scan = AMSScan;
this->fix = AMSFix;
@ -1756,10 +1771,9 @@ Bool AMSCheck(AMS ams)
CHECKS(AMS, ams);
CHECKD(Pool, AMS2Pool(ams));
CHECKL(IsSubclassPoly(AMS2Pool(ams)->class, AMSPoolClassGet()));
CHECKL(PoolAlignment(AMS2Pool(ams)) == ((Size)1 << ams->grainShift));
CHECKL(PoolAlignment(AMS2Pool(ams)) == AMSGrainsSize(ams, (Size)1));
CHECKL(PoolAlignment(AMS2Pool(ams)) == AMS2Pool(ams)->format->alignment);
CHECKD(PoolGen, &ams->pgen);
CHECKL(SizeIsAligned(ams->size, ArenaAlign(PoolArena(AMS2Pool(ams)))));
CHECKL(FUNCHECK(ams->segSize));
CHECKD_NOSIG(Ring, &ams->segRing);
CHECKL(FUNCHECK(ams->allocRing));

View file

@ -57,9 +57,10 @@ typedef struct AMSSegStruct {
GCSegStruct gcSegStruct; /* superclass fields must come first */
AMS ams; /* owning ams */
RingStruct segRing; /* ring that this seg belongs to */
Count grains; /* number of grains */
Count free; /* number of free grains */
Count newAlloc; /* number of grains allocated since last GC */
Count grains; /* total grains */
Count freeGrains; /* free grains */
Count oldGrains; /* grains allocated prior to last collection */
Count newGrains; /* grains allocated since last collection */
Bool allocTableInUse; /* allocTable is used */
Index firstFree; /* 1st free grain, if allocTable is not used */
BT allocTable; /* set if grain is allocated */
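The same three counters, and the invariant grains == freeGrains + oldGrains + newGrains checked above, reappear in the AWL and LO segments later in this diff; the life cycle they track can be sketched as follows (illustrative only):

/* Sketch of the grain life cycle implied by the new counters:
 *   free -> new   on buffer fill    (PoolGenAccountForFill)
 *   new  -> free  on buffer empty   (PoolGenAccountForEmpty)
 *   new  -> old   on whiten/condemn (PoolGenAccountForAge)
 *   old  -> free  on reclaim        (PoolGenAccountForReclaim)
 * so at all times grains == freeGrains + oldGrains + newGrains. */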

View file

@ -85,7 +85,6 @@ typedef struct AWLStruct {
PoolStruct poolStruct;
Shift alignShift;
PoolGenStruct pgen; /* generation representing the pool */
Size size; /* allocated size in bytes */
Count succAccesses; /* number of successive single accesses */
FindDependentMethod findDependent; /* to find a dependent object */
awlStatTotalStruct stats;
@ -93,6 +92,7 @@ typedef struct AWLStruct {
} AWLStruct, *AWL;
#define Pool2AWL(pool) PARENT(AWLStruct, poolStruct, pool)
#define AWLGrainsSize(awl, grains) ((grains) << (awl)->alignShift)
static Bool AWLCheck(AWL awl);
@ -101,6 +101,8 @@ static Bool AWLCheck(AWL awl);
/* Conversion between indexes and Addrs */
#define awlIndexOfAddr(base, awl, p) \
(AddrOffset((base), (p)) >> (awl)->alignShift)
#define awlAddrOfIndex(base, awl, i) \
AddrAdd(base, AWLGrainsSize(awl, i))
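The two macros are inverses for grain-aligned addresses within a segment; a hypothetical check, not in the source:

/* Hypothetical round trip, assuming p is grain-aligned in the segment: */
Index i = awlIndexOfAddr(base, awl, p);   /* byte offset >> alignShift */
Addr p2 = awlAddrOfIndex(base, awl, i);   /* base + (i << alignShift)  */
AVER(p2 == p);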
/* AWLSegStruct -- AWL segment subclass
@ -117,8 +119,10 @@ typedef struct AWLSegStruct {
BT scanned;
BT alloc;
Count grains;
Count free; /* number of free grains */
Count singleAccesses; /* number of accesses processed singly */
Count freeGrains; /* free grains */
Count oldGrains; /* grains allocated prior to last collection */
Count newGrains; /* grains allocated since last collection */
Count singleAccesses; /* number of accesses processed singly */
awlStatSegStruct stats;
Sig sig;
} AWLSegStruct, *AWLSeg;
@ -138,9 +142,8 @@ static Bool AWLSegCheck(AWLSeg awlseg)
CHECKL(awlseg->mark != NULL);
CHECKL(awlseg->scanned != NULL);
CHECKL(awlseg->alloc != NULL);
/* Can't do any real check on ->grains */
CHECKL(awlseg->grains > 0);
CHECKL(awlseg->free <= awlseg->grains);
CHECKL(awlseg->grains == awlseg->freeGrains + awlseg->oldGrains + awlseg->newGrains);
return TRUE;
}
@ -223,10 +226,12 @@ static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size,
BTResRange(awlseg->scanned, 0, bits);
BTResRange(awlseg->alloc, 0, bits);
SegSetRankAndSummary(seg, rankSet, RefSetUNIV);
awlseg->free = bits;
awlseg->sig = AWLSegSig;
awlseg->freeGrains = bits;
awlseg->oldGrains = (Count)0;
awlseg->newGrains = (Count)0;
awlseg->singleAccesses = 0;
awlStatSegInit(awlseg);
awlseg->sig = AWLSegSig;
AVERT(AWLSeg, awlseg);
return ResOK;
@ -500,7 +505,7 @@ static Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn,
AVERT(AWLSeg, awlseg);
AVERT(AWL, awl);
AVER(size > 0);
AVER(size << awl->alignShift >= size);
AVER(AWLGrainsSize(awl, size) >= size);
seg = AWLSeg2Seg(awlseg);
if (size > SegSize(seg))
@ -508,9 +513,8 @@ static Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn,
n = size >> awl->alignShift;
if (!BTFindLongResRange(&i, &j, awlseg->alloc, 0, awlseg->grains, n))
return FALSE;
awl->size += size;
*baseReturn = AddrAdd(SegBase(seg), i << awl->alignShift);
*limitReturn = AddrAdd(SegBase(seg), j << awl->alignShift);
*baseReturn = awlAddrOfIndex(SegBase(seg), awl, i);
*limitReturn = awlAddrOfIndex(SegBase(seg), awl, j);
return TRUE;
}
@ -575,8 +579,6 @@ static Res AWLInit(Pool pool, ArgList args)
goto failGenInit;
awl->alignShift = SizeLog2(PoolAlignment(pool));
awl->size = (Size)0;
awl->succAccesses = 0;
awlStatTotalInit(awl);
awl->sig = AWLSig;
@ -606,8 +608,13 @@ static void AWLFinish(Pool pool)
ring = &pool->segRing;
RING_FOR(node, ring, nextNode) {
Seg seg = SegOfPoolRing(node);
AVERT(Seg, seg);
SegFree(seg);
AWLSeg awlseg = Seg2AWLSeg(seg);
AVERT(AWLSeg, awlseg);
PoolGenFree(&awl->pgen, seg,
AWLGrainsSize(awl, awlseg->freeGrains),
AWLGrainsSize(awl, awlseg->oldGrains),
AWLGrainsSize(awl, awlseg->newGrains),
FALSE);
}
awl->sig = SigInvalid;
PoolGenFinish(&awl->pgen);
@ -646,10 +653,11 @@ static Res AWLBufferFill(Addr *baseReturn, Addr *limitReturn,
/* Only try to allocate in the segment if it is not already */
/* buffered, and has the same ranks as the buffer. */
if (SegBuffer(seg) == NULL && SegRankSet(seg) == BufferRankSet(buffer))
if (awlseg->free << awl->alignShift >= size
&& AWLSegAlloc(&base, &limit, awlseg, awl, size))
goto found;
if (SegBuffer(seg) == NULL
&& SegRankSet(seg) == BufferRankSet(buffer)
&& AWLGrainsSize(awl, awlseg->freeGrains) >= size
&& AWLSegAlloc(&base, &limit, awlseg, awl, size))
goto found;
}
/* No free space in existing awlsegs, so create new awlseg */
@ -673,7 +681,10 @@ found:
/* Shouldn't this depend on trace phase? @@@@ */
BTSetRange(awlseg->mark, i, j);
BTSetRange(awlseg->scanned, i, j);
awlseg->free -= j - i;
AVER(awlseg->freeGrains >= j - i);
awlseg->freeGrains -= j - i;
awlseg->newGrains += j - i;
PoolGenAccountForFill(&awl->pgen, AddrOffset(base, limit), FALSE);
}
*baseReturn = base;
*limitReturn = limit;
@ -709,7 +720,10 @@ static void AWLBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
AVER(i <= j);
if (i < j) {
BTResRange(awlseg->alloc, i, j);
awlseg->free += j - i;
AVER(awlseg->newGrains >= j - i);
awlseg->newGrains -= j - i;
awlseg->freeGrains += j - i;
PoolGenAccountForEmpty(&awl->pgen, AddrOffset(init, limit), FALSE);
}
}
@ -735,6 +749,7 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
AWL awl;
AWLSeg awlseg;
Buffer buffer;
Count uncondemned;
/* All parameters checked by generic PoolWhiten. */
@ -750,15 +765,13 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
if(buffer == NULL) {
awlRangeWhiten(awlseg, 0, awlseg->grains);
trace->condemned += SegSize(seg);
uncondemned = (Count)0;
} else {
/* Whiten everything except the buffer. */
Addr base = SegBase(seg);
Index scanLimitIndex = awlIndexOfAddr(base, awl,
BufferScanLimit(buffer));
Index limitIndex = awlIndexOfAddr(base, awl,
BufferLimit(buffer));
Index scanLimitIndex = awlIndexOfAddr(base, awl, BufferScanLimit(buffer));
Index limitIndex = awlIndexOfAddr(base, awl, BufferLimit(buffer));
uncondemned = limitIndex - scanLimitIndex;
awlRangeWhiten(awlseg, 0, scanLimitIndex);
awlRangeWhiten(awlseg, limitIndex, awlseg->grains);
@ -769,14 +782,12 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
AVER(BTIsSetRange(awlseg->mark, scanLimitIndex, limitIndex));
AVER(BTIsSetRange(awlseg->scanned, scanLimitIndex, limitIndex));
}
/* We didn't condemn the buffer, subtract it from the count. */
/* @@@@ We could subtract all the free grains. */
trace->condemned += SegSize(seg)
- AddrOffset(BufferScanLimit(buffer),
BufferLimit(buffer));
}
PoolGenAccountForAge(&awl->pgen, AWLGrainsSize(awl, awlseg->newGrains - uncondemned), FALSE);
awlseg->oldGrains += awlseg->newGrains - uncondemned;
awlseg->newGrains = uncondemned;
trace->condemned += AWLGrainsSize(awl, awlseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
return ResOK;
}
@ -1088,12 +1099,12 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
Addr base;
AWL awl;
AWLSeg awlseg;
Buffer buffer;
Index i;
Count oldFree;
Format format;
Count reclaimedGrains = (Count)0;
Count preservedInPlaceCount = (Count)0;
Size preservedInPlaceSize = (Size)0;
Size freed; /* amount reclaimed, in bytes */
AVERT(Pool, pool);
AVERT(Trace, trace);
@ -1107,8 +1118,9 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
format = pool->format;
base = SegBase(seg);
buffer = SegBuffer(seg);
i = 0; oldFree = awlseg->free;
i = 0;
while(i < awlseg->grains) {
Addr p, q;
Index j;
@ -1117,16 +1129,13 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
++i;
continue;
}
p = AddrAdd(base, i << awl->alignShift);
if(SegBuffer(seg) != NULL) {
Buffer buffer = SegBuffer(seg);
if(p == BufferScanLimit(buffer)
&& BufferScanLimit(buffer) != BufferLimit(buffer))
{
i = awlIndexOfAddr(base, awl, BufferLimit(buffer));
continue;
}
p = awlAddrOfIndex(base, awl, i);
if (buffer != NULL
&& p == BufferScanLimit(buffer)
&& BufferScanLimit(buffer) != BufferLimit(buffer))
{
i = awlIndexOfAddr(base, awl, BufferLimit(buffer));
continue;
}
q = format->skip(AddrAdd(p, format->headerSize));
q = AddrSub(q, format->headerSize);
@ -1143,20 +1152,30 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
BTResRange(awlseg->mark, i, j);
BTSetRange(awlseg->scanned, i, j);
BTResRange(awlseg->alloc, i, j);
awlseg->free += j - i;
reclaimedGrains += j - i;
}
i = j;
}
AVER(i == awlseg->grains);
freed = (awlseg->free - oldFree) << awl->alignShift;
awl->size -= freed;
trace->reclaimSize += freed;
AVER(reclaimedGrains <= awlseg->grains);
AVER(awlseg->oldGrains >= reclaimedGrains);
awlseg->oldGrains -= reclaimedGrains;
awlseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(&awl->pgen, AWLGrainsSize(awl, reclaimedGrains), FALSE);
trace->reclaimSize += AWLGrainsSize(awl, reclaimedGrains);
trace->preservedInPlaceCount += preservedInPlaceCount;
trace->preservedInPlaceSize += preservedInPlaceSize;
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
/* @@@@ never frees a segment, see job001687. */
return;
if (awlseg->freeGrains == awlseg->grains && buffer == NULL)
/* No survivors */
PoolGenFree(&awl->pgen, seg,
AWLGrainsSize(awl, awlseg->freeGrains),
AWLGrainsSize(awl, awlseg->oldGrains),
AWLGrainsSize(awl, awlseg->newGrains),
FALSE);
}
@ -1304,7 +1323,7 @@ static Bool AWLCheck(AWL awl)
CHECKS(AWL, awl);
CHECKD(Pool, &awl->poolStruct);
CHECKL(awl->poolStruct.class == AWLPoolClassGet());
CHECKL((Align)1 << awl->alignShift == awl->poolStruct.alignment);
CHECKL(AWLGrainsSize(awl, (Count)1) == awl->poolStruct.alignment);
/* Nothing to check about succAccesses. */
CHECKL(FUNCHECK(awl->findDependent));
/* Don't bother to check stats. */

View file

@ -30,6 +30,7 @@ typedef struct LOStruct {
#define PoolPoolLO(pool) PARENT(LOStruct, poolStruct, pool)
#define LOPool(lo) (&(lo)->poolStruct)
#define LOGrainsSize(lo, grains) ((grains) << (lo)->alignShift)
/* forward declaration */
@ -47,8 +48,9 @@ typedef struct LOSegStruct {
LO lo; /* owning LO */
BT mark; /* mark bit table */
BT alloc; /* alloc bit table */
Count free; /* number of free grains */
Count newAlloc; /* number of grains allocated since last GC */
Count freeGrains; /* free grains */
Count oldGrains; /* grains allocated prior to last collection */
Count newGrains; /* grains allocated since last collection */
Sig sig; /* <code/misc.h#sig> */
} LOSegStruct;
@ -60,6 +62,7 @@ typedef struct LOSegStruct {
static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, ArgList args);
static void loSegFinish(Seg seg);
static Count loSegGrains(LOSeg loseg);
/* LOSegClass -- Class definition for LO segments */
@ -87,8 +90,8 @@ static Bool LOSegCheck(LOSeg loseg)
CHECKL(loseg->mark != NULL);
CHECKL(loseg->alloc != NULL);
/* Could check exactly how many bits are set in the alloc table. */
CHECKL(loseg->free + loseg->newAlloc
<= SegSize(LOSegSeg(loseg)) >> loseg->lo->alignShift);
CHECKL(loseg->freeGrains + loseg->oldGrains + loseg->newGrains
== SegSize(LOSegSeg(loseg)) >> loseg->lo->alignShift);
return TRUE;
}
@ -105,7 +108,7 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
Size tablebytes; /* # bytes in each control array */
Arena arena;
/* number of bits needed in each control array */
Count bits;
Count grains;
void *p;
AVERT(Seg, seg);
@ -125,8 +128,8 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
AVER(SegWhite(seg) == TraceSetEMPTY);
bits = size >> lo->alignShift;
tablebytes = BTSize(bits);
grains = size >> lo->alignShift;
tablebytes = BTSize(grains);
res = ControlAlloc(&p, arena, tablebytes, reservoirPermit);
if(res != ResOK)
goto failMarkTable;
@ -135,11 +138,12 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
if(res != ResOK)
goto failAllocTable;
loseg->alloc = p;
BTResRange(loseg->alloc, 0, bits);
BTSetRange(loseg->mark, 0, bits);
BTResRange(loseg->alloc, 0, grains);
BTSetRange(loseg->mark, 0, grains);
loseg->lo = lo;
loseg->free = bits;
loseg->newAlloc = (Count)0;
loseg->freeGrains = grains;
loseg->oldGrains = (Count)0;
loseg->newGrains = (Count)0;
loseg->sig = LOSegSig;
AVERT(LOSeg, loseg);
return ResOK;
@ -162,7 +166,7 @@ static void loSegFinish(Seg seg)
Pool pool;
Arena arena;
Size tablesize;
Count bits;
Count grains;
AVERT(Seg, seg);
loseg = SegLOSeg(seg);
@ -172,8 +176,8 @@ static void loSegFinish(Seg seg)
AVERT(LO, lo);
arena = PoolArena(pool);
bits = SegSize(seg) >> lo->alignShift;
tablesize = BTSize(bits);
grains = loSegGrains(loseg);
tablesize = BTSize(grains);
ControlFree(arena, (Addr)loseg->alloc, tablesize);
ControlFree(arena, (Addr)loseg->mark, tablesize);
loseg->sig = SigInvalid;
@ -185,7 +189,7 @@ static void loSegFinish(Seg seg)
ATTRIBUTE_UNUSED
static Count loSegBits(LOSeg loseg)
static Count loSegGrains(LOSeg loseg)
{
LO lo;
Size size;
@ -204,7 +208,7 @@ static Count loSegBits(LOSeg loseg)
(AddrOffset((base), (p)) >> (lo)->alignShift)
#define loAddrOfIndex(base, lo, i) \
(AddrAdd((base), (i) << (lo)->alignShift))
(AddrAdd((base), LOGrainsSize((lo), (i))))
/* loSegFree -- mark block from baseIndex to limitIndex free */
@ -213,12 +217,11 @@ static void loSegFree(LOSeg loseg, Index baseIndex, Index limitIndex)
{
AVERT(LOSeg, loseg);
AVER(baseIndex < limitIndex);
AVER(limitIndex <= loSegBits(loseg));
AVER(limitIndex <= loSegGrains(loseg));
AVER(BTIsSetRange(loseg->alloc, baseIndex, limitIndex));
BTResRange(loseg->alloc, baseIndex, limitIndex);
BTSetRange(loseg->mark, baseIndex, limitIndex);
loseg->free += limitIndex - baseIndex;
}
@ -233,7 +236,7 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn,
LO lo;
Seg seg;
Count agrains;
Count bits;
Count grains;
Addr segBase;
AVER(bReturn != NULL);
@ -248,23 +251,22 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn,
/* of the allocation request */
agrains = size >> lo->alignShift;
AVER(agrains >= 1);
AVER(agrains <= loseg->free);
AVER(agrains <= loseg->freeGrains);
AVER(size <= SegSize(seg));
if(SegBuffer(seg) != NULL) {
if(SegBuffer(seg) != NULL)
/* Don't bother trying to allocate from a buffered segment */
return FALSE;
}
bits = SegSize(seg) >> lo->alignShift;
grains = loSegGrains(loseg);
if(!BTFindLongResRange(&baseIndex, &limitIndex, loseg->alloc,
0, bits, agrains)) {
0, grains, agrains)) {
return FALSE;
}
/* check that BTFindLongResRange really did find enough space */
AVER(baseIndex < limitIndex);
AVER((limitIndex-baseIndex) << lo->alignShift >= size);
AVER(LOGrainsSize(lo, limitIndex - baseIndex) >= size);
segBase = SegBase(seg);
*bReturn = loAddrOfIndex(segBase, lo, baseIndex);
*lReturn = loAddrOfIndex(segBase, lo, limitIndex);
@ -312,7 +314,7 @@ static void loSegReclaim(LOSeg loseg, Trace trace)
{
Addr p, base, limit;
Bool marked;
Count bytesReclaimed = (Count)0;
Count reclaimedGrains = (Count)0;
Seg seg;
LO lo;
Format format;
@ -370,23 +372,30 @@ static void loSegReclaim(LOSeg loseg, Trace trace)
Index j = loIndexOfAddr(base, lo, q);
/* This object is not marked, so free it */
loSegFree(loseg, i, j);
bytesReclaimed += AddrOffset(p, q);
reclaimedGrains += j - i;
}
p = q;
}
AVER(p == limit);
AVER(bytesReclaimed <= SegSize(seg));
trace->reclaimSize += bytesReclaimed;
lo->pgen.totalSize -= bytesReclaimed;
AVER(reclaimedGrains <= loSegGrains(loseg));
AVER(loseg->oldGrains >= reclaimedGrains);
loseg->oldGrains -= reclaimedGrains;
loseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(&lo->pgen, LOGrainsSize(lo, reclaimedGrains), FALSE);
trace->reclaimSize += LOGrainsSize(lo, reclaimedGrains);
trace->preservedInPlaceCount += preservedInPlaceCount;
trace->preservedInPlaceSize += preservedInPlaceSize;
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
if(!marked) {
SegFree(seg);
}
if (!marked)
PoolGenFree(&lo->pgen, seg,
LOGrainsSize(lo, loseg->freeGrains),
LOGrainsSize(lo, loseg->oldGrains),
LOGrainsSize(lo, loseg->newGrains),
FALSE);
}
/* This walks over _all_ objects in the heap, whether they are */
@ -399,7 +408,7 @@ static void LOWalk(Pool pool, Seg seg,
Addr base;
LO lo;
LOSeg loseg;
Index i, limit;
Index i, grains;
Format format;
AVERT(Pool, pool);
@ -416,10 +425,10 @@ static void LOWalk(Pool pool, Seg seg,
AVERT(Format, format);
base = SegBase(seg);
limit = SegSize(seg) >> lo->alignShift;
grains = loSegGrains(loseg);
i = 0;
while(i < limit) {
while(i < grains) {
/* object is a slight misnomer because it might point to a */
/* free grain */
Addr object = loAddrOfIndex(base, lo, i);
@ -531,10 +540,12 @@ static void LOFinish(Pool pool)
RING_FOR(node, &pool->segRing, nextNode) {
Seg seg = SegOfPoolRing(node);
LOSeg loseg = SegLOSeg(seg);
AVERT(LOSeg, loseg);
UNUSED(loseg); /* <code/mpm.c#check.unused> */
SegFree(seg);
PoolGenFree(&lo->pgen, seg,
LOGrainsSize(lo, loseg->freeGrains),
LOGrainsSize(lo, loseg->oldGrains),
LOGrainsSize(lo, loseg->newGrains),
FALSE);
}
PoolGenFinish(&lo->pgen);
@ -569,7 +580,7 @@ static Res LOBufferFill(Addr *baseReturn, Addr *limitReturn,
Seg seg = SegOfPoolRing(node);
loseg = SegLOSeg(seg);
AVERT(LOSeg, loseg);
if((loseg->free << lo->alignShift) >= size
if(LOGrainsSize(lo, loseg->freeGrains) >= size
&& loSegFindFree(&base, &limit, loseg, size))
goto found;
}
@ -594,12 +605,12 @@ found:
AVER(BTIsResRange(loseg->alloc, baseIndex, limitIndex));
AVER(BTIsSetRange(loseg->mark, baseIndex, limitIndex));
BTSetRange(loseg->alloc, baseIndex, limitIndex);
loseg->free -= limitIndex - baseIndex;
loseg->newAlloc += limitIndex - baseIndex;
AVER(loseg->freeGrains >= limitIndex - baseIndex);
loseg->freeGrains -= limitIndex - baseIndex;
loseg->newGrains += limitIndex - baseIndex;
}
lo->pgen.totalSize += AddrOffset(base, limit);
lo->pgen.newSize += AddrOffset(base, limit);
PoolGenAccountForFill(&lo->pgen, AddrOffset(base, limit), FALSE);
*baseReturn = base;
*limitReturn = limit;
@ -618,7 +629,7 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
Addr base, segBase;
Seg seg;
LOSeg loseg;
Index baseIndex, initIndex, limitIndex;
Index initIndex, limitIndex;
AVERT(Pool, pool);
lo = PARENT(LOStruct, poolStruct, pool);
@ -643,21 +654,17 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
AVER(init <= SegLimit(seg));
/* convert base, init, and limit, to quantum positions */
baseIndex = loIndexOfAddr(segBase, lo, base);
initIndex = loIndexOfAddr(segBase, lo, init);
limitIndex = loIndexOfAddr(segBase, lo, limit);
/* Record the unused portion at the end of the buffer */
/* as being free. */
AVER(baseIndex == limitIndex
|| BTIsSetRange(loseg->alloc, baseIndex, limitIndex));
if(initIndex != limitIndex) {
/* Free the unused portion of the buffer (this must be "new", since
* it's not condemned). */
loSegFree(loseg, initIndex, limitIndex);
lo->pgen.totalSize -= AddrOffset(init, limit);
/* All of the buffer must be new, since buffered segs are not condemned. */
AVER(loseg->newAlloc >= limitIndex - baseIndex);
loseg->newAlloc -= limitIndex - initIndex;
lo->pgen.newSize -= AddrOffset(init, limit);
AVER(loseg->newGrains >= limitIndex - initIndex);
loseg->newGrains -= limitIndex - initIndex;
loseg->freeGrains += limitIndex - initIndex;
PoolGenAccountForEmpty(&lo->pgen, AddrOffset(init, limit), FALSE);
}
}
@ -667,7 +674,9 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
static Res LOWhiten(Pool pool, Trace trace, Seg seg)
{
LO lo;
Count bits;
LOSeg loseg;
Buffer buffer;
Count grains, uncondemned;
AVERT(Pool, pool);
lo = PoolPoolLO(pool);
@ -677,21 +686,32 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
AVERT(Seg, seg);
AVER(SegWhite(seg) == TraceSetEMPTY);
if(SegBuffer(seg) == NULL) {
LOSeg loseg = SegLOSeg(seg);
AVERT(LOSeg, loseg);
loseg = SegLOSeg(seg);
AVERT(LOSeg, loseg);
grains = loSegGrains(loseg);
bits = SegSize(seg) >> lo->alignShift;
/* Allocated objects should be whitened, free areas should */
/* be left "black". */
BTCopyInvertRange(loseg->alloc, loseg->mark, 0, bits);
/* @@@@ We could subtract all the free grains. */
trace->condemned += SegSize(seg);
lo->pgen.newSize -= loseg->newAlloc << lo->alignShift;
loseg->newAlloc = (Count)0;
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
/* Whiten allocated objects; leave free areas black. */
buffer = SegBuffer(seg);
if (buffer != NULL) {
Addr base = SegBase(seg);
Index scanLimitIndex = loIndexOfAddr(base, lo, BufferScanLimit(buffer));
Index limitIndex = loIndexOfAddr(base, lo, BufferLimit(buffer));
uncondemned = limitIndex - scanLimitIndex;
if (0 < scanLimitIndex)
BTCopyInvertRange(loseg->alloc, loseg->mark, 0, scanLimitIndex);
if (limitIndex < grains)
BTCopyInvertRange(loseg->alloc, loseg->mark, limitIndex, grains);
} else {
uncondemned = (Count)0;
BTCopyInvertRange(loseg->alloc, loseg->mark, 0, grains);
}
PoolGenAccountForAge(&lo->pgen, LOGrainsSize(lo, loseg->newGrains - uncondemned), FALSE);
loseg->oldGrains += loseg->newGrains - uncondemned;
loseg->newGrains = uncondemned;
trace->condemned += LOGrainsSize(lo, loseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
return ResOK;
}
@ -815,7 +835,7 @@ static Bool LOCheck(LO lo)
CHECKD(Pool, &lo->poolStruct);
CHECKL(lo->poolStruct.class == EnsureLOPoolClass());
CHECKL(ShiftCheck(lo->alignShift));
CHECKL((Align)1 << lo->alignShift == PoolAlignment(&lo->poolStruct));
CHECKL(LOGrainsSize(lo, (Count)1) == PoolAlignment(&lo->poolStruct));
CHECKD(PoolGen, &lo->pgen);
return TRUE;
}

View file

@ -322,7 +322,7 @@ static Res MFSDescribe(Pool pool, mps_lib_FILE *stream)
DEFINE_POOL_CLASS(MFSPoolClass, this)
{
INHERIT_CLASS(this, AbstractAllocFreePoolClass);
INHERIT_CLASS(this, AbstractPoolClass);
this->name = "MFS";
this->size = sizeof(MFSStruct);
this->offset = offsetof(MFSStruct, poolStruct);

View file

@ -861,7 +861,6 @@ DEFINE_POOL_CLASS(MRGPoolClass, this)
this->name = "MRG";
this->size = sizeof(MRGStruct);
this->offset = offsetof(MRGStruct, poolStruct);
this->attr |= AttrSCAN;
this->init = MRGInit;
this->finish = MRGFinish;
this->grey = PoolTrivGrey;

View file

@ -217,6 +217,7 @@ static void MVDebugVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs)
static Res MVInit(Pool pool, ArgList args)
{
Align align = MV_ALIGN_DEFAULT;
Size extendBy = MV_EXTEND_BY_DEFAULT;
Size avgSize = MV_AVG_SIZE_DEFAULT;
Size maxSize = MV_MAX_SIZE_DEFAULT;
@ -226,6 +227,8 @@ static Res MVInit(Pool pool, ArgList args)
Res res;
ArgStruct arg;
if (ArgPick(&arg, args, MPS_KEY_ALIGN))
align = arg.val.align;
if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY))
extendBy = arg.val.size;
if (ArgPick(&arg, args, MPS_KEY_MEAN_SIZE))
@ -233,12 +236,14 @@ static Res MVInit(Pool pool, ArgList args)
if (ArgPick(&arg, args, MPS_KEY_MAX_SIZE))
maxSize = arg.val.size;
AVERT(Align, align);
AVER(extendBy > 0);
AVER(avgSize > 0);
AVER(avgSize <= extendBy);
AVER(maxSize > 0);
AVER(extendBy <= maxSize);
pool->alignment = align;
mv = Pool2MV(pool);
arena = PoolArena(pool);
@ -626,6 +631,7 @@ static void MVFree(Pool pool, Addr old, Size size)
AVERT(MV, mv);
AVER(old != (Addr)0);
AVER(AddrIsAligned(old, pool->alignment));
AVER(size > 0);
size = SizeAlignUp(size, pool->alignment);
@ -791,7 +797,6 @@ static Res MVDescribe(Pool pool, mps_lib_FILE *stream)
DEFINE_POOL_CLASS(MVPoolClass, this)
{
INHERIT_CLASS(this, AbstractBufferPoolClass);
PoolClassMixInAllocFree(this);
this->name = "MV";
this->size = sizeof(MVStruct);
this->offset = offsetof(MVStruct, poolStruct);

View file

@ -139,7 +139,6 @@ DEFINE_POOL_CLASS(MVTPoolClass, this)
this->name = "MVT";
this->size = sizeof(MVTStruct);
this->offset = offsetof(MVTStruct, poolStruct);
this->attr |= AttrFREE;
this->varargs = MVTVarargs;
this->init = MVTInit;
this->finish = MVTFinish;
@ -255,7 +254,12 @@ static Res MVTInit(Pool pool, ArgList args)
fragLimit = (Count)(arg.val.d * 100);
}
AVER(SizeIsAligned(align, MPS_PF_ALIGN));
AVERT(Align, align);
/* This restriction on the alignment is necessary because of the use
* of a Freelist to store the free address ranges in low-memory
* situations. See <design/freelist/#impl.grain.align>.
*/
AVER(AlignIsAligned(align, FreelistMinimumAlignment));
AVER(0 < minSize);
AVER(minSize <= meanSize);
AVER(meanSize <= maxSize);

View file

@ -465,7 +465,7 @@ static Res MVFFInit(Pool pool, ArgList args)
{
Size extendBy = MVFF_EXTEND_BY_DEFAULT;
Size avgSize = MVFF_AVG_SIZE_DEFAULT;
Size align = MVFF_ALIGN_DEFAULT;
Align align = MVFF_ALIGN_DEFAULT;
Bool slotHigh = MVFF_SLOT_HIGH_DEFAULT;
Bool arenaHigh = MVFF_ARENA_HIGH_DEFAULT;
Bool firstFit = MVFF_FIRST_FIT_DEFAULT;
@ -507,9 +507,12 @@ static Res MVFFInit(Pool pool, ArgList args)
AVER(extendBy > 0); /* .arg.check */
AVER(avgSize > 0); /* .arg.check */
AVER(avgSize <= extendBy); /* .arg.check */
AVER(spare >= 0.0); /* .arg.check */
AVER(spare <= 1.0); /* .arg.check */
AVER(SizeIsAligned(align, MPS_PF_ALIGN));
AVERT(Align, align);
/* This restriction on the alignment is necessary because of the use
* of a Freelist to store the free address ranges in low-memory
* situations. <design/freelist/#impl.grain.align>.
*/
AVER(AlignIsAligned(align, FreelistMinimumAlignment));
AVERT(Bool, slotHigh);
AVERT(Bool, arenaHigh);
AVERT(Bool, firstFit);
@ -687,7 +690,7 @@ static Res MVFFDescribe(Pool pool, mps_lib_FILE *stream)
DEFINE_POOL_CLASS(MVFFPoolClass, this)
{
INHERIT_CLASS(this, AbstractAllocFreePoolClass);
INHERIT_CLASS(this, AbstractPoolClass);
PoolClassMixInBuffer(this);
this->name = "MVFF";
this->size = sizeof(MVFFStruct);

View file

@ -270,7 +270,7 @@ DEFINE_POOL_CLASS(NPoolClass, this)
this->name = "N";
this->size = sizeof(PoolNStruct);
this->offset = offsetof(PoolNStruct, poolStruct);
this->attr |= (AttrALLOC | AttrBUF | AttrFREE | AttrGC | AttrSCAN);
this->attr |= AttrGC;
this->init = NInit;
this->finish = NFinish;
this->alloc = NAlloc;

View file

@ -117,10 +117,10 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount,
/* to be large enough, but that gets complicated, if you have to */
/* merge classes because of the adjustment. */
for (i = 0; i < classesCount; ++i) {
AVER(classes[i]._block_size > 0);
AVER(SizeIsAligned(classes[i]._block_size, PoolAlignment(pool)));
AVER(prevSize < classes[i]._block_size);
prevSize = classes[i]._block_size;
AVER(classes[i].mps_block_size > 0);
AVER(SizeIsAligned(classes[i].mps_block_size, PoolAlignment(pool)));
AVER(prevSize < classes[i].mps_block_size);
prevSize = classes[i].mps_block_size;
/* no restrictions on count */
/* no restrictions on frequency */
}
@ -128,7 +128,7 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount,
/* Calculate frequency scale */
for (i = 0; i < classesCount; ++i) {
unsigned oldFreq = totalFreq;
totalFreq += classes[i]._frequency;
totalFreq += classes[i].mps_frequency;
AVER(oldFreq <= totalFreq); /* check for overflow */
UNUSED(oldFreq); /* <code/mpm.c#check.unused> */
}
@ -136,10 +136,10 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount,
/* Find middle one */
totalFreq /= 2;
for (i = 0; i < classesCount; ++i) {
if (totalFreq < classes[i]._frequency) break;
totalFreq -= classes[i]._frequency;
if (totalFreq < classes[i].mps_frequency) break;
totalFreq -= classes[i].mps_frequency;
}
if (totalFreq <= classes[i]._frequency / 2)
if (totalFreq <= classes[i].mps_frequency / 2)
middleIndex = i;
else
middleIndex = i + 1; /* there must exist another class at i+1 */
@ -155,9 +155,9 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount,
/* It's important this matches SACFind. */
esac = ExternalSACOfSAC(sac);
for (j = middleIndex + 1, i = 0; j < classesCount; ++j, i += 2) {
esac->_freelists[i]._size = classes[j]._block_size;
esac->_freelists[i]._size = classes[j].mps_block_size;
esac->_freelists[i]._count = 0;
esac->_freelists[i]._count_max = classes[j]._cached_count;
esac->_freelists[i]._count_max = classes[j].mps_cached_count;
esac->_freelists[i]._blocks = NULL;
}
esac->_freelists[i]._size = SizeMAX;
@ -165,19 +165,19 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount,
esac->_freelists[i]._count_max = 0;
esac->_freelists[i]._blocks = NULL;
for (j = middleIndex, i = 1; j > 0; --j, i += 2) {
esac->_freelists[i]._size = classes[j-1]._block_size;
esac->_freelists[i]._size = classes[j-1].mps_block_size;
esac->_freelists[i]._count = 0;
esac->_freelists[i]._count_max = classes[j]._cached_count;
esac->_freelists[i]._count_max = classes[j].mps_cached_count;
esac->_freelists[i]._blocks = NULL;
}
esac->_freelists[i]._size = 0;
esac->_freelists[i]._count = 0;
esac->_freelists[i]._count_max = classes[j]._cached_count;
esac->_freelists[i]._count_max = classes[j].mps_cached_count;
esac->_freelists[i]._blocks = NULL;
/* finish init */
esac->_trapped = FALSE;
esac->_middle = classes[middleIndex]._block_size;
esac->_middle = classes[middleIndex].mps_block_size;
sac->pool = pool;
sac->classesCount = classesCount;
sac->middleIndex = middleIndex;
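Client code now spells the renamed, mps_-prefixed fields when describing its size classes; a minimal sketch using designated initializers (pool is assumed to exist already; block sizes must be multiples of the pool alignment and strictly increasing, per the checks above):

mps_sac_classes_s classes[] = {
  { .mps_block_size = 16,  .mps_cached_count = 10, .mps_frequency = 6 },
  { .mps_block_size = 64,  .mps_cached_count = 5,  .mps_frequency = 3 },
  { .mps_block_size = 256, .mps_cached_count = 2,  .mps_frequency = 1 },
};
mps_sac_t sac;
die(mps_sac_create(&sac, pool, sizeof classes / sizeof *classes, classes),
    "SACCreate");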

View file

@ -7,6 +7,7 @@
#include "mpscmv.h"
#include "mpscmvff.h"
#include "mpscmfs.h"
#include "mpslib.h"
#include "mpsavm.h"
#include "mps.h"
@ -15,9 +16,7 @@
#include "mpslib.h"
#include <stdio.h>
#include "mpstd.h"
#include <stdlib.h>
#include <stdarg.h>
#include <time.h>
@ -28,9 +27,6 @@
#define testSetSIZE 200
#define testLOOPS 10
#define topClassSIZE 0xA00
#define classCOUNT 4
/* make -- allocate an object */
@ -45,25 +41,36 @@ static mps_res_t make(mps_addr_t *p, mps_sac_t sac, size_t size)
/* stress -- create a pool of the requested type and allocate in it */
static mps_res_t stress(mps_class_t class,
size_t classes_count, mps_sac_classes_s *classes,
size_t (*size)(size_t i), mps_arena_t arena, ...)
static mps_res_t stress(mps_arena_t arena, mps_align_t align,
size_t (*size)(size_t i),
const char *name, mps_class_t pool_class,
mps_arg_s *args)
{
mps_res_t res;
mps_pool_t pool;
mps_sac_t sac;
va_list arg;
size_t i, k;
int *ps[testSetSIZE];
size_t ss[testSetSIZE];
mps_sac_classes_s classes[4] = {
{1, 1, 1},
{2, 1, 2},
{16, 9, 5},
{100, 9, 4},
};
size_t classes_count = sizeof classes / sizeof *classes;
for (i = 0; i < classes_count; ++i) {
classes[i].mps_block_size *= alignUp(align, sizeof(void *));
}
va_start(arg, arena);
res = mps_pool_create_v(&pool, arena, class, arg);
va_end(arg);
printf("%s\n", name);
res = mps_pool_create_k(&pool, arena, pool_class, args);
if (res != MPS_RES_OK)
return res;
die(mps_sac_create(&sac, pool, classes_count, classes), "SACCreate");
die(mps_sac_create(&sac, pool, classes_count, classes),
"SACCreate");
/* allocate a load of objects */
for (i = 0; i < testSetSIZE; ++i) {
@ -125,9 +132,9 @@ static mps_res_t stress(mps_class_t class,
}
/* randomSize8 -- produce sizes both latge and small */
/* randomSize -- produce sizes both large and small */
static size_t randomSize8(size_t i)
static size_t randomSize(size_t i)
{
size_t maxSize = 2 * 160 * 0x2000;
size_t size;
@ -138,58 +145,97 @@ static size_t randomSize8(size_t i)
}
/* testInArena -- test all the pool classes in the given arena */
/* fixedSize -- produce always the same size */
static size_t fixedSizeSize = 0;
static size_t fixedSize(size_t i)
{
testlib_unused(i);
return fixedSizeSize;
}
static mps_pool_debug_option_s debugOptions = {
/* .fence_template = */ (const void *)"postpostpostpost",
/* .fence_size = */ MPS_PF_ALIGN,
/* .free_template = */ (const void *)"DEAD",
/* .fence_template = */ "post",
/* .fence_size = */ 4,
/* .free_template = */ "DEAD",
/* .free_size = */ 4
};
static mps_sac_classes_s classes[4] = {
{MPS_PF_ALIGN, 1, 1},
{MPS_PF_ALIGN * 2, 1, 2},
{128 + MPS_PF_ALIGN, 9, 5},
{topClassSIZE, 9, 4}
};
static void testInArena(mps_arena_t arena)
/* testInArena -- test all the pool classes in the given arena */
static void testInArena(mps_arena_class_t arena_class, mps_arg_s *arena_args)
{
printf("MVFF\n\n");
die(stress(mps_class_mvff(), classCOUNT, classes, randomSize8, arena,
(size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE),
"stress MVFF");
printf("MV debug\n\n");
die(stress(mps_class_mv_debug(), classCOUNT, classes, randomSize8, arena,
&debugOptions, (size_t)65536, (size_t)32, (size_t)65536),
"stress MV debug");
printf("MV\n\n");
die(stress(mps_class_mv(), classCOUNT, classes, randomSize8, arena,
(size_t)65536, (size_t)32, (size_t)65536),
"stress MV");
mps_arena_t arena;
die(mps_arena_create_k(&arena, arena_class, arena_args),
"mps_arena_create");
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE);
die(stress(arena, align, randomSize, "MVFF", mps_class_mvff(), args),
"stress MVFF");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, &debugOptions);
die(stress(arena, align, randomSize, "MVFF debug",
mps_class_mvff_debug(), args),
"stress MVFF debug");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, align, randomSize, "MV", mps_class_mv(), args),
"stress MV");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, &debugOptions);
die(stress(arena, align, randomSize, "MV debug",
mps_class_mv_debug(), args),
"stress MV debug");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
fixedSizeSize = MPS_PF_ALIGN * (1 + rnd() % 100);
MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, fixedSizeSize);
die(stress(arena, fixedSizeSize, fixedSize, "MFS", mps_class_mfs(), args),
"stress MFS");
} MPS_ARGS_END(args);
mps_arena_destroy(arena);
}
int main(int argc, char *argv[])
{
mps_arena_t arena;
testlib_init(argc, argv);
die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE),
"mps_arena_create");
testInArena(arena);
mps_arena_destroy(arena);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
testInArena(mps_arena_class_vm(), args);
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE);
die(mps_arena_create_k(&arena, mps_arena_class_vm(), args),
"mps_arena_create");
testInArena(mps_arena_class_vm(), args);
} MPS_ARGS_END(args);
testInArena(arena);
mps_arena_destroy(arena);
printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
return 0;

View file

@ -603,6 +603,10 @@ Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at,
AVER(at < limit);
AVERT(Bool, withReservoirPermit);
/* Can only split a buffered segment if the entire buffer is below
* the split point. */
AVER(SegBuffer(seg) == NULL || BufferLimit(SegBuffer(seg)) <= at);
ShieldFlush(arena); /* see <design/seg/#split-merge.shield> */
/* Allocate the new segment object from the control pool */
@ -701,8 +705,6 @@ Bool SegCheck(Seg seg)
CHECKL(seg->sm == AccessSetEMPTY);
CHECKL(seg->pm == AccessSetEMPTY);
} else {
/* Segments with ranks may only belong to scannable pools. */
CHECKL(PoolHasAttr(pool, AttrSCAN));
/* <design/seg/#field.rankSet.single>: The Tracer only permits */
/* one rank per segment [ref?] so this field is either empty or a */
/* singleton. */

View file

@ -339,7 +339,7 @@ static Res AMSTInit(Pool pool, ArgList args)
AVERT(Pool, pool);
AVERT(ArgList, args);
if (ArgPick(&arg, args, MPS_KEY_CHAIN))
chain = arg.val.chain;
else {
@ -402,7 +402,7 @@ static Bool AMSSegIsFree(Seg seg)
AMSSeg amsseg;
AVERT(Seg, seg);
amsseg = Seg2AMSSeg(seg);
return(amsseg->free == amsseg->grains);
return amsseg->freeGrains == amsseg->grains;
}
@ -436,7 +436,7 @@ static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit)
* Used as a means of overriding the behaviour of AMSBufferFill.
* The code is similar to AMSBufferEmpty.
*/
static void AMSUnallocateRange(Seg seg, Addr base, Addr limit)
static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
AMSSeg amsseg;
Index baseIndex, limitIndex;
@ -464,8 +464,10 @@ static void AMSUnallocateRange(Seg seg, Addr base, Addr limit)
BTResRange(amsseg->allocTable, baseIndex, limitIndex);
}
}
amsseg->free += limitIndex - baseIndex;
amsseg->newAlloc -= limitIndex - baseIndex;
amsseg->freeGrains += limitIndex - baseIndex;
AVER(amsseg->newGrains >= limitIndex - baseIndex);
amsseg->newGrains -= limitIndex - baseIndex;
PoolGenAccountForEmpty(&ams->pgen, AddrOffset(base, limit), FALSE);
}
@ -474,7 +476,7 @@ static void AMSUnallocateRange(Seg seg, Addr base, Addr limit)
* Used as a means of overriding the behaviour of AMSBufferFill.
* The code is similar to AMSUnallocateRange.
*/
static void AMSAllocateRange(Seg seg, Addr base, Addr limit)
static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
AMSSeg amsseg;
Index baseIndex, limitIndex;
@ -502,9 +504,10 @@ static void AMSAllocateRange(Seg seg, Addr base, Addr limit)
BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
}
}
AVER(amsseg->free >= limitIndex - baseIndex);
amsseg->free -= limitIndex - baseIndex;
amsseg->newAlloc += limitIndex - baseIndex;
AVER(amsseg->freeGrains >= limitIndex - baseIndex);
amsseg->freeGrains -= limitIndex - baseIndex;
amsseg->newGrains += limitIndex - baseIndex;
PoolGenAccountForFill(&ams->pgen, AddrOffset(base, limit), FALSE);
}
@ -529,6 +532,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
PoolClass super;
Addr base, limit;
Arena arena;
AMS ams;
AMST amst;
Bool b;
Seg seg;
@ -540,6 +544,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
AVER(limitReturn != NULL);
/* other parameters are checked by next method */
arena = PoolArena(pool);
ams = Pool2AMS(pool);
amst = Pool2AMST(pool);
/* call next method */
@ -561,14 +566,14 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
Seg mergedSeg;
Res mres;
AMSUnallocateRange(seg, base, limit);
AMSUnallocateRange(ams, seg, base, limit);
mres = SegMerge(&mergedSeg, segLo, seg, withReservoirPermit);
if (ResOK == mres) { /* successful merge */
AMSAllocateRange(mergedSeg, base, limit);
AMSAllocateRange(ams, mergedSeg, base, limit);
/* leave range as-is */
} else { /* failed to merge */
AVER(amst->failSegs); /* deliberate fails only */
AMSAllocateRange(seg, base, limit);
AMSAllocateRange(ams, seg, base, limit);
}
}
@ -579,13 +584,13 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
Addr mid = AddrAdd(base, half);
Seg segLo, segHi;
Res sres;
AMSUnallocateRange(seg, mid, limit);
AMSUnallocateRange(ams, seg, mid, limit);
sres = SegSplit(&segLo, &segHi, seg, mid, withReservoirPermit);
if (ResOK == sres) { /* successful split */
limit = mid; /* range is lower segment */
} else { /* failed to split */
AVER(amst->failSegs); /* deliberate fails only */
AMSAllocateRange(seg, mid, limit);
AMSAllocateRange(ams, seg, mid, limit);
}
}

View file

@ -634,33 +634,6 @@ failRootFlip:
return res;
}
/* traceCopySizes -- preserve size information for later use
*
* A PoolGen's newSize is important information that we want to emit in
* a diagnostic message at TraceStart. In order to do that we must copy
* the information before Whiten changes it. This function does that.
*/
static void traceCopySizes(Trace trace)
{
Ring node, nextNode;
Index i;
Arena arena = trace->arena;
RING_FOR(node, &arena->chainRing, nextNode) {
Chain chain = RING_ELT(Chain, chainRing, node);
for(i = 0; i < chain->genCount; ++i) {
Ring n, nn;
GenDesc desc = &chain->gens[i];
RING_FOR(n, &desc->locusRing, nn) {
PoolGen gen = RING_ELT(PoolGen, genRing, n);
gen->newSizeAtCreate = gen->newSize;
}
}
}
return;
}
/* TraceCreate -- create a Trace object
*
@ -677,6 +650,17 @@ static void traceCopySizes(Trace trace)
* This code is written to be adaptable to allocating Trace objects
* dynamically. */
static void TraceCreatePoolGen(GenDesc gen)
{
Ring n, nn;
RING_FOR(n, &gen->locusRing, nn) {
PoolGen pgen = RING_ELT(PoolGen, genRing, n);
EVENT11(TraceCreatePoolGen, gen, gen->capacity, gen->mortality, gen->zones,
pgen->pool, pgen->totalSize, pgen->freeSize, pgen->newSize,
pgen->oldSize, pgen->newDeferredSize, pgen->oldDeferredSize);
}
}
Res TraceCreate(Trace *traceReturn, Arena arena, int why)
{
TraceId ti;
@ -747,7 +731,24 @@ found:
/* .. _request.dylan.160098: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160098 */
ShieldSuspend(arena);
traceCopySizes(trace);
STATISTIC_STAT ({
/* Iterate over all chains, all GenDescs within a chain, and all
* PoolGens within a GenDesc. */
Ring node;
Ring nextNode;
RING_FOR(node, &arena->chainRing, nextNode) {
Chain chain = RING_ELT(Chain, chainRing, node);
Index i;
for (i = 0; i < chain->genCount; ++i) {
GenDesc gen = &chain->gens[i];
TraceCreatePoolGen(gen);
}
}
/* Now do topgen GenDesc, and all PoolGens within it. */
TraceCreatePoolGen(&arena->topGen);
});
*traceReturn = trace;
return ResOK;
@ -1564,9 +1565,9 @@ double TraceWorkFactor = 0.25;
*
* TraceStart should be passed a trace with state TraceINIT, i.e.,
* recently returned from TraceCreate, with some condemned segments
* added. mortality is the fraction of the condemned set expected to
* survive. finishingTime is relative to the current polling clock, see
* <design/arena/#poll.clock>.
* added. mortality is the fraction of the condemned set expected not
* to survive. finishingTime is relative to the current polling clock,
* see <design/arena/#poll.clock>.
*
* .start.black: All segments are black w.r.t. a newly allocated trace.
* However, if TraceStart initialized segments to black when it
@ -1588,18 +1589,6 @@ static Res rootGrey(Root root, void *p)
}
static void TraceStartPoolGen(Chain chain, GenDesc desc, Bool top, Index i)
{
Ring n, nn;
RING_FOR(n, &desc->locusRing, nn) {
PoolGen gen = RING_ELT(PoolGen, genRing, n);
EVENT10(TraceStartPoolGen, chain, BOOLOF(top), i, desc,
desc->capacity, desc->mortality, desc->zones,
gen->pool, gen->totalSize, gen->newSizeAtCreate);
}
}
/* TraceStart -- start a trace whose white set has been established
*
* The main job of TraceStart is to set up the grey list for a trace. The
@ -1664,26 +1653,6 @@ Res TraceStart(Trace trace, double mortality, double finishingTime)
} while (SegNext(&seg, arena, seg));
}
STATISTIC_STAT ({
/* @@ */
/* Iterate over all chains, all GenDescs within a chain, */
/* (and all PoolGens within a GenDesc). */
Ring node;
Ring nextNode;
Index i;
RING_FOR(node, &arena->chainRing, nextNode) {
Chain chain = RING_ELT(Chain, chainRing, node);
for(i = 0; i < chain->genCount; ++i) {
GenDesc desc = &chain->gens[i];
TraceStartPoolGen(chain, desc, FALSE, i);
}
}
/* Now do topgen GenDesc (and all PoolGens within it). */
TraceStartPoolGen(NULL, &arena->topGen, TRUE, 0);
});
res = RootsIterate(ArenaGlobals(arena), rootGrey, (void *)trace);
AVER(res == ResOK);

View file

@ -37,9 +37,6 @@ typedef union PagePoolUnion {
*
* .tract: Tracts represent the grains of memory allocation from
* the arena. See <design/arena/>.
*
* .bool: The hasSeg field is a boolean, but can't be represented
* as type Bool. See <design/arena/#tract.field.hasSeg>.
*/
typedef struct TractStruct { /* Tract structure */
@ -47,7 +44,7 @@ typedef struct TractStruct { /* Tract structure */
void *p; /* pointer for use of owning pool */
Addr base; /* Base address of the tract */
TraceSet white : TraceLIMIT; /* traces for which tract is white */
unsigned hasSeg : 1; /* does tract have a seg in p? See .bool */
BOOLFIELD(hasSeg); /* does tract have a seg in p? */
} TractStruct;

View file

@ -237,7 +237,7 @@ _`.tract.structure`: The tract structure definition looks like this::
void *p; /* pointer for use of owning pool */
Addr base; /* Base address of the tract */
TraceSet white : TRACE_MAX; /* traces for which tract is white */
unsigned int hasSeg : 1; /* does tract have a seg in p? */
BOOLFIELD(hasSeg); /* does tract have a seg in p? */
} TractStruct;
_`.tract.field.pool`: The pool.pool field indicates to which pool the tract
@ -262,10 +262,9 @@ use it for any purpose.
_`.tract.field.hasSeg`: The ``hasSeg`` bit-field is a Boolean which
indicates whether the ``p`` field is being used by the segment module.
If this field is ``TRUE``, then the value of ``p`` is a ``Seg``.
``hasSeg`` is typed as an ``unsigned int``, rather than a ``Bool``.
This ensures that there won't be sign conversion problems when
converting the bit-field value.
If this field is ``TRUE``, then the value of ``p`` is a ``Seg``. See
design.mps.type.bool.bitfield for why this is declared using the
``BOOLFIELD`` macro.
_`.tract.field.base`: The base field contains the base address of the
memory represented by the tract.
@ -273,7 +272,7 @@ memory represented by the tract.
_`.tract.field.white`: The white bit-field indicates for which traces
the tract is white (`.req.fun.trans.white`_). This information is also
stored in the segment, but is duplicated here for efficiency during a
call to ``TraceFix`` (see design.mps.trace.fix).
call to ``TraceFix()`` (see design.mps.trace.fix).
_`.tract.limit`: The limit of the tract's memory may be determined by
adding the arena alignment to the base address.

View file

@ -24,215 +24,196 @@ the MPM and the pool class implementations.
Pirinen, 1999-07-20.
Fields
------
_`.field`: These fields are provided by pool classes as part of the
``PoolClass`` object (see impl.h.mpmst.class). They form part of the
interface which allows the MPM to treat pools in a uniform manner.
_`.field.name`: The ``name`` field should be a short, pithy, cryptic
name for the pool class. It should typically start with ``"A"`` if
memory is managed by the garbage collector, and ``"M"`` if memory is
managed by alloc/free. Examples are "AMC", "MV".
_`.field.attr`: The ``attr`` field must be a bitset of pool class
attributes. See `design.mps.type.attr`_.
.. _design.mps.type.attr: type
_`.field.size`: The ``size`` field is the size of the pool instance
structure. For the ``PoolFoo`` class this can reasonably be expected
to be ``sizeof(PoolFooStruct)``.
_`.field.offset`: The ``offset`` field is the offset into the pool
instance structure of the generic ``PoolStruct``. Typically this field
is called ``poolStruct``, so something like ``offsetof(PoolFooStruct,
poolStruct)`` is typical. If possible, arrange for this to be zero.
Methods
-------
_`.methods`: These methods are provided by pool classes as part of the
``PoolClass`` object (see impl.h.mpmst.class). They form the interface
which allows the MPM to treat pools in a uniform manner.
_`.method`: These methods are provided by pool classes as part of the
``PoolClass`` object (see impl.h.mpmst.class). They form part of the
interface which allows the MPM to treat pools in a uniform manner.
The following description is based on the definition of the
``PoolClassStruct`` (impl.h.mpmst.class).
_`.method.unused`: If a pool class is not required to provide a
certain method, the class should assign the appropriate ``PoolNo``
method for that method to ensure that erroneous calls are detected. It
is not acceptable to use ``NULL``.
If a class is not required to provide a certain method then it should
set the appropriate ``PoolNo*`` method for that method. It is not
acceptable to use ``NULL``.
_`.method.trivial`: If a pool class is required to provide a certain
method, but the class provides no special behaviour in this case, it
should assign the appropriate ``PoolTriv`` method.
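A hedged sketch of this convention (the assignments and the exact
``PoolNo``/``PoolTriv`` function names are illustrative, not a
definitive list)::

    /* This class does not support manual freeing, so erroneous calls
     * to PoolFree() must be detected; buffer initialization needs no
     * special behaviour. */
    class->free = PoolNoFree;
    class->bufferInit = PoolTrivBufferInit;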
.. note::
_`.method.init`: The ``init`` field is the pool class's init method.
This method is called via the generic function ``PoolInit()``, which
is in turn called by ``PoolCreate()``. The generic function allocates
the pool's structure (using the ``size`` and ``offset`` fields),
initializes the ``PoolStruct`` (generic part), then calls the ``init``
method to do any class-specific initialization. Typically this means
initializing the fields in the pool instance structure. If ``init``
returns a non-OK result code the instance structure will be
deallocated and the code returned to the caller of ``PoolInit()`` or
``PoolCreate()``. Note that the ``PoolStruct`` isn't made fully valid
until ``PoolInit()`` returns, so the ``init`` method must not call
``PoolCheck()``.
There are also some ``PoolTriv*`` methods. David Jones, 1997-08-19.
_`.method.finish`: The ``finish`` field is the pool class's finish
method. This method is called via the generic function
``PoolFinish()``, which is in turn called by ``PoolDestroy()``. It is
expected to finalise the pool instance structure, release any
resources allocated to the pool, and release the memory associated
with the pool instance structure. Note that the pool is valid when it
is passed to ``finish``. The ``PoolStruct`` (generic part) is finished
when the pool class's ``finish`` method returns.
_`.method.name`: The name field should be a short, pithy, cryptic name
for the pool class. Examples are "AMC", "MV".
_`.method.alloc`: The ``alloc`` field is the pool class's allocation
method. This method is called via the generic function
``PoolAlloc()``. It is expected to return a pointer to a fresh (that
is, not overlapping with any other live object) object of the required
size. Failure to allocate should be indicated by returning an
appropriate error code, and in such a case, ``*pReturn`` should not be
updated. Pool classes are not required to provide this method.
The ``size`` field is the size of the pool instance structure. For the
``Foo`` ``PoolClass`` this can reasonably be expected to be
``sizeof(FooStruct)``.
The ``offset`` field is the offset into the pool instance structure of
the generic ``PoolStruct``. Typically this field is called
``poolStruct``, so something like ``offsetof(FooStruct, poolStruct)``
is typical. If possible, arrange for this to be zero.
The ``init`` field is the class's init method. This method is called
via the generic function ``PoolInit()``, which is in turn called by
``PoolCreate()``. The generic function allocates the pool's structure
(using the size and offset information), initializes the
``PoolStruct`` (generic part) then calls the ``init`` method to do any
class-specific initialization. Typically this means initializing the
fields in the class instance structure. If ``init`` returns a non-OK
result code the instance structure will be deallocated and the code
returned to the caller of ``PoolInit()``` or ``PoolCreate()``. Note that
the ``PoolStruct`` isn't made fully valid until ``PoolInit()`` returns.
The ``finish`` field is the class's finish method. This method is
called via the generic function ``PoolFinish()``, which is in turn
called by ``PoolDestroy()``. It is expected to finalise the pool
instance structure and release any resources allocated to the pool, it
is expected to release the memory associated with the pool instance
structure. Note that the pool is valid when it is passed to
``finish``. The ``PoolStruct`` (generic part) is finished off when the
class's ``finish`` method returns.
The ``alloc`` field is the class's allocation method. This method is
called via the generic function ``PoolAlloc()``. It is expected to
return a pointer to a fresh (that is, not overlapping with any other
live object) object of the required size. Failure to allocate should
be indicated by returning an appropriate Error code, and in such a
case, ``*pReturn`` should not be updated. Classes are not required to
provide this method, but they should provide at least one of ``alloc``
and ``bufferCreate``.
.. note::
There is no ``bufferCreate``. Gareth Rees, 2013-04-14.
The ``free_`` field is the class's free method. This is intended
primarily for manual style pools. this method is called via the
generic function ``PoolFree()``. The parameters to this method are
_`.method.free`: The ``free`` method is the pool class's free method.
This is intended primarily for manual style pools. This method is
called via the generic function ``PoolFree()``. The parameters are
required to correspond to a previous allocation request (possibly via
a buffer). It is an assertion by the client that the indicated object
is no longer required and the resources associated with it can be
recycled. Pools are not required to provide this method.
recycled. Pool classes are not required to provide this method.
The ``bufferInit`` field is the class's buffer initialization method.
It is called by the generic function ``BufferCreate()``, which allocates
the buffer descriptor and initializes the generic fields. The pool may
optionally adjust these fields or fill in extra values when
``bufferInit`` is called, but often pools set ``bufferInit`` to
``PoolTrivBufferInit()`` because they don't need to do any. If
``bufferInit`` returns a result code other than ``ResOK``, the buffer
structure is deallocated and the code is returned to the called of
``BufferCreate()``. Note that the ``BufferStruct`` isn't fully valid
until ``BufferCreate()`` returns.
_`.method.bufferInit`: The ``bufferInit`` method is the pool class's
buffer initialization method. It is called by the generic function
``BufferCreate()``, which allocates the buffer descriptor and
initializes the generic fields. The pool may optionally adjust these
fields or fill in extra values. If ``bufferInit`` returns a result
code other than ``ResOK``, the buffer structure is deallocated and the
result code is returned to the caller of ``BufferCreate()``. Note that
the ``BufferStruct`` isn't fully valid until ``BufferCreate()``
returns. Pool classes are not required to provide this method.
The ``bufferFinish`` field is the class's buffer finishing method. It
is called by the the generic function ``BufferDestroy()``. The pool is
expected to detach the buffer from any memory and prepare the buffer
for destruction. The class is expected to release the resources
associated with the buffer structure, and any unreserved memory in the
buffer may be recycled. It is illegal for a buffer to be destroyed
when there are pending allocations on it (that is, an allocation has
been reserved, but not committed) and this is checked in the generic
function. This method should be provided if and only if
``bufferCreate`` is provided. [there is no ``bufferCreate`` -- drj
1997-08-19]
_`.method.bufferFinish`: The ``bufferFinish`` method is the pool
class's buffer finishing method. It is called by the generic
function ``BufferDestroy()``. The pool is expected to detach the
buffer from any memory and prepare the buffer for destruction. The
pool is expected to release the resources associated with the buffer
structure, and any unreserved memory in the buffer may be recycled. It
is illegal for a buffer to be destroyed when there are pending
allocations on it (that is, an allocation has been reserved, but not
committed) and this is checked in the generic function. This method
must be provided if and only if ``bufferInit`` is provided.
The ``condemn`` field is used to condemn a pool. This method is called
via the generic function ``PoolCondemn()``. The class is expected to
condemn a subset (possible the whole set) of objects it manages and
participate in a global trace to determine liveness. The class should
register the refsig of the condemned set with the trace using
``TraceCondemn()``. The class should expect fix requests (via the fix
method below) during a global trace. Classes are not required to
provide this method, but it is expected that automatic style classes
will. This interface is expected to change in the future.
_`.method.access`: The ``access`` method is used to handle client
access. This method is called via the generic functions
``ArenaAccess()`` and ``PoolAccess()``. It indicates that the client
has attempted to access the specified region, but has been denied and
the request trapped due to a protection state. The pool should perform
any work necessary to remove the protection whilst still preserving
appropriate invariants (typically this will be scanning work). Pool
classes are not required to provide this method, and not doing so
indicates they never protect any memory managed by the pool.
.. note::
_`.method.whiten`: The ``whiten`` method is used to condemn a segment
belonging to a pool. This method is called via the generic function
``PoolWhiten()``. The pool is expected to condemn a subset (but
typically all) of the objects in the segment and prepare the segment
for participation in a global trace to determine liveness. The pool
should expect fix requests (via the ``fix`` method below) during a
global trace. Pool classes that automatically reclaim dead objects
must provide this method, and must additionally set the ``AttrGC``
attribute.
``condemn`` now takes an action and a segment and should condemn
the segment (turn it white) if it corresponds to the
interpretation of the action. David Jones, 1997-08-19.
_`.method.grey`: The ``grey`` method is used to greyen a segment
belonging to a pool. This method is called via the generic function
``PoolGrey()``. The pool should set all of the objects in the segment
(excepting any set that has been condemned in this trace) to be grey,
that is, ready for scanning. The pool should arrange that any
appropriate invariants are preserved, possibly by using the protection
interface (see `design.mps.prot`_). Pool classes are not required to
provide this method, and not doing so indicates that all instances of
this class will have no fixable or traceable references in them.
It is now called ``whiten``. David Jones, 1998-02-02.
.. _design.mps.prot: prot
The ``mark`` field is used to mark an entire pool. This method is
called via the generic function ``PoolMark()``. The class should
consider all of its objects, except any set that has been condemned in
this trace, to be marked, that is ready for scanning. The class should
arrange that any appropriate invariants are preserved possibly by the
Protection interface. Classes are not required to provide this method,
and not doing so indicates that all instances of this class will have
no fixable or traceable references in them.
_`.method.blacken`: The ``blacken`` method is used to blacken a
segment belonging to a pool. This method is called via the generic
function ``PoolBlacken()`` when it is known that the segment cannot
refer to the white set. The pool must blacken all grey objects in the
segment. Pool classes are not required to provide this method, and not
doing so indicates that all instances of this class will have no
fixable or traceable references in them.
.. note::
_`.method.scan`: The ``scan`` method is used to scan a segment. This
method is called via the generic function ``PoolScan()``. The pool
must scan all the known grey objects on the segment and it may also
accumulate a summary of *all* the objects on the segment. If it
succeeds in accumulating such a summary it must indicate that it has
done so by setting the ``totalReturn`` parameter to ``TRUE``. Pool
classes are not required to provide this method, and not doing so
indicates that all instances of this class will have no fixable or
traceable references in them.
``mark`` is no longer present: ``grey`` turns an entire segment
grey. David Jones, 1997-08-19.
_`.method.fix`: The ``fix`` method is used to perform fixing. This
method is called via the generic function ``TraceFix()``. It indicates
that the specified reference has been found and the pool should
consider the object to be live. There is provision for adjusting the
value of the reference (to allow for classes that move objects).
Pool classes that automatically
reclaim dead objects must provide this method, and must additionally
set the ``AttrGC`` attribute. Pool classes that may move objects must
also set the ``AttrMOVINGGC`` attribute.
The ``scan`` field is used to perform scanning. This method is called
via the generic function ``PoolScan()``. The class should scan the
segment specified. It should scan all the known live (marked, that is,
those objects on which fix has been called) on the segment and
accumulate a summary of *all* the objects on the segment. This means
that mark and sweep pools may have to jump through hoops a little bit
(see design.mps.poolasm.summary for a pedagogical example). Classes
are not required to provide this method, and not doing so indicates
that all instances of this class will have no fixable or traceable
reference in them.
_`.method.fixEmergency`: The ``fixEmergency`` method is used to
perform fixing in "emergency" situations. It must complete its work
without allocating memory (perhaps by using some approximation, or by
running more slowly). Pool classes must provide this method if they
provide the ``fix`` method.
.. note::
_`.method.reclaim`: The ``reclaim`` method is used to reclaim memory
in a segment. This method is called via the generic function
``PoolReclaim()``. It indicates that any remaining white objects in
the segment have now been proved unreachable, hence are dead. The pool
should reclaim the resources associated with the dead objects. Pool
classes are not required to provide this method. If they do, they must
set the ``AttrGC`` attribute.
The ``scan`` method now takes an extra return parameter which
classes should use to indicate whether they scanned all objects in
segment or not. Classes should return summary only of object they
scanned. Caller of this method (``TraceScan()``) is responsible
for updating summaries correctly when not a total scan. Hence no
jumping through hoops required. David Jones, 1998-01-30.
_`.method.walk`: The ``walk`` method is used by the heap walker. The
``walk`` method should apply the visitor function (along with its
closure parameters and the object format) to all *black* objects in
the segment. Padding objects may or may not be included in the walk at
the class's discretion; in any case it will be the responsibility of
the client to do something sensible with padding objects. Forwarding
objects are never included in the walk. Pool classes need not provide
this method. If they do, they must set the ``AttrFMT`` attribute.
The ``fix`` field is used to perform fixing. This method is called via
the generic function ``TraceFix()``. It indicates that the specified
reference has been found and the class should consider the object
live. There is provision for adjusting the value of the reference (to
allow for classes that move objects). Classes are not required to
provide this method, and not doing so indicates that the class is not
automatic style (ie it does not use global tracing to determine
liveness).
The ``reclaim`` field is used to reclaim memory. This method is called
via the generic function ``PoolReclaim()``. It indicates that the trace
has fixed all references to reachable objects.
.. note::
Actually it indicates that any remaining white objects have now
been proved unreachable, hence are dead. David Jones, 1997-08-19.
The class should consider objects that have been condemned and not
fixed in this trace to be dead and may reclaim the resources
associated with them. Classes are not required to provide this method.
.. note::
``reclaim`` is now called on each segment. David Jones,
1997-08-19.
The ``access`` field is used to indicate client access. This method is
called via the generic functions ``SpaceAccess()`` and
``PoolAccess()``. It indicates that the client has attempted to access
the specified region, but has been denied and the request trapped due
to a protection state. The class should perform any work necessary to
remove the protection whilst still preserving appropriate invariants
(typically this will be scanning work). Classes are not required to
provide this method, and not doing so indicates they never protect any
memory managed by the pool.
.. note::
``access`` is no longer present. David Jones, 1997-08-19.
_`.method.act`: ``act`` is called when the MPM has decided to execute
an action that the class declared. The Class should arrange execution
of the associated work (usually by beginning an incremental trace).
_`.method.walk`: ``walk`` is used by the heap walker. ``walk`` is only
required to be implemented by classes which specify the AttrFMT
attribute (formatted pools). The ``walk`` method should apply the
passed in function (along with its closure variables (which are also
passed in) and the object format) to all *black* objects in the
segment. Padding objects may or may not be included in the walk at the
classes discretion, in any case in will be the responsibility of the
client to do something sensible with padding objects.
.. note::
What about broken hearts? David Jones, 1998-01-30.
The ``describe`` field is used to print out a description of a pool.
This method is called via the generic function ``PoolDescribe()``. The
class should emit an textual description of the pool's contents onto
the specified stream. Each line should begin with two spaces. Classes
are not required to provide this method.
_`.method.describe`: The ``describe`` field is used to print out a
description of a pool. This method is called via the generic function
``PoolDescribe()``. The class should emit a textual description of
the pool's contents onto the specified stream. Each line should begin
with two spaces. Classes are not required to provide this method.
Events
@ -270,6 +251,8 @@ Document history
- 2013-03-12 GDR_ Converted to reStructuredText.
- 2014-06-08 GDR_ Bring method descriptions up to date.
.. _RB: http://www.ravenbrook.com/consultants/rb/
.. _GDR: http://www.ravenbrook.com/consultants/gdr/

View file

@ -70,6 +70,11 @@ an ``AVER()`` has fired. Naturally, if the information required for
the dump has been corrupted, it will fail, as softly as possible
(source @@@@).
_`.req.portable`: Client code that uses these features must be easily
portable to all the supported platforms. (Source: job003749_.)
.. _job003749: http://www.ravenbrook.com/project/mps/issue/job003749/
.. note::
There are more requirements, especially about memory dumps and
specified as a byte/word which is used repeatedly to fill the fencepost.
_`.fence.content.template`: The content could be given as a template
which is of the right size and is simply copied onto the fencepost.
_`.fence.content.template.repeat`: The content could be given as a
template which is copied repeatedly until the fencepost is full. (This
would avoid the need to specify different templates on different
architectures, and so help meet `.req.portable`_.)
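A minimal sketch of the repeated-copy approach (the function and
parameter names are illustrative, not the MPS implementation)::

    /* Fill the fencepost at [base, base + size) by repeating a
     * template of templateSize bytes. */
    static void fenceFill(unsigned char *base, size_t size,
                          const unsigned char *template, size_t templateSize)
    {
      size_t i;
      for (i = 0; i < size; ++i)
        base[i] = template[i % templateSize];
    }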
_`.fence.walk`: `.req.fencepost.check`_ requires the ability to find
all the allocated objects. In formatted pools, this is not a problem.
In unformatted pools, we could use the walker. It's a feasible
@ -233,14 +243,14 @@ to pools. In particular, clients will be able to use tagging and
fenceposting separately on each pool.
_`.fence.size`: Having fenceposts of adjustable size and pattern is
quite useful. We feel that restricting the size to an integral
multiple of the [pool or format?] alignment is harmless and simplifies
the implementation enormously.
useful. Restricting the size to an integral multiple of the [pool or
format?] alignment would simplify the implementation but breaks
`.req.portable`_.
_`.fence.template`: We use templates (`.fence.content.template`_) to
fill in the fenceposts, but we do not give any guarantees about the
location of the fenceposts, only that they're properly aligned. This
leaves us the opportunity to do tail-only fenceposting, if we choose.
location of the fenceposts. This leaves us the opportunity to do
tail-only fenceposting, if we choose.
_`.fence.slop`: [see impl.c.dbgpool.FenceAlloc @@@@]
@ -416,6 +426,8 @@ Document History
- 2013-04-14 GDR_ Converted to reStructuredText.
- 2014-04-09 GDR_ Added newly discovered requirement `.req.portable`_.
.. _RB: http://www.ravenbrook.com/consultants/rb/
.. _GDR: http://www.ravenbrook.com/consultants/gdr/

View file

@ -182,46 +182,117 @@ occupies.
Note that this zoneset can never shrink.
Parameters
..........
_`.param.intro`: A generation has two parameters, *capacity* and
*mortality*, specified by the client program.
_`.param.capacity`: The *capacity* of a generation is the amount of
*new* allocation in that generation (that is, allocation since the
last time the generation was condemned) that will cause the generation
to be collected by ``TracePoll()``.
_`.param.capacity.misnamed`: The name *capacity* is unfortunate since
it suggests that the total amount of memory in the generation will not
exceed this value. But that will only be the case for pool classes
that always promote survivors to another generation. When there is
*old* allocation in the generation (that is, prior to the last time
the generation was condemned), as there is in the case of non-moving
pool classes, the size of a generation is unrelated to its capacity.
_`.param.mortality`: The *mortality* of a generation is the proportion
(between 0 and 1) of memory in the generation that is expected to be
dead when the generation is collected. It is used in ``TraceStart()``
to estimate the amount of data that will have to be scanned in order
to complete the trace.
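For example, if 10 MiB is condemned in a generation whose mortality is
0.85, then about 10 MiB × (1 − 0.85) = 1.5 MiB of it is expected to
survive, and it is this survivor estimate that ``TraceStart()`` uses
when estimating the scanning work needed to complete the trace.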
Accounting
..........
- ``gen[N].mortality``
_`.accounting.intro`: Pool generations maintain the sizes of various
categories of data allocated in that generation for that pool. This
accounting information is reported via the event system, but also used
in two places:
- Specified by the client.
- TODO: fill in how this is used.
_`.accounting.poll`: ``ChainDeferral()`` uses the *new size* of each
generation to determine which generations in the chain are over
capacity and so might need to be collected via ``TracePoll()``.
- ``gen[N].capacity``
_`.accounting.condemn`: ``ChainCondemnAuto()`` uses the *new size* of
each generation to determine which generations in the chain will be
collected; it also uses the *total size* of the generation to compute
the mortality.
- Specified by the client.
- TODO: fill in how this is used.
_`.accounting.check`: Computing the new size for a pool generation is
far from straightforward: see job003772_ for some (former) errors in
this code. In order to assist with checking that this has been
computed correctly, the locus module uses a double-entry book-keeping
system to account for every byte in each pool generation. This uses
six accounts:
- ``amcSeg->new``
.. _job003772: http://www.ravenbrook.com/project/mps/issue/job003772/
- TODO: fill this in
_`.account.total`: Memory acquired from the arena.
- ``pgen->totalSize``:
_`.account.total.negated`: From the point of view of the double-entry
system, the *total* should be negative as it is owing to the arena,
but it is inconvenient to represent negative sizes, and so the
positive value is stored instead.
- incremented by ``AMCBufferFill()``;
- decremented by ``amcReclaimNailed()`` and ``AMCReclaim()``;
- added up by ``GenDescTotalSize(gen)``.
_`.account.total.negated.justification`: We don't have a type for
signed sizes; but if we represented it in two's complement using the
unsigned ``Size`` type then Clang's unsigned integer overflow detector
would complain.
- ``pgen->newSize``:
_`.account.free`: Memory that is not in use (free or lost to
fragmentation).
- incremented by ``AMCBufferFill()`` (*when not ramping*) and ``AMCRampEnd()``;
- decremented by ``AMCWhiten()``,
- added up by ``GenDescNewSize(gen)``.
_`.account.new`: Memory in use by the client program, allocated
since the last time the generation was condemned.
- ``gen[N].proflow``:
_`.account.old`: Memory in use by the client program, allocated
prior to the last time the generation was condemned.
- set to 1.0 by ``ChainCreate()``;
- ``arena->topGen.proflow`` set to 0.0 by ``LocusInit(arena)``;
- *The value of this field is never used*.
_`.account.newDeferred`: Memory in use by the client program,
allocated since the last time the generation was condemned, but which
should not cause collections via ``TracePoll()``. (Due to ramping; see
below.)
_`.account.oldDeferred`: Memory in use by the client program,
allocated prior to the last time the generation was condemned, but
which should not cause collections via ``TracePoll()``. (Due to
ramping; see below.)
- ``pgen->newSizeAtCreate``:
_`.accounting.op`: The following operations are provided:
_`.accounting.op.alloc`: Allocate a segment in a pool generation.
Debit *total*, credit *free*. (But see `.account.total.negated`_.)
_`.accounting.op.free`: Free a segment. First, ensure that the
contents of the segment are accounted as free, by artificially ageing
any memory accounted as *new* or *newDeferred* (see
`.accounting.op.age`_) and then artificially reclaiming any memory
accounted as *old* or *oldDeferred* (see `.accounting.op.reclaim`_).
Finally, debit *free*, credit *total*. (But see
`.account.total.negated`_.)
_`.accounting.op.fill`: Allocate memory, for example by filling a
buffer. Debit *free*, credit *new* or *newDeferred*.
_`.accounting.op.empty`: Deallocate memory, for example by emptying
the unused portion of a buffer. Debit *new* or *newDeferred*, credit
*free*.
_`.accounting.op.age`: Condemn memory. Debit *new* or *newDeferred*,
credit *old* or *oldDeferred*.
_`.accounting.op.reclaim`: Reclaim dead memory. Debit *old* or
*oldDeferred*, credit *free*.
_`.accounting.op.undefer`: Stop deferring the accounting of memory. Debit *oldDeferred*, credit *old*. Debit *newDeferred*, credit *new*.
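For example, the AMS code elsewhere in this change keeps the *fill* and
*empty* operations up to date with single calls (a sketch; reading the
final ``FALSE`` argument as selecting the non-deferred accounts is an
assumption)::

    /* Buffer fill: debit *free*, credit *new*. */
    PoolGenAccountForFill(&ams->pgen, AddrOffset(base, limit), FALSE);

    /* Buffer empty: debit *new*, credit *free*. */
    PoolGenAccountForEmpty(&ams->pgen, AddrOffset(base, limit), FALSE);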
- set by ``traceCopySizes()`` (that is its purpose);
- output in the ``TraceStartPoolGen`` telemetry event.
Ramps
.....
@ -296,29 +367,31 @@ Reclaiming any AMC segment:
Now, some deductions:
#. When OUTSIDE, the count is always zero, because (a) it starts that
way, and the only ways to go OUTSIDE are (b) by leaving an outermost
ramp (count goes to zero) or (c) by reclaiming when the count is zero.
way, and the only ways to go OUTSIDE are (b) by leaving an
outermost ramp (count goes to zero) or (c) by reclaiming when the
count is zero.
#. When BEGIN, the count is never zero (consider the transitions to
BEGIN and the transition to zero).
BEGIN and the transition to zero).
#. When RAMPING, the count is never zero (again consider transitions to
RAMPING and the transition to zero).
#. When RAMPING, the count is never zero (again consider transitions
to RAMPING and the transition to zero).
#. When FINISH, the count can be anything (the transition to FINISH has
zero count, but the Enter transition when FINISH can change that and
then it can increment to any value).
#. When FINISH, the count can be anything (the transition to FINISH
has zero count, but the Enter transition when FINISH can change
that and then it can increment to any value).
#. When COLLECTING, the count can be anything (from the previous fact,
and the transition to COLLECTING).
and the transition to COLLECTING).
#. *This is a bug!!* The ramp generation is not always reset (to forward
to the after-ramp generation). If we get into FINISH and then see
another ramp before the next condemnation of the ramp generation, we
will Enter followed by Leave. The Enter will keep us in FINISH, and
the Leave will take us back to OUTSIDE, skipping the transition to the
COLLECTING state which is what resets the ramp generation forwarding
buffer. [TODO: check whether I made an issue and/or fixed it; NB 2013-06-04]
#. *This is a bug!!* The ramp generation is not always reset (to
forward to the after-ramp generation). If we get into FINISH and
then see another ramp before the next condemnation of the ramp
generation, we will Enter followed by Leave. The Enter will keep us
in FINISH, and the Leave will take us back to OUTSIDE, skipping the
transition to the COLLECTING state which is what resets the ramp
generation forwarding buffer. [TODO: check whether I made an issue
and/or fixed it; NB 2013-06-04]
The simplest change to fix this is to change the behaviour of the Leave
transition, which should only take us OUTSIDE if we are in BEGIN or

View file

@ -72,6 +72,8 @@ Interface. ``mps_addr_t`` is defined to be the same as ``void *``, so
using the MPS C Interface confines the memory manager to the same
address space as the client data.
_`.addr.readonly`: For read-only addresses, see `.readonlyaddr`_.
``typedef Word Align``
@ -89,28 +91,26 @@ C Interface.
``typedef unsigned Attr``
_`.attr`: Pool attributes. A bitset of pool or pool class
attributes, which are:
_`.attr`: Pool attributes. A bitset of pool class attributes, which
are:
=================== ===================================================
Attribute Description
=================== ===================================================
``AttrALLOC`` Supports the ``PoolAlloc`` interface.
``AttrBUF`` Supports the buffer interface.
``AttrFMT`` Contains formatted objects.
Used to decide which pools to walk.
``AttrFREE`` Supports the ``PoolFree`` interface.
``AttrGC`` Is garbage collecting, that is, parts may be
reclaimed. Used to decide which segments are
condemned.
``AttrMOVINGGC`` Is moving, that is, objects may move in memory.
Used to update the set of zones that might have
moved and so implement location dependency.
``AttrSCAN`` Contains references and must be scanned.
=================== ===================================================
There is an attribute field in the pool class (``PoolClassStruct``)
which declares the attributes of that class.
which declares the attributes of that class. See `design.mps.class-interface.field.attr`_.
.. _design.mps.class-interface.field.attr: class-interface
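For example, a pool class whose blocks are formatted, contain
references, and are automatically reclaimed by a moving collector might
plausibly declare an attribute set like::

    AttrFMT | AttrSCAN | AttrGC | AttrMOVINGGC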
``typedef int Bool``
@ -155,9 +155,17 @@ _`.bool.bitfield`: When a Boolean needs to be stored in a bitfield,
the type of the bitfield must be ``unsigned:1``, not ``Bool:1``.
(That's because the two values of the type ``Bool:1`` are ``0`` and
``-1``, which means that assigning ``TRUE`` would require a sign
conversion.) To avoid warnings about loss of data from GCC with the
``-Wconversion`` option, ``misc.h`` provides the ``BOOLOF`` macro for
coercing a value to an unsigned single-bit field.
conversion.) To make it clear why this is done, ``misc.h`` provides
the ``BOOLFIELD`` macro.
_`.bool.bitfield.assign`: To avoid warnings about loss of data from
GCC with the ``-Wconversion`` option, ``misc.h`` provides the
``BOOLOF`` macro for coercing a value to an unsigned single-bit field.
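A hedged sketch of the intended usage (the structure and function here
are illustrative)::

    typedef struct ExampleStruct {
      BOOLFIELD(marked);            /* Boolean bitfield; see .bool.bitfield */
    } ExampleStruct;

    static void exampleSetMarked(ExampleStruct *example, Bool marked)
    {
      example->marked = BOOLOF(marked);  /* see .bool.bitfield.assign */
    }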
_`.bool.bitfield.check`: A Boolean bitfield cannot have an incorrect
value, and if you call ``BoolCheck()`` on such a bitfield then GCC 4.2
issues the warning "comparison is always true due to limited range of
data type". When avoiding such a warning, reference this tag.
``typedef unsigned BufferMode``
@ -373,6 +381,13 @@ their integer values.
_`.rankset`: ``RankSet`` is a set of ranks, represented as a bitset.
``typedef const struct AddrStruct *ReadonlyAddr``
_`.readonlyaddr`: ``ReadonlyAddr`` is the type used for managed
addresses that an interface promises it will only read through, never
write. Otherwise it is identical to ``Addr``.
``typedef Addr Ref``
_`.ref`: ``Ref`` is a reference to a managed object (as opposed to any

View file

@ -22,11 +22,11 @@ TYPES = '''
Arena Attr Bool BootBlock BT Buffer BufferMode Byte Chain Chunk
Clock Compare Count Epoch FindDelete Format FrameState Fun GenDesc
Globals Index Land LD Lock Message MessageType MutatorFaultContext
Page Pointer Pool PoolGen PThreadext Range Rank RankSet Ref RefSet
Res Reservoir Ring Root RootMode RootVar ScanState Seg SegBuf
SegPref SegPrefKind Serial Shift Sig Size Space SplayNode
SplayTree StackContext Thread Trace TraceId TraceSet TraceStartWhy
TraceState ULongest VM Word ZoneSet
Page Pointer Pool PoolGen PThreadext Range Rank RankSet
ReadonlyAddr Ref RefSet Res Reservoir Ring Root RootMode RootVar
ScanState Seg SegBuf SegPref SegPrefKind Serial Shift Sig Size
Space SplayNode SplayTree StackContext Thread Trace TraceId
TraceSet TraceStartWhy TraceState ULongest VM Word ZoneSet
'''

View file

@ -24,8 +24,8 @@ except for blocks that are :term:`pinned <pinning>` by
It uses :term:`generational garbage collection`. That is, it exploits
assumptions about object lifetimes and inter-connection variously
referred to as "the generational hypothesis". In particular, the
following tendencies will be efficiently exploited by an AMC pool:
referred to as "the :term:`generational hypothesis`". In particular,
the following tendencies will be efficiently exploited by an AMC pool:
- most objects die young;
@ -72,8 +72,10 @@ AMC properties
* Blocks are :term:`scanned <scan>`.
* Blocks may only be referenced by :term:`base pointers` (unless they
have :term:`in-band headers`).
* Blocks may be referenced by :term:`interior pointers` (unless
:c:macro:`MPS_KEY_INTERIOR` is set to ``FALSE``, in which case only
:term:`base pointers`, or :term:`client pointers` if the blocks
have :term:`in-band headers`, are supported).
* Blocks may be protected by :term:`barriers (1)`.

View file

@ -180,9 +180,11 @@ AMS interface
class.
When creating a debugging AMS pool, :c:func:`mps_pool_create_k`
takes three keyword arguments: :c:macro:`MPS_KEY_FORMAT` and
:c:macro:`MPS_KEY_CHAIN` are as described above, and
:c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` specifies the debugging
accepts the following keyword arguments:
:c:macro:`MPS_KEY_FORMAT`, :c:macro:`MPS_KEY_CHAIN`,
:c:macro:`MPS_KEY_GEN`, and
:c:macro:`MPS_KEY_AMS_SUPPORT_AMBIGUOUS` are as described above,
and :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` specifies the debugging
options. See :c:type:`mps_pool_debug_option_s`.
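For example (a sketch, assuming an existing ``arena``, ``format``, and
``chain``, and assuming that the debugging pool class is obtained with
:c:func:`mps_class_ams_debug`)::

    mps_pool_debug_option_s debug_options = { "fence", 5, "free", 4 };
    mps_pool_t pool;
    mps_res_t res;
    MPS_ARGS_BEGIN(args) {
        MPS_ARGS_ADD(args, MPS_KEY_FORMAT, format);
        MPS_ARGS_ADD(args, MPS_KEY_CHAIN, chain);
        MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, &debug_options);
        res = mps_pool_create_k(&pool, arena, mps_class_ams_debug(), args);
    } MPS_ARGS_END(args);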
.. deprecated:: starting with version 1.112.

View file

@ -115,7 +115,7 @@ references (1)`.
May contain ambiguous references? [4]_, no, ---, no, no, ---, ---, ---, ---, ---, no
May contain weak references? [4]_, no, ---, no, yes, ---, ---, ---, ---, ---, no
Allocations fixed or variable in size?, var, var, var, var, var, fixed, var, var, var, var
Alignment? [5]_, conf, conf, conf, conf, conf, [6]_, [6]_, [7]_, [7]_, conf
Alignment? [5]_, conf, conf, conf, conf, conf, [6]_, conf, [7]_, [7]_, conf
Dependent objects? [8]_, no, ---, no, yes, ---, ---, ---, ---, ---, no
May use remote references? [9]_, no, ---, no, no, ---, ---, ---, ---, ---, no
Blocks are automatically managed? [10]_, yes, yes, yes, yes, yes, no, no, no, no, no
@ -151,13 +151,13 @@ references (1)`.
.. [5] "Alignment" is "conf" if the client program may specify
:term:`alignment` for each pool.
.. [6] The alignment of blocks allocated from :ref:`pool-mv` pools
is platform-dependent.
.. [6] The alignment of blocks allocated from :ref:`pool-mfs`
pools is the platform's :term:`natural alignment`,
:c:macro:`MPS_PF_ALIGN`.
.. [7] :ref:`pool-mvt` and :ref:`pool-mvff` pools have
configurable alignment, but it may not be smaller than the
:term:`natural alignment` for the :term:`platform` (see
:c:macro:`MPS_PF_ALIGN`).
configurable alignment, but it may not be smaller than
``sizeof(void *)``.
.. [8] In pools with this property, each object may specify an
:term:`dependent object` which the client program

View file

@ -38,9 +38,7 @@ MV properties
* Allocations may be variable in size.
* The :term:`alignment` of blocks is not configurable: it is the
:term:`natural alignment` of the platform (see
:c:macro:`MPS_PF_ALIGN`).
* The :term:`alignment` of blocks is configurable.
* Blocks do not have :term:`dependent objects`.
@ -73,7 +71,13 @@ MV interface
:term:`pool`.
When creating an MV pool, :c:func:`mps_pool_create_k` may take
three :term:`keyword arguments`:
the following :term:`keyword arguments`:
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc` or
:c:func:`mps_free`, it will be rounded up to the pool's alignment.
* :c:macro:`MPS_KEY_EXTEND_BY` (type :c:type:`size_t`,
default 65536) is the :term:`size` of segment that the pool will
@ -119,11 +123,11 @@ MV interface
class.
When creating a debugging MV pool, :c:func:`mps_pool_create_k`
takes four keyword arguments: :c:macro:`MPS_KEY_EXTEND_SIZE`,
:c:macro:`MPS_KEY_MEAN_SIZE`, :c:macro:`MPS_KEY_MAX_SIZE` are as
described above, and :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS`
specifies the debugging options. See
:c:type:`mps_pool_debug_option_s`.
takes the following keyword arguments: :c:macro:`MPS_KEY_ALIGN`,
:c:macro:`MPS_KEY_EXTEND_SIZE`, :c:macro:`MPS_KEY_MEAN_SIZE`,
:c:macro:`MPS_KEY_MAX_SIZE` are as described above, and
:c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` specifies the debugging
options. See :c:type:`mps_pool_debug_option_s`.
.. deprecated:: starting with version 1.112.

View file

@ -80,7 +80,7 @@ MVFF properties
* Allocations may be variable in size.
* The :term:`alignment` of blocks is configurable, but may not be
smaller than the :term:`natural alignment` of the platform.
smaller than ``sizeof(void *)``.
* Blocks do not have :term:`dependent objects`.
@ -127,10 +127,10 @@ MVFF interface
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc` or
:c:func:`mps_free`, it will be rounded up to the pool's alignment.
The minimum alignment supported by pools of this class is
``sizeof(void *)``.
the pool. If an unaligned size is passed to :c:func:`mps_alloc`
or :c:func:`mps_free`, it will be rounded up to the pool's
alignment. The minimum alignment supported by pools of this
class is ``sizeof(void *)``.
* :c:macro:`MPS_KEY_SPARE` (type :c:type:`double`, default 0.75)
is the maximum proportion of memory that the pool will keep

View file

@ -78,6 +78,9 @@ MVT properties
* Allocations may be variable in size.
* The :term:`alignment` of blocks is configurable, but may not be
smaller than ``sizeof(void *)``.
* Blocks do not have :term:`dependent objects`.
* Blocks are not automatically :term:`reclaimed`.
@ -117,7 +120,7 @@ MVT interface
the pool. If an unaligned size is passed to :c:func:`mps_alloc` or
:c:func:`mps_free`, it will be rounded up to the pool's alignment.
The minimum alignment supported by pools of this class is
``sizeof(void *)``.
``sizeof(void *)``.
* :c:macro:`MPS_KEY_MIN_SIZE` (type :c:type:`size_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the

View file

@ -40,7 +40,6 @@ New features
the lowest generation whose new size was within its capacity.)
Interface changes
.................
@ -54,10 +53,24 @@ Interface changes
the value ``FALSE`` is appropriate only when you know that all
references are exact. See :ref:`pool-ams`.
#. It is now possible to configure the alignment of objects allocated
in a :ref:`pool-mv` pool, by passing the :c:macro:`MPS_KEY_ALIGN`
keyword argument to :c:func:`mps_pool_create_k` (see the sketch after this list).
#. The :ref:`pool-mvff` pool class takes a new keyword argument
:c:macro:`MPS_KEY_SPARE`. This specifies the maximum proportion of
memory that the pool will keep spare for future allocations.
#. The alignment requirements for :ref:`pool-mvff` and :ref:`pool-mvt`
pools have been relaxed on the platforms ``w3i3mv`` and ``w3i6mv``.
On all platforms it is now possible to specify any alignment down to
``sizeof(void *)`` for pools of these classes.
#. The sizes of the templates in a :c:type:`mps_pool_debug_option_s`
structure no longer have to be related to the alignment of the
pools that they are used with. This makes it easier to reuse these
structures.
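The new :c:macro:`MPS_KEY_ALIGN` support for :ref:`pool-mv` might be
used like this (a sketch, assuming an existing ``arena``; 16 is just an
arbitrary power of two)::

    mps_pool_t pool;
    mps_res_t res;
    MPS_ARGS_BEGIN(args) {
        MPS_ARGS_ADD(args, MPS_KEY_ALIGN, (mps_align_t)16);
        res = mps_pool_create_k(&pool, arena, mps_class_mv(), args);
    } MPS_ARGS_END(args);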
Other changes
.............
@ -81,18 +94,39 @@ Other changes
.. _job003745: https://www.ravenbrook.com/project/mps/issue/job003745/
#. The debugging version of the :ref:`pool-mvff` pool class,
:c:func:`mps_class_mvff_debug`, no longer triggers an assertion
failure if you allocate a large object. See job003751_.
.. _job003751: https://www.ravenbrook.com/project/mps/issue/job003751/
#. :program:`mpseventtxt` now successfully processes a telemetry log
containing multiple labels associated with the same address. See
job003756_.
.. _job003756: https://www.ravenbrook.com/project/mps/issue/job003756/
#. An :ref:`pool-ams` pool gets collected even if it is the only pool
on its generation chain and is allocating into a generation other
than the nursery. See job003771_.
#. :ref:`pool-ams`, :ref:`pool-awl` and :ref:`pool-lo` pools get
reliably collected, even in the case where the pool is the only
pool on its generation chain and is allocating into some generation
other than the nursery. See job003771_.
.. _job003771: https://www.ravenbrook.com/project/mps/issue/job003771/
#. Allocation into :ref:`pool-awl` pools again reliably provokes
garbage collections of the generation that the pool belongs to. (In
release 1.113.0, the generation would only be collected if a pool
of some other class allocated into it.) See job003772_.
.. _job003772: https://www.ravenbrook.com/project/mps/issue/job003772/
#. All unreachable objects in :ref:`pool-lo` pools are finalized.
(Previously, objects on a segment attached to an allocation point
were not finalized until the allocation point was full.) See
job003773_.
.. _job003773: https://www.ravenbrook.com/project/mps/issue/job003773/
#. The :ref:`pool-mvt` and :ref:`pool-mvff` pool classes are now
around 25% faster (in our benchmarks) than they were in release
1.113.0.

View file

@ -170,9 +170,9 @@ Cache interface
The size classes are described by an array of element type
:c:type:`mps_sac_class_s`. This array is used to initialize the
segregated allocation cache, and is not needed
after:c:func:`mps_sac_create` returns. The following constraints
apply to the array:
segregated allocation cache, and is not needed after
:c:func:`mps_sac_create` returns. The following constraints apply
to the array:
* You must specify at least one size class.

View file

@ -50,9 +50,9 @@ debugging:
for the pattern at any time by calling
:c:func:`mps_pool_check_free_space`.
The :term:`client program` specifies templates for both of these
features via the :c:type:`mps_pool_debug_option_s` structure. This
allows it to specify patterns:
The :term:`client program` may optionally specify templates for both
of these features via the :c:type:`mps_pool_debug_option_s` structure.
This allows it to specify patterns:
* that mimic illegal data values;
@ -66,8 +66,8 @@ allows it to specify patterns:
For example::
mps_pool_debug_option_s debug_options = {
(const void *)"postpost", 8,
(const void *)"freefree", 8,
"fencepost", 9,
"free", 4,
};
mps_pool_t pool;
mps_res_t res;
@ -81,7 +81,7 @@ For example::
.. c:type:: mps_pool_debug_option_s
The type of the structure passed as the
The type of the structure passed as the value for the optional
:c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` keyword argument to
:c:func:`mps_pool_create_k` when creating a debugging :term:`pool
class`. ::
@ -104,10 +104,6 @@ For example::
``free_size`` is the :term:`size` of ``free_template`` in bytes, or
zero if the debugging pool should not splat free space.
Both ``fence_size`` and ``free_size`` must be a multiple of the
:term:`alignment` of the :term:`pool`, and also a multiple of the
alignment of the pool's :term:`object format` if it has one.
The debugging pool will copy the ``fence_size`` bytes pointed to by
``fence_template`` in a repeating pattern onto each fencepost during
allocation, and it will copy the bytes pointed to by
@ -118,6 +114,13 @@ For example::
pieces smaller than the given size, for example to pad out part of
a block that was left unused because of alignment requirements.
If the client omits to pass the
:c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` keyword argument to
:c:func:`mps_pool_create_k`, then the fencepost template consists
of the four bytes ``50 4F 53 54`` (``POST`` in ASCII), and the
free space template consists of the four bytes ``46 52 45 45``
(``FREE`` in ASCII).
.. c:function:: void mps_pool_check_fenceposts(mps_pool_t pool)

View file

@ -86,7 +86,7 @@ now :c:macro:`MPS_KEY_ARGS_END`.
Keyword Type & field in ``arg.val`` See
======================================== ====================================================== ==========================================================
:c:macro:`MPS_KEY_ARGS_END` *none* *see above*
:c:macro:`MPS_KEY_ALIGN` :c:type:`mps_align_t` ``align`` :c:func:`mps_class_mvff`, :c:func:`mps_class_mvt`
:c:macro:`MPS_KEY_ALIGN` :c:type:`mps_align_t` ``align`` :c:func:`mps_class_mv`, :c:func:`mps_class_mvff`, :c:func:`mps_class_mvt`
:c:macro:`MPS_KEY_AMS_SUPPORT_AMBIGUOUS` :c:type:`mps_bool_t` ``b`` :c:func:`mps_class_ams`
:c:macro:`MPS_KEY_ARENA_CL_BASE` :c:type:`mps_addr_t` ``addr`` :c:func:`mps_arena_class_cl`
:c:macro:`MPS_KEY_ARENA_SIZE` :c:type:`size_t` ``size`` :c:func:`mps_arena_class_vm`, :c:func:`mps_arena_class_cl`