diff --git a/mps/Makefile.in b/mps/Makefile.in
index 1a7ff142811..71673c67db3 100644
--- a/mps/Makefile.in
+++ b/mps/Makefile.in
@@ -71,7 +71,7 @@ make-install-dirs:
install: @INSTALL_TARGET@
test-make-build:
- $(MAKE) $(TARGET_OPTS) testci
+ $(MAKE) $(TARGET_OPTS) testci testratio
$(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool clean testansi
$(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool CFLAGS="-DCONFIG_POLL_NONE" clean testpollnone
diff --git a/mps/code/abq.h b/mps/code/abq.h
index e781d00e156..90591de9a5f 100644
--- a/mps/code/abq.h
+++ b/mps/code/abq.h
@@ -50,10 +50,10 @@ typedef struct ABQStruct
void *queue;
/* Meter queue depth at each operation */
- METER_DECL(push);
- METER_DECL(pop);
- METER_DECL(peek);
- METER_DECL(delete);
+ METER_DECL(push)
+ METER_DECL(pop)
+ METER_DECL(peek)
+ METER_DECL(delete)
Sig sig;
} ABQStruct;
diff --git a/mps/code/arena.c b/mps/code/arena.c
index 9597482a6eb..a2c7a731c73 100644
--- a/mps/code/arena.c
+++ b/mps/code/arena.c
@@ -1,7 +1,7 @@
/* arena.c: ARENA ALLOCATION FEATURES
*
* $Id$
- * Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
*
* .sources: is the main design document. */
@@ -198,11 +198,13 @@ Bool ArenaCheck(Arena arena)
CHECKL(arena->spareCommitted <= arena->committed);
CHECKL(0.0 <= arena->pauseTime);
- CHECKL(ShiftCheck(arena->zoneShift));
+ CHECKL(arena->zoneShift == ZoneShiftUNSET
+ || ShiftCheck(arena->zoneShift));
CHECKL(ArenaGrainSizeCheck(arena->grainSize));
/* Stripes can't be smaller than grains. */
- CHECKL(((Size)1 << arena->zoneShift) >= arena->grainSize);
+ CHECKL(arena->zoneShift == ZoneShiftUNSET
+ || ((Size)1 << arena->zoneShift) >= arena->grainSize);
if (arena->lastTract == NULL) {
CHECKL(arena->lastTractBase == (Addr)0);
@@ -264,8 +266,8 @@ static Res ArenaAbsInit(Arena arena, Size grainSize, ArgList args)
arena->spareCommitLimit = spareCommitLimit;
arena->pauseTime = pauseTime;
arena->grainSize = grainSize;
- /* zoneShift is usually overridden by init */
- arena->zoneShift = ARENA_ZONESHIFT;
+ /* zoneShift must be overridden by arena class init */
+ arena->zoneShift = ZoneShiftUNSET;
arena->poolReady = FALSE; /* */
arena->lastTract = NULL;
arena->lastTractBase = NULL;
@@ -389,6 +391,9 @@ Res ArenaCreate(Arena *arenaReturn, ArenaClass klass, ArgList args)
if (res != ResOK)
goto failInit;
+ /* Zone shift must have been set up by klass->create() */
+ AVER(ShiftCheck(arena->zoneShift));
+
/* TODO: Consider how each of the stages below could be incorporated
into arena initialization, rather than tacked on here. */
@@ -998,6 +1003,7 @@ Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit)
Res res;
AVERT(Arena, arena);
+ AVER(base < limit);
RangeInit(&range, base, limit);
res = arenaFreeLandInsertExtend(&oldRange, arena, &range);
@@ -1438,7 +1444,7 @@ Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr)
/* C. COPYRIGHT AND LICENSE
*
- * Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
+ * Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c
index 77f8237bb9a..edd52d2d004 100644
--- a/mps/code/arenavm.c
+++ b/mps/code/arenavm.c
@@ -295,8 +295,7 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
if (res != ResOK)
goto failBootInit;
- /* Allocate and map the descriptor. */
- /* See .@@@@ */
+ /* .overhead.chunk-struct: Allocate and map the chunk structure. */
res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN);
if (res != ResOK)
goto failChunkAlloc;
@@ -348,11 +347,13 @@ static Res VMChunkInit(Chunk chunk, BootBlock boot)
vmChunk = Chunk2VMChunk(chunk);
AVERT(BootBlock, boot);
+ /* .overhead.sa-mapped: Chunk overhead for sparse array 'mapped' table. */
res = BootAlloc(&p, boot, BTSize(chunk->pages), MPS_PF_ALIGN);
if (res != ResOK)
goto failSaMapped;
saMapped = p;
+ /* .overhead.sa-pages: Chunk overhead for sparse array 'pages' table. */
res = BootAlloc(&p, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN);
if (res != ResOK)
goto failSaPages;
@@ -360,8 +361,8 @@ static Res VMChunkInit(Chunk chunk, BootBlock boot)
overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot));
- /* Put the page table as late as possible, as in VM systems we don't want */
- /* to map it. */
+ /* .overhead.page-table: Put the page table as late as possible, as
+ * in VM systems we don't want to map it. */
res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift, chunk->pageSize);
if (res != ResOK)
goto failAllocPageTable;
@@ -478,6 +479,64 @@ static void vmArenaTrivContracted(Arena arena, Addr base, Size size)
}
+/* vmArenaChunkSize -- compute chunk size
+ *
+ * Compute the size of the smallest chunk that has size bytes of usable
+ * address space (that is, after all overheads are accounted for).
+ *
+ * If successful, update *chunkSizeReturn with the computed chunk size
+ * and return ResOK. If size is too large for a chunk, leave
+ * *chunkSizeReturn unchanged and return ResRESOURCE.
+ */
+static Res vmArenaChunkSize(Size *chunkSizeReturn, VMArena vmArena, Size size)
+{
+ Size grainSize; /* Arena grain size. */
+ Shift grainShift; /* The corresponding Shift. */
+ Count pages; /* Number of usable pages in chunk. */
+ Size pageTableSize; /* Size of the page table. */
+ Count pageTablePages; /* Number of pages in the page table. */
+ Size chunkSize; /* Size of the chunk. */
+ Size overhead; /* Total overheads for the chunk. */
+
+ AVER(chunkSizeReturn != NULL);
+ AVERT(VMArena, vmArena);
+ AVER(size > 0);
+
+ grainSize = ArenaGrainSize(MustBeA(AbstractArena, vmArena));
+ grainShift = SizeLog2(grainSize);
+
+ overhead = 0;
+ do {
+ chunkSize = size + overhead;
+
+ /* See .overhead.chunk-struct. */
+ overhead = SizeAlignUp(sizeof(VMChunkStruct), MPS_PF_ALIGN);
+
+ /* See , */
+ pages = chunkSize >> grainShift;
+ overhead += SizeAlignUp(BTSize(pages), MPS_PF_ALIGN);
+
+ /* See .overhead.sa-mapped. */
+ overhead += SizeAlignUp(BTSize(pages), MPS_PF_ALIGN);
+
+ /* See .overhead.sa-pages. */
+ pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), grainSize);
+ pageTablePages = pageTableSize >> grainShift;
+ overhead += SizeAlignUp(BTSize(pageTablePages), MPS_PF_ALIGN);
+
+ /* See .overhead.page-table. */
+ overhead = SizeAlignUp(overhead, grainSize);
+ overhead += SizeAlignUp(pageTableSize, grainSize);
+
+ if (SizeMAX - overhead < size)
+ return ResRESOURCE;
+ } while (chunkSize < size + overhead);
+
+ *chunkSizeReturn = chunkSize;
+ return ResOK;
+}
+
+
/* VMArenaCreate -- create and initialize the VM arena
*
* .arena.init: Once the arena has been allocated, we call ArenaInit
@@ -578,6 +637,19 @@ static Res VMArenaCreate(Arena *arenaReturn, ArgList args)
if (res != ResOK)
goto failChunkCreate;
+#if defined(AVER_AND_CHECK_ALL)
+ /* Check that the computation of the chunk size in vmArenaChunkSize
+ * was correct, now that we have the actual chunk for comparison. */
+ {
+ Size usableSize, computedChunkSize;
+ usableSize = AddrOffset(PageIndexBase(chunk, chunk->allocBase),
+ chunk->limit);
+ res = vmArenaChunkSize(&computedChunkSize, vmArena, usableSize);
+ AVER(res == ResOK);
+ AVER(computedChunkSize == ChunkSize(chunk));
+ }
+#endif
+
/* .zoneshift: Set the zone shift to divide the chunk into the same */
/* number of stripes as will fit into a reference set (the number of */
/* bits in a word). Fail if the chunk is so small stripes are smaller */
@@ -641,50 +713,27 @@ static void VMArenaDestroy(Arena arena)
}
-/* vmArenaChunkSize -- choose chunk size for arena extension
- *
- * .vmchunk.overhead: This code still lacks a proper estimate of
- * the overhead required by a vmChunk for chunkStruct, page tables
- * etc. For now, estimate it as 10%. RHSK 2007-12-21
- */
-static Size vmArenaChunkSize(VMArena vmArena, Size size)
-{
- Size fraction = 10; /* 10% -- see .vmchunk.overhead */
- Size chunkSize;
- Size chunkOverhead;
-
- /* 1: use extendBy, if it is big enough for size + overhead */
- chunkSize = vmArena->extendBy;
- chunkOverhead = chunkSize / fraction;
- if(chunkSize > size && (chunkSize - size) >= chunkOverhead)
- return chunkSize;
-
- /* 2: use size + overhead (unless it overflows SizeMAX) */
- chunkOverhead = size / (fraction - 1);
- if((SizeMAX - size) >= chunkOverhead)
- return size + chunkOverhead;
-
- /* 3: use SizeMAX */
- return SizeMAX;
-}
-
-
/* VMArenaGrow -- Extend the arena by making a new chunk
*
- * The size arg specifies how much we wish to allocate after the extension.
+ * size specifies how much we wish to allocate after the extension.
+ * pref specifies the preference for the location of the allocation.
*/
static Res VMArenaGrow(Arena arena, LocusPref pref, Size size)
{
VMArena vmArena = MustBeA(VMArena, arena);
Chunk newChunk;
Size chunkSize;
+ Size chunkMin;
Res res;
/* TODO: Ensure that extended arena will be able to satisfy pref. */
AVERT(LocusPref, pref);
UNUSED(pref);
- chunkSize = vmArenaChunkSize(vmArena, size);
+ res = vmArenaChunkSize(&chunkMin, vmArena, size);
+ if (res != ResOK)
+ return res;
+ chunkSize = vmArena->extendBy;
EVENT3(vmArenaExtendStart, size, chunkSize, ArenaReserved(arena));
@@ -692,7 +741,6 @@ static Res VMArenaGrow(Arena arena, LocusPref pref, Size size)
{
unsigned fidelity = 8; /* max fraction of addr-space we may 'waste' */
Size chunkHalf;
- Size chunkMin = 4 * 1024; /* typical single page */
Size sliceSize;
if (vmArena->extendMin > chunkMin)
@@ -1097,30 +1145,26 @@ static Bool vmChunkCompact(Tree tree, void *closure)
static void VMCompact(Arena arena, Trace trace)
{
- Size vmem1;
+ STATISTIC_DECL(Size vmem1)
AVERT(Trace, trace);
- vmem1 = ArenaReserved(arena);
+ STATISTIC(vmem1 = ArenaReserved(arena));
/* Destroy chunks that are completely free, but not the primary
* chunk. See
* TODO: add hysteresis here. See job003815. */
TreeTraverseAndDelete(&arena->chunkTree, vmChunkCompact, arena);
- {
+ STATISTIC({
Size vmem0 = trace->preTraceArenaReserved;
Size vmem2 = ArenaReserved(arena);
- /* VMCompact event: emit for all client-requested collections, */
- /* plus any others where chunks were gained or lost during the */
- /* collection. */
- if(trace->why == TraceStartWhyCLIENTFULL_INCREMENTAL
- || trace->why == TraceStartWhyCLIENTFULL_BLOCK
- || vmem0 != vmem1
- || vmem1 != vmem2)
+ /* VMCompact event: emit for collections where chunks were gained
+ * or lost during the collection. */
+ if (vmem0 != vmem1 || vmem1 != vmem2)
EVENT3(VMCompact, vmem0, vmem1, vmem2);
- }
+ });
}
mps_res_t mps_arena_vm_growth(mps_arena_t mps_arena,
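
vmArenaChunkSize above must iterate because the overheads (chunk
structure, bit tables, page table) depend on the number of pages in the
chunk, which in turn depends on the chunk size being computed. A minimal
standalone model of that fixed-point iteration, with the real overheads
replaced by an invented stand-in (a fixed header plus one table byte per
page) and purely illustrative constants:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define GRAIN_SIZE  4096   /* stand-in for the arena grain size */
    #define HEADER_SIZE 512    /* stand-in for the chunk structure */

    /* Overhead of a chunk of chunkSize bytes: header plus one byte of
     * per-page table, rounded up to a whole grain. */
    static size_t overheadFor(size_t chunkSize)
    {
      size_t pages = chunkSize / GRAIN_SIZE;
      size_t raw = HEADER_SIZE + pages;
      return (raw + GRAIN_SIZE - 1) / GRAIN_SIZE * GRAIN_SIZE;
    }

    /* Smallest chunk with at least 'size' usable bytes: assume no
     * overhead, then retry with the overhead implied by the previous
     * estimate until the estimate stops growing. */
    static size_t chunkSizeFor(size_t size)
    {
      size_t overhead = 0, chunkSize;
      do {
        chunkSize = size + overhead;
        overhead = overheadFor(chunkSize);
      } while (chunkSize < size + overhead);
      return chunkSize;
    }

    int main(void)
    {
      size_t size = 1024 * 1024; /* want 1 MiB usable */
      size_t chunkSize = chunkSizeFor(size);
      assert(chunkSize - overheadFor(chunkSize) >= size);
      printf("chunk %lu = usable %lu + overhead %lu\n",
             (unsigned long)chunkSize, (unsigned long)size,
             (unsigned long)(chunkSize - size));
      return 0;
    }

The loop terminates because the overhead, like the real one, grows far
more slowly than the chunk size itself.
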
diff --git a/mps/code/cbs.c b/mps/code/cbs.c
index d3de50a5ab6..c081ed61610 100644
--- a/mps/code/cbs.c
+++ b/mps/code/cbs.c
@@ -59,7 +59,7 @@ Bool CBSCheck(CBS cbs)
CHECKL(cbs->blockStructSize > 0);
CHECKL(BoolCheck(cbs->ownPool));
CHECKL(SizeIsAligned(cbs->size, LandAlignment(land)));
- STATISTIC_STAT({CHECKL((cbs->size == 0) == (cbs->treeSize == 0));});
+ STATISTIC(CHECKL((cbs->size == 0) == (cbs->treeSize == 0)));
return TRUE;
}
@@ -248,7 +248,7 @@ static Res cbsInitComm(Land land, LandClass klass,
return res;
cbs->ownPool = TRUE;
}
- cbs->treeSize = 0;
+ STATISTIC(cbs->treeSize = 0);
cbs->size = 0;
cbs->blockStructSize = blockStructSize;
@@ -1107,7 +1107,7 @@ static Res cbsDescribe(Land land, mps_lib_FILE *stream, Count depth)
res = WriteF(stream, depth + 2,
"blockPool $P\n", (WriteFP)cbsBlockPool(cbs),
"ownPool $U\n", (WriteFU)cbs->ownPool,
- "treeSize $U\n", (WriteFU)cbs->treeSize,
+               STATISTIC_WRITE("treeSize $U\n", (WriteFU)cbs->treeSize)
NULL);
if (res != ResOK)
return res;
diff --git a/mps/code/comm.gmk b/mps/code/comm.gmk
index 9d71c6c231d..269163e8d99 100644
--- a/mps/code/comm.gmk
+++ b/mps/code/comm.gmk
@@ -319,6 +319,33 @@ $(addprefix $(PFM)/$(VARIETY)/,$(TEST_SUITES)): $(TEST_TARGETS)
../tool/testrun.sh -s "$(notdir $@)" "$(PFM)/$(VARIETY)"
+# == Automated performance testing ==
+#
+# testratio = measure performance ratio of hot variety versus rash
+
+TESTRATIO_SEED = 1564912146
+
+# These targets are set quite high to reduce false positives due to
+# the usual vagaries of performance measurement.
+TARGET_RATIO_AMC = 120
+TARGET_RATIO_MVFF = 150
+
+define ratio
+TIME_HOT=$$(/usr/bin/time -p $(PFM)/hot/$(1) -x $(TESTRATIO_SEED) $(2) 2>&1 | tail -2 | awk '{T += $$2} END {print T}'); \
+TIME_RASH=$$(/usr/bin/time -p $(PFM)/rash/$(1) -x $(TESTRATIO_SEED) $(2) 2>&1 | tail -2 | awk '{T += $$2} END {print T}'); \
+RATIO=$$(awk "BEGIN{print int(100 * $$TIME_HOT / $$TIME_RASH)}"); \
+printf "Performance ratio (hot/rash) for $(2): %d%%\n" $$RATIO; \
+test $$RATIO -lt $(3)
+endef
+
+.PHONY: testratio
+testratio:
+ $(MAKE) -f $(PFM).gmk VARIETY=hot djbench gcbench
+ $(MAKE) -f $(PFM).gmk VARIETY=rash djbench gcbench
+ $(call ratio,gcbench,amc,$(TARGET_RATIO_AMC))
+ $(call ratio,djbench,mvff,$(TARGET_RATIO_MVFF))
+
+
# == MMQA test suite ==
#
# See test/README for documentation on running the MMQA test suite.
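
The ratio macro above sums the "user" and "sys" times reported by
time -p for each variety and compares int(100 * hot / rash) against the
target. A worked example with illustrative timings: if gcbench takes
1.10 s of user+sys time under the hot variety and 1.02 s under rash,
then RATIO = int(100 * 1.10 / 1.02) = 107, which passes against
TARGET_RATIO_AMC = 120.
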
diff --git a/mps/code/config.h b/mps/code/config.h
index 3e55fa1fa04..b5a5b6d1a93 100644
--- a/mps/code/config.h
+++ b/mps/code/config.h
@@ -1,7 +1,7 @@
/* config.h: MPS CONFIGURATION
*
* $Id$
- * Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
* Portions copyright (c) 2002 Global Graphics Software.
*
* PURPOSE
@@ -106,8 +106,6 @@
#if defined(CONFIG_STATS)
/* CONFIG_STATS = STATISTICS = METERs */
-/* WARNING: this may change the size and fields of MPS structs */
-/* (...but see STATISTIC_DECL, which is invariant) */
#define STATISTICS
#define MPS_STATS_STRING "stats"
#else
@@ -412,8 +410,6 @@
#define ArenaPollALLOCTIME (65536.0)
-#define ARENA_ZONESHIFT ((Shift)20)
-
/* .client.seg-size: ARENA_CLIENT_GRAIN_SIZE is the minimum size, in
* bytes, of a grain in the client arena. It's set at 8192 with no
* particular justification. */
@@ -708,7 +704,7 @@
/* C. COPYRIGHT AND LICENSE
*
- * Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
+ * Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
diff --git a/mps/code/djbench.c b/mps/code/djbench.c
index fbc6dcabc1c..0e4dbb3fc20 100644
--- a/mps/code/djbench.c
+++ b/mps/code/djbench.c
@@ -1,7 +1,7 @@
/* djbench.c -- "DJ" Benchmark on ANSI C library
*
* $Id$
- * Copyright 2013 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2013-2016 Ravenbrook Limited. See end of file for license.
*
* This is an allocation stress benchmark test for manual variable pools
* and also for stdlib malloc/free (for comparison).
@@ -243,6 +243,7 @@ static struct {
int main(int argc, char *argv[]) {
int ch;
unsigned i;
+ mps_bool_t seed_specified = FALSE;
seed = rnd_seed();
@@ -274,6 +275,7 @@ int main(int argc, char *argv[]) {
break;
case 'x':
seed = strtoul(optarg, NULL, 10);
+ seed_specified = TRUE;
break;
case 'z':
zoned = FALSE;
@@ -358,8 +360,10 @@ int main(int argc, char *argv[]) {
argc -= optind;
argv += optind;
- printf("seed: %lu\n", seed);
- (void)fflush(stdout);
+ if (!seed_specified) {
+ printf("seed: %lu\n", seed);
+ (void)fflush(stdout);
+ }
while (argc > 0) {
for (i = 0; i < NELEMS(pools); ++i)
@@ -381,7 +385,7 @@ int main(int argc, char *argv[]) {
/* C. COPYRIGHT AND LICENSE
*
- * Copyright (c) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
+ * Copyright (c) 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
diff --git a/mps/code/eventdef.h b/mps/code/eventdef.h
index 907dbf3520f..6c3c12f8522 100644
--- a/mps/code/eventdef.h
+++ b/mps/code/eventdef.h
@@ -138,7 +138,7 @@
EVENT(X, TraceScanSeg , 0x003C, TRUE, Seg) \
/* TraceScanSingleRef abuses kind, see .kind.abuse */ \
EVENT(X, TraceScanSingleRef , 0x003D, TRUE, Seg) \
- EVENT(X, TraceStatCondemn , 0x003E, TRUE, Trace) \
+ /* EVENT(X, TraceStatCondemn , 0x003E, TRUE, Trace) */ \
EVENT(X, TraceStatScan , 0x003F, TRUE, Trace) \
EVENT(X, TraceStatFix , 0x0040, TRUE, Trace) \
EVENT(X, TraceStatReclaim , 0x0041, TRUE, Trace) \
@@ -443,15 +443,6 @@
PARAM(X, 2, P, arena) \
PARAM(X, 3, A, refIO)
-#define EVENT_TraceStatCondemn_PARAMS(PARAM, X) \
- PARAM(X, 0, P, trace) \
- PARAM(X, 1, W, condemned) \
- PARAM(X, 2, W, notCondemned) \
- PARAM(X, 3, W, foundation) \
- PARAM(X, 4, W, quantumWork) \
- PARAM(X, 5, D, mortality) \
- PARAM(X, 6, D, finishingTime)
-
#define EVENT_TraceStatScan_PARAMS(PARAM, X) \
PARAM(X, 0, P, trace) \
PARAM(X, 1, W, rootScanCount) \
diff --git a/mps/code/finalcv.c b/mps/code/finalcv.c
index 326ed24f0ad..6f55e518cd4 100644
--- a/mps/code/finalcv.c
+++ b/mps/code/finalcv.c
@@ -1,7 +1,7 @@
/* finalcv.c: FINALIZATION COVERAGE TEST
*
* $Id$
- * Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
* Portions copyright (C) 2002 Global Graphics Software.
*
* DESIGN
@@ -40,7 +40,7 @@
#define finalizationRATE 6
#define gcINTERVAL ((size_t)150 * 1024)
#define collectionCOUNT 3
-#define messageCOUNT 3
+#define finalizationCOUNT 3
/* 3 words: wrapper | vector-len | first-slot */
#define vectorSIZE (3*sizeof(mps_word_t))
@@ -110,8 +110,8 @@ static void test(mps_arena_t arena, mps_pool_class_t pool_class)
mps_root_t mps_root[2];
mps_addr_t nullref = NULL;
int state[rootCOUNT];
- mps_message_t message;
- size_t messages = 0;
+ size_t finalizations = 0;
+ size_t collections = 0;
void *p;
printf("---- finalcv: pool class %s ----\n", ClassName(pool_class));
@@ -149,9 +149,11 @@ static void test(mps_arena_t arena, mps_pool_class_t pool_class)
p = NULL;
mps_message_type_enable(arena, mps_message_type_finalization());
+ mps_message_type_enable(arena, mps_message_type_gc());
/* */
- while (messages < messageCOUNT && mps_collections(arena) < collectionCOUNT) {
+ while (finalizations < finalizationCOUNT && collections < collectionCOUNT) {
+ mps_message_type_t type;
/* Perhaps cause (minor) collection */
churn(ap);
@@ -177,31 +179,37 @@ static void test(mps_arena_t arena, mps_pool_class_t pool_class)
}
}
- /* Test any finalized objects, and perhaps resurrect some */
- while (mps_message_poll(arena)) {
- mps_word_t *obj;
- mps_word_t objind;
- mps_addr_t objaddr;
+ while (mps_message_queue_type(&type, arena)) {
+ mps_message_t message;
+ cdie(mps_message_get(&message, arena, type), "message_get");
+ if (type == mps_message_type_finalization()) {
+ /* Check finalized object, and perhaps resurrect it. */
+ mps_word_t *obj;
+ mps_word_t objind;
+ mps_addr_t objaddr;
- /* */
- cdie(mps_message_get(&message, arena, mps_message_type_finalization()),
- "get");
- cdie(0 == mps_message_clock(arena, message),
- "message clock should be 0 (unset) for finalization messages");
- mps_message_finalization_ref(&objaddr, arena, message);
- obj = objaddr;
- objind = dylan_int_int(obj[vectorSLOT]);
- printf("Finalizing: object %"PRIuLONGEST" at %p\n",
- (ulongest_t)objind, objaddr);
- /* */
- cdie(root[objind] == NULL, "finalized live");
- cdie(state[objind] == finalizableSTATE, "finalized dead");
- state[objind] = finalizedSTATE;
- /* sometimes resurrect */
- if (rnd() % 2 == 0)
- root[objind] = objaddr;
+ /* */
+ cdie(0 == mps_message_clock(arena, message),
+ "message clock should be 0 (unset) for finalization messages");
+ mps_message_finalization_ref(&objaddr, arena, message);
+ obj = objaddr;
+ objind = dylan_int_int(obj[vectorSLOT]);
+ printf("Finalizing: object %"PRIuLONGEST" at %p\n",
+ (ulongest_t)objind, objaddr);
+ /* */
+ cdie(root[objind] == NULL, "finalized live");
+ cdie(state[objind] == finalizableSTATE, "finalized dead");
+ state[objind] = finalizedSTATE;
+ /* sometimes resurrect */
+ if (rnd() % 2 == 0)
+ root[objind] = objaddr;
+ ++ finalizations;
+ } else if (type == mps_message_type_gc()) {
+ ++ collections;
+ } else {
+ error("Unexpected message type %lu.", (unsigned long)type);
+ }
mps_message_discard(arena, message);
- ++ messages;
}
}
@@ -238,7 +246,7 @@ int main(int argc, char *argv[])
/* C. COPYRIGHT AND LICENSE
*
- * Copyright (c) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
+ * Copyright (c) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
diff --git a/mps/code/finaltest.c b/mps/code/finaltest.c
index e262dc588b4..877e37f9699 100644
--- a/mps/code/finaltest.c
+++ b/mps/code/finaltest.c
@@ -43,6 +43,7 @@
#include "fmtdytst.h"
#include "mpstd.h"
+#include <math.h> /* HUGE_VAL */
 #include <stdio.h> /* fflush, printf, stdout */
enum {
@@ -157,6 +158,7 @@ static void test_trees(int mode, const char *name, mps_arena_t arena,
mode == ModePARK ? "PARK" : "POLL",
ClassName(klass), name);
mps_arena_park(arena);
+ mps_message_type_enable(arena, mps_message_type_gc());
/* make some trees */
for(i = 0; i < rootCOUNT; ++i) {
@@ -170,6 +172,7 @@ static void test_trees(int mode, const char *name, mps_arena_t arena,
}
while (finals < object_count && collections < collectionCOUNT) {
+ mps_message_type_t type;
mps_word_t final_this_time = 0;
switch (mode) {
default:
@@ -189,7 +192,6 @@ static void test_trees(int mode, const char *name, mps_arena_t arena,
printf(" Done.\n");
break;
}
- ++ collections;
{
size_t live_size = (object_count - finals) * sizeof(void *) * 3;
size_t total_size = mps_pool_total_size(pool);
@@ -197,25 +199,36 @@ static void test_trees(int mode, const char *name, mps_arena_t arena,
Insist(free_size <= total_size);
Insist(free_size + live_size <= total_size);
}
- while (mps_message_poll(arena)) {
+ while (mps_message_queue_type(&type, arena)) {
mps_message_t message;
- mps_addr_t objaddr;
- cdie(mps_message_get(&message, arena, mps_message_type_finalization()),
- "message_get");
- mps_message_finalization_ref(&objaddr, arena, message);
+ cdie(mps_message_get(&message, arena, type), "message_get");
+ if (type == mps_message_type_finalization()) {
+ mps_addr_t objaddr;
+ mps_message_finalization_ref(&objaddr, arena, message);
+ ++ final_this_time;
+ } else if (type == mps_message_type_gc()) {
+ ++ collections;
+ } else {
+ error("Unexpected message type %lu.", (unsigned long)type);
+ }
mps_message_discard(arena, message);
- ++ final_this_time;
}
finals += final_this_time;
printf("%"PRIuLONGEST" objects finalized: total %"PRIuLONGEST
" of %"PRIuLONGEST"\n", (ulongest_t)final_this_time,
(ulongest_t)finals, (ulongest_t)object_count);
}
+
if (finals != object_count) {
PoolClass poolClass = ClassOfPoly(Pool, BufferOfAP(ap)->pool);
error("Not all objects were finalized for %s in mode %s.",
- ClassName(poolClass), mode == ModePOLL ? "POLL" : "PARK");
+ ClassName(poolClass),
+ mode == ModePOLL ? "POLL" : "PARK");
}
+
+ if (collections > collectionCOUNT)
+ error("Expected no more than %lu collections but got %lu.",
+ (unsigned long)collectionCOUNT, (unsigned long)collections);
}
static void test_pool(int mode, mps_arena_t arena, mps_chain_t chain,
@@ -274,8 +287,18 @@ int main(int argc, char *argv[])
testlib_init(argc, argv);
- die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE),
- "arena_create\n");
+ MPS_ARGS_BEGIN(args) {
+ /* Randomize pause time as a regression test for job004007. */
+ double t = rnd_double();
+ if (t == 0.0)
+ t = HUGE_VAL; /* Would prefer to use INFINITY but it's not in C89. */
+ else
+ t = 1 / t - 1;
+ MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, t);
+ MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
+ die(mps_arena_create_k(&arena, mps_arena_class_vm(), args),
+ "arena_create\n");
+ } MPS_ARGS_END(args);
mps_message_type_enable(arena, mps_message_type_finalization());
die(mps_thread_reg(&thread, arena), "thread_reg\n");
for (i = 0; i < gens; ++i) {
diff --git a/mps/code/gcbench.c b/mps/code/gcbench.c
index 0c0f18e0737..f8b53ca78d6 100644
--- a/mps/code/gcbench.c
+++ b/mps/code/gcbench.c
@@ -1,7 +1,7 @@
/* gcbench.c -- "GC" Benchmark on ANSI C library
*
* $Id$
- * Copyright 2014 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2014-2016 Ravenbrook Limited. See end of file for license.
*
* This is an allocation stress benchmark test for gc pools
*/
@@ -298,15 +298,9 @@ static struct {
int main(int argc, char *argv[]) {
int ch;
unsigned i;
- int k;
+ mps_bool_t seed_specified = FALSE;
seed = rnd_seed();
- for(k=0; k 0) {
for (i = 0; i < NELEMS(pools); ++i)
@@ -480,7 +477,7 @@ int main(int argc, char *argv[]) {
/* C. COPYRIGHT AND LICENSE
*
- * Copyright (c) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
+ * Copyright (c) 2014-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
diff --git a/mps/code/global.c b/mps/code/global.c
index e4a303d5ea2..58f1b3ed435 100644
--- a/mps/code/global.c
+++ b/mps/code/global.c
@@ -107,8 +107,6 @@ Bool GlobalsCheck(Globals arenaGlobals)
Arena arena;
TraceId ti;
Trace trace;
- Index i;
- RefSet rs;
Rank rank;
CHECKS(Globals, arenaGlobals);
@@ -181,18 +179,7 @@ Bool GlobalsCheck(Globals arenaGlobals)
/* no check for arena->lastWorldCollect (Clock) */
/* can't write a check for arena->epoch */
-
- /* check that each history entry is a subset of the next oldest */
- rs = RefSetEMPTY;
- /* note this loop starts from 1; there is no history age 0 */
- for (i=1; i <= LDHistoryLENGTH; ++ i) {
- /* check history age 'i'; 'j' is the history index. */
- Index j = (arena->epoch + LDHistoryLENGTH - i) % LDHistoryLENGTH;
- CHECKL(RefSetSub(rs, arena->history[j]));
- rs = arena->history[j];
- }
- /* the oldest history entry must be a subset of the prehistory */
- CHECKL(RefSetSub(rs, arena->prehistory));
+ CHECKD(History, ArenaHistory(arena));
/* we also check the statics now. */
CHECKL(BoolCheck(arenaRingInit));
@@ -218,7 +205,6 @@ Bool GlobalsCheck(Globals arenaGlobals)
Res GlobalsInit(Globals arenaGlobals)
{
Arena arena;
- Index i;
Rank rank;
TraceId ti;
@@ -297,11 +283,8 @@ Res GlobalsInit(Globals arenaGlobals)
STATISTIC(arena->writeBarrierHitCount = 0);
RingInit(&arena->chainRing);
- arena->epoch = (Epoch)0; /* */
- arena->prehistory = RefSetEMPTY;
- for(i = 0; i < LDHistoryLENGTH; ++i)
- arena->history[i] = RefSetEMPTY;
-
+ HistoryInit(ArenaHistory(arena));
+
arena->emergency = FALSE;
arena->stackAtArenaEnter = NULL;
@@ -380,12 +363,12 @@ void GlobalsFinish(Globals arenaGlobals)
arena = GlobalsArena(arenaGlobals);
AVERT(Globals, arenaGlobals);
- STATISTIC_STAT(EVENT2(ArenaWriteFaults, arena,
- arena->writeBarrierHitCount));
+ STATISTIC(EVENT2(ArenaWriteFaults, arena, arena->writeBarrierHitCount));
arenaGlobals->sig = SigInvalid;
ShieldFinish(ArenaShield(arena));
+ HistoryFinish(ArenaHistory(arena));
RingFinish(&arena->formatRing);
RingFinish(&arena->chainRing);
RingFinish(&arena->messageRing);
@@ -693,6 +676,7 @@ void (ArenaPoll)(Globals globals)
{
Arena arena;
Clock start;
+ Bool worldCollected = FALSE;
Bool moreWork, workWasDone = FALSE;
Work tracedWork;
@@ -714,7 +698,8 @@ void (ArenaPoll)(Globals globals)
EVENT3(ArenaPoll, arena, start, FALSE);
do {
- moreWork = TracePoll(&tracedWork, globals);
+ moreWork = TracePoll(&tracedWork, &worldCollected, globals,
+ !worldCollected);
if (moreWork) {
workWasDone = TRUE;
}
@@ -770,7 +755,8 @@ Bool ArenaStep(Globals globals, double interval, double multiplier)
arena->lastWorldCollect = now;
} else {
/* Not worth collecting the world; consider starting a trace. */
- if (!PolicyStartTrace(&trace, arena))
+ Bool worldCollected;
+ if (!PolicyStartTrace(&trace, &worldCollected, arena, FALSE))
break;
}
}
@@ -953,7 +939,6 @@ Res GlobalsDescribe(Globals arenaGlobals, mps_lib_FILE *stream, Count depth)
Res res;
Arena arena;
Ring node, nextNode;
- Index i;
TraceId ti;
Trace trace;
@@ -986,21 +971,13 @@ Res GlobalsDescribe(Globals arenaGlobals, mps_lib_FILE *stream, Count depth)
"threadSerial $U\n", (WriteFU)arena->threadSerial,
"busyTraces $B\n", (WriteFB)arena->busyTraces,
"flippedTraces $B\n", (WriteFB)arena->flippedTraces,
- "epoch $U\n", (WriteFU)arena->epoch,
- "prehistory = $B\n", (WriteFB)arena->prehistory,
- "history {\n",
- " [note: indices are raw, not rotated]\n",
NULL);
if (res != ResOK)
return res;
- for(i=0; i < LDHistoryLENGTH; ++ i) {
- res = WriteF(stream, depth + 2,
- "[$U] = $B\n", (WriteFU)i, (WriteFB)arena->history[i],
- NULL);
- if (res != ResOK)
- return res;
- }
+ res = HistoryDescribe(ArenaHistory(arena), stream, depth);
+ if (res != ResOK)
+ return res;
res = ShieldDescribe(ArenaShield(arena), stream, depth);
if (res != ResOK)
diff --git a/mps/code/land.c b/mps/code/land.c
index fa8c00d6e29..3d2bb84bdec 100644
--- a/mps/code/land.c
+++ b/mps/code/land.c
@@ -214,6 +214,7 @@ Res LandInsert(Range rangeReturn, Land land, Range range)
AVERC(Land, land);
AVERT(Range, range);
AVER(RangeIsAligned(range, land->alignment));
+ AVER(!RangeIsEmpty(range));
landEnter(land);
res = Method(Land, land, insert)(rangeReturn, land, range);
diff --git a/mps/code/ld.c b/mps/code/ld.c
index c9e79f8a762..71264ff78a7 100644
--- a/mps/code/ld.c
+++ b/mps/code/ld.c
@@ -51,6 +51,88 @@
SRCID(ld, "$Id$");
+void HistoryInit(History history)
+{
+ Index i;
+
+ AVER(history != NULL);
+
+ history->epoch = 0;
+ history->prehistory = RefSetEMPTY;
+ for (i = 0; i < LDHistoryLENGTH; ++i)
+ history->history[i] = RefSetEMPTY;
+
+ history->sig = HistorySig;
+ AVERT(History, history);
+}
+
+Bool HistoryCheck(History history)
+{
+ Index i;
+ RefSet rs;
+
+ CHECKS(History, history);
+
+ /* check that each history entry is a subset of the next oldest */
+ rs = RefSetEMPTY;
+ /* note this loop starts from 1; there is no history age 0 */
+ for (i = 1; i <= LDHistoryLENGTH; ++i) {
+ /* check history age 'i'; 'j' is the history index. */
+ Index j = (history->epoch + LDHistoryLENGTH - i) % LDHistoryLENGTH;
+ CHECKL(RefSetSub(rs, history->history[j]));
+ rs = history->history[j];
+ }
+ /* the oldest history entry must be a subset of the prehistory */
+ CHECKL(RefSetSub(rs, history->prehistory));
+
+ return TRUE;
+}
+
+void HistoryFinish(History history)
+{
+ AVERT(History, history);
+ history->sig = SigInvalid;
+}
+
+Res HistoryDescribe(History history, mps_lib_FILE *stream, Count depth)
+{
+ Res res;
+ Index i;
+
+ if (!TESTT(History, history))
+ return ResPARAM;
+ if (stream == NULL)
+ return ResPARAM;
+
+ res = WriteF(stream, depth,
+ "History $P {\n", (WriteFP)history,
+ " epoch = $U\n", (WriteFU)history->epoch,
+ " prehistory = $B\n", (WriteFB)history->prehistory,
+ " history {\n",
+ " [note: indices are raw, not rotated]\n",
+ NULL);
+ if (res != ResOK)
+ return res;
+
+ for (i = 0; i < LDHistoryLENGTH; ++i) {
+ res = WriteF(stream, depth + 4,
+ "[$U] = $B\n", (WriteFU)i, (WriteFB)history->history[i],
+ NULL);
+ if (res != ResOK)
+ return res;
+ }
+
+ res = WriteF(stream, depth,
+ " }\n",
+ "} History $P\n", (WriteFP)history,
+ NULL);
+ if (res != ResOK)
+ return res;
+
+ return ResOK;
+}
+
+
/* LDReset -- reset a dependency to empty
*
* .reset.sync: This does not need to be synchronized with LDAge
@@ -68,7 +150,7 @@ void LDReset(mps_ld_t ld, Arena arena)
b = SegOfAddr(&seg, arena, (Addr)ld);
if (b)
ShieldExpose(arena, seg); /* .ld.access */
- ld->_epoch = arena->epoch;
+ ld->_epoch = ArenaHistory(arena)->epoch;
ld->_rs = RefSetEMPTY;
if (b)
ShieldCover(arena, seg);
@@ -106,7 +188,7 @@ void LDAdd(mps_ld_t ld, Arena arena, Addr addr)
{
AVER(ld != NULL);
AVER(TESTT(Arena, arena)); /* see .add.lock-free */
- AVER(ld->_epoch <= arena->epoch);
+ AVER(ld->_epoch <= ArenaHistory(arena)->epoch);
ld->_rs = RefSetAdd(arena, ld->_rs, addr);
}
@@ -134,23 +216,25 @@ void LDAdd(mps_ld_t ld, Arena arena, Addr addr)
*/
Bool LDIsStaleAny(mps_ld_t ld, Arena arena)
{
+ History history;
RefSet rs;
AVER(ld != NULL);
AVER(TESTT(Arena, arena)); /* .stale.thread-safe */
- AVER(ld->_epoch <= arena->epoch);
+ history = ArenaHistory(arena);
+ AVER(ld->_epoch <= history->epoch);
- if (arena->epoch == ld->_epoch) /* .stale.current */
+ if (history->epoch == ld->_epoch) /* .stale.current */
return FALSE;
/* Load the history refset, _then_ check to see if it's recent.
* This may in fact load an okay refset, which we decide to throw
* away and use the pre-history instead. */
- rs = arena->history[ld->_epoch % LDHistoryLENGTH];
+ rs = history->history[ld->_epoch % LDHistoryLENGTH];
/* .stale.recent */
/* .stale.recent.conservative */
- if (arena->epoch - ld->_epoch > LDHistoryLENGTH) {
- rs = arena->prehistory; /* .stale.old */
+ if (history->epoch - ld->_epoch > LDHistoryLENGTH) {
+ rs = history->prehistory; /* .stale.old */
}
return RefSetInter(ld->_rs, rs) != RefSetEMPTY;
@@ -186,28 +270,30 @@ Bool LDIsStale(mps_ld_t ld, Arena arena, Addr addr)
*/
void LDAge(Arena arena, RefSet rs)
{
+ History history;
Size i;
AVERT(Arena, arena);
+ history = ArenaHistory(arena);
AVER(rs != RefSetEMPTY);
/* Replace the entry for epoch - LDHistoryLENGTH by an empty */
/* set which will become the set which has moved since the */
/* current epoch. */
- arena->history[arena->epoch % LDHistoryLENGTH] = RefSetEMPTY;
+ history->history[history->epoch % LDHistoryLENGTH] = RefSetEMPTY;
/* Record the fact that the moved set has moved, by adding it */
/* to all the sets in the history, including the set for the */
/* current epoch. */
for(i = 0; i < LDHistoryLENGTH; ++i)
- arena->history[i] = RefSetUnion(arena->history[i], rs);
+ history->history[i] = RefSetUnion(history->history[i], rs);
/* This is the union of all movement since time zero. */
- arena->prehistory = RefSetUnion(arena->prehistory, rs);
+ history->prehistory = RefSetUnion(history->prehistory, rs);
/* Advance the epoch by one. */
- ++arena->epoch;
- AVER(arena->epoch != 0); /* .epoch-size */
+ ++history->epoch;
+ AVER(history->epoch != 0); /* .epoch-size */
}
@@ -221,9 +307,9 @@ void LDMerge(mps_ld_t ld, Arena arena, mps_ld_t from)
{
AVER(ld != NULL);
AVER(TESTT(Arena, arena)); /* .merge.lock-free */
- AVER(ld->_epoch <= arena->epoch);
+ AVER(ld->_epoch <= ArenaHistory(arena)->epoch);
AVER(from != NULL);
- AVER(from->_epoch <= arena->epoch);
+ AVER(from->_epoch <= ArenaHistory(arena)->epoch);
/* If a reference has been added since epoch e1 then I've */
/* certainly added since epoch e0 where e0 < e1. Therefore */
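
The new History type factors out the epoch/refset state behind the
public location-dependency protocol. For orientation, a sketch of how a
client uses that protocol through the mps_ld_* functions: the functions
are the real API from mps.h, but the address-keyed table around them is
hypothetical and error handling is omitted.

    #include <stddef.h>

    #include "mps.h"

    #define TABLE_SIZE 256

    typedef struct AddrTable {
      mps_ld_s ld;                  /* dependency on the keys' locations */
      mps_addr_t keys[TABLE_SIZE];  /* keyed by object address */
    } AddrTable;

    static size_t hash(mps_addr_t addr)
    {
      return ((size_t)addr >> 3) % TABLE_SIZE;
    }

    static void tableReset(AddrTable *table, mps_arena_t arena)
    {
      size_t i;
      mps_ld_reset(&table->ld, arena);  /* record the current epoch */
      for (i = 0; i < TABLE_SIZE; ++i)
        table->keys[i] = NULL;
    }

    static void tableAdd(AddrTable *table, mps_arena_t arena, mps_addr_t key)
    {
      /* Record the dependency on key's location before hashing on it. */
      mps_ld_add(&table->ld, arena, key);
      table->keys[hash(key)] = key;
    }

    static int tableFind(AddrTable *table, mps_arena_t arena, mps_addr_t key)
    {
      if (table->keys[hash(key)] == key)
        return 1;
      /* A miss is only believable if no recorded key can have moved
       * since the epoch noted at reset; otherwise the table must be
       * rehashed and the lookup retried. */
      if (mps_ld_isstale(&table->ld, arena, key)) {
        /* ... rehash all keys, then look up again ... */
      }
      return 0;
    }

LDIsStaleAny answers the conservative question for the whole table at
once; the history array bounds how conservative the answer gets as the
dependency's epoch recedes.
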
diff --git a/mps/code/locus.c b/mps/code/locus.c
index e05267c5ec9..4e68cbcb8c8 100644
--- a/mps/code/locus.c
+++ b/mps/code/locus.c
@@ -452,6 +452,7 @@ Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool)
pgen->segs = 0;
pgen->totalSize = 0;
pgen->freeSize = 0;
+ pgen->bufferedSize = 0;
pgen->newSize = 0;
pgen->oldSize = 0;
pgen->newDeferredSize = 0;
@@ -469,11 +470,12 @@ Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool)
void PoolGenFinish(PoolGen pgen)
{
AVERT(PoolGen, pgen);
+ AVER(pgen->segs == 0);
AVER(pgen->totalSize == 0);
+ AVER(pgen->freeSize == 0);
+ AVER(pgen->bufferedSize == 0);
AVER(pgen->newSize == 0);
AVER(pgen->newDeferredSize == 0);
- AVER(pgen->segs == 0);
- AVER(pgen->freeSize == 0);
AVER(pgen->oldSize == 0);
AVER(pgen->oldDeferredSize == 0);
@@ -493,7 +495,8 @@ Bool PoolGenCheck(PoolGen pgen)
CHECKD_NOSIG(Ring, &pgen->genRing);
CHECKL((pgen->totalSize == 0) == (pgen->segs == 0));
CHECKL(pgen->totalSize >= pgen->segs * ArenaGrainSize(PoolArena(pgen->pool)));
- CHECKL(pgen->totalSize == pgen->freeSize + pgen->newSize + pgen->oldSize
+ CHECKL(pgen->totalSize == pgen->freeSize + pgen->bufferedSize
+ + pgen->newSize + pgen->oldSize
+ pgen->newDeferredSize + pgen->oldDeferredSize);
return TRUE;
}
@@ -502,47 +505,42 @@ Bool PoolGenCheck(PoolGen pgen)
/* PoolGenAccountForFill -- accounting for allocation
*
* Call this when the pool allocates memory to the client program via
- * BufferFill. The deferred flag indicates whether the accounting of
- * this memory (for the purpose of scheduling collections) should be
- * deferred until later.
+ * BufferFill.
*
* See
*/
-void PoolGenAccountForFill(PoolGen pgen, Size size, Bool deferred)
+void PoolGenAccountForFill(PoolGen pgen, Size size)
{
AVERT(PoolGen, pgen);
- AVERT(Bool, deferred);
AVER(pgen->freeSize >= size);
pgen->freeSize -= size;
- if (deferred)
- pgen->newDeferredSize += size;
- else
- pgen->newSize += size;
+ pgen->bufferedSize += size;
}
/* PoolGenAccountForEmpty -- accounting for emptying a buffer
*
- * Call this when the client program returns memory (that was never
- * condemned) to the pool via BufferEmpty. The deferred flag is as for
- * PoolGenAccountForFill.
+ * Call this when the client program returns memory to the pool via
+ * BufferEmpty. The deferred flag indicates whether the accounting of
+ * the used memory (for the purpose of scheduling collections) should
+ * be deferred until later.
*
* See
*/
-void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred)
+void PoolGenAccountForEmpty(PoolGen pgen, Size used, Size unused, Bool deferred)
{
AVERT(PoolGen, pgen);
AVERT(Bool, deferred);
+ AVER(pgen->bufferedSize >= used + unused);
+ pgen->bufferedSize -= used + unused;
if (deferred) {
- AVER(pgen->newDeferredSize >= unused);
- pgen->newDeferredSize -= unused;
+ pgen->newDeferredSize += used;
} else {
- AVER(pgen->newSize >= unused);
- pgen->newSize -= unused;
+ pgen->newSize += used;
}
pgen->freeSize += unused;
}
@@ -550,25 +548,30 @@ void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred)
/* PoolGenAccountForAge -- accounting for condemning
*
- * Call this when memory is condemned via PoolWhiten. The size
- * parameter should be the amount of memory that is being condemned
- * for the first time. The deferred flag is as for PoolGenAccountForFill.
+ * Call this when memory is condemned via PoolWhiten. The parameters
+ * specify the amount of memory that was buffered/new and is now being
+ * condemned for the first time. The deferred flag is as for
+ * PoolGenAccountForEmpty.
*
* See
*/
-void PoolGenAccountForAge(PoolGen pgen, Size size, Bool deferred)
+void PoolGenAccountForAge(PoolGen pgen, Size wasBuffered, Size wasNew,
+ Bool deferred)
{
AVERT(PoolGen, pgen);
-
+ AVERT(Bool, deferred);
+
+ AVER(pgen->bufferedSize >= wasBuffered);
+ pgen->bufferedSize -= wasBuffered;
if (deferred) {
- AVER(pgen->newDeferredSize >= size);
- pgen->newDeferredSize -= size;
- pgen->oldDeferredSize += size;
+ AVER(pgen->newDeferredSize >= wasNew);
+ pgen->newDeferredSize -= wasNew;
+ pgen->oldDeferredSize += wasBuffered + wasNew;
} else {
- AVER(pgen->newSize >= size);
- pgen->newSize -= size;
- pgen->oldSize += size;
+ AVER(pgen->newSize >= wasNew);
+ pgen->newSize -= wasNew;
+ pgen->oldSize += wasBuffered + wasNew;
}
}
@@ -576,7 +579,7 @@ void PoolGenAccountForAge(PoolGen pgen, Size size, Bool deferred)
/* PoolGenAccountForReclaim -- accounting for reclaiming
*
* Call this when reclaiming memory, passing the amount of memory that
- * was reclaimed. The deferred flag is as for PoolGenAccountForFill.
+ * was reclaimed. The deferred flag is as for PoolGenAccountForEmpty.
*
* See
*/
@@ -642,7 +645,7 @@ void PoolGenAccountForSegMerge(PoolGen pgen)
*
* Pass the amount of memory in the segment that is accounted as free,
* old, or new, respectively. The deferred flag is as for
- * PoolGenAccountForFill.
+ * PoolGenAccountForEmpty.
*
* See
*/
@@ -660,7 +663,7 @@ void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize,
/* Pretend to age and reclaim the contents of the segment to ensure
* that the entire segment is accounted as free. */
- PoolGenAccountForAge(pgen, newSize, deferred);
+ PoolGenAccountForAge(pgen, 0, newSize, deferred);
PoolGenAccountForReclaim(pgen, oldSize + newSize, deferred);
AVER(pgen->totalSize >= size);
@@ -695,6 +698,7 @@ Res PoolGenDescribe(PoolGen pgen, mps_lib_FILE *stream, Count depth)
" segs $U\n", (WriteFU)pgen->segs,
" totalSize $U\n", (WriteFU)pgen->totalSize,
" freeSize $U\n", (WriteFU)pgen->freeSize,
+ " bufferedSize $U\n", (WriteFU)pgen->bufferedSize,
" oldSize $U\n", (WriteFU)pgen->oldSize,
" oldDeferredSize $U\n", (WriteFU)pgen->oldDeferredSize,
" newSize $U\n", (WriteFU)pgen->newSize,
diff --git a/mps/code/locus.h b/mps/code/locus.h
index 68211007789..ff7c482c632 100644
--- a/mps/code/locus.h
+++ b/mps/code/locus.h
@@ -51,13 +51,14 @@ typedef struct PoolGenStruct {
RingStruct genRing;
/* Accounting of memory in this generation for this pool */
- STATISTIC_DECL(Size segs); /* number of segments */
- Size totalSize; /* total (sum of segment sizes) */
- STATISTIC_DECL(Size freeSize); /* unused (free or lost to fragmentation) */
- Size newSize; /* allocated since last collection */
- STATISTIC_DECL(Size oldSize); /* allocated prior to last collection */
- Size newDeferredSize; /* new (but deferred) */
- STATISTIC_DECL(Size oldDeferredSize); /* old (but deferred) */
+ Size segs; /* number of segments */
+ Size totalSize; /* total (sum of segment sizes) */
+ Size freeSize; /* unused (free or lost to fragmentation) */
+ Size bufferedSize; /* held in buffers but not condemned yet */
+ Size newSize; /* allocated since last collection */
+ Size oldSize; /* allocated prior to last collection */
+ Size newDeferredSize; /* new (but deferred) */
+ Size oldDeferredSize; /* old (but deferred) */
} PoolGenStruct;
@@ -99,9 +100,9 @@ extern Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass klass,
Size size, ArgList args);
extern void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize,
Size newSize, Bool deferred);
-extern void PoolGenAccountForFill(PoolGen pgen, Size size, Bool deferred);
-extern void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred);
-extern void PoolGenAccountForAge(PoolGen pgen, Size aged, Bool deferred);
+extern void PoolGenAccountForFill(PoolGen pgen, Size size);
+extern void PoolGenAccountForEmpty(PoolGen pgen, Size used, Size unused, Bool deferred);
+extern void PoolGenAccountForAge(PoolGen pgen, Size wasBuffered, Size wasNew, Bool deferred);
extern void PoolGenAccountForReclaim(PoolGen pgen, Size reclaimed, Bool deferred);
extern void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize);
extern void PoolGenAccountForSegSplit(PoolGen pgen);
diff --git a/mps/code/mpm.h b/mps/code/mpm.h
index 847b43a4613..e8ba89c5ed8 100644
--- a/mps/code/mpm.h
+++ b/mps/code/mpm.h
@@ -226,9 +226,7 @@ extern Res PoolWhiten(Pool pool, Trace trace, Seg seg);
extern void PoolGrey(Pool pool, Trace trace, Seg seg);
extern void PoolBlacken(Pool pool, TraceSet traceSet, Seg seg);
extern Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg);
-extern Res (PoolFix)(Pool pool, ScanState ss, Seg seg, Addr *refIO);
-#define PoolFix(pool, ss, seg, refIO) \
- ((*(pool)->fix)(pool, ss, seg, refIO))
+extern Res PoolFix(Pool pool, ScanState ss, Seg seg, Addr *refIO);
extern Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO);
extern void PoolReclaim(Pool pool, Trace trace, Seg seg);
extern void PoolTraceEnd(Pool pool, Trace trace);
@@ -392,7 +390,8 @@ extern Bool TraceIsEmpty(Trace trace);
extern Res TraceAddWhite(Trace trace, Seg seg);
extern Res TraceCondemnZones(Trace trace, ZoneSet condemnedSet);
extern Res TraceStart(Trace trace, double mortality, double finishingTime);
-extern Bool TracePoll(Work *workReturn, Globals globals);
+extern Bool TracePoll(Work *workReturn, Bool *collectWorldReturn,
+ Globals globals, Bool collectWorldAllowed);
extern Rank TraceRankForAccess(Arena arena, Seg seg);
extern void TraceSegAccess(Arena arena, Seg seg, AccessSet mode);
@@ -502,7 +501,7 @@ extern Ring GlobalsRememberedSummaryRing(Globals);
#define ArenaThreadRing(arena) (&(arena)->threadRing)
#define ArenaDeadRing(arena) (&(arena)->deadRing)
-#define ArenaEpoch(arena) ((arena)->epoch) /* .epoch.ts */
+#define ArenaEpoch(arena) (ArenaHistory(arena)->epoch) /* .epoch.ts */
#define ArenaTrace(arena, ti) (&(arena)->trace[ti])
#define ArenaZoneShift(arena) ((arena)->zoneShift)
#define ArenaStripeSize(arena) ((Size)1 << ArenaZoneShift(arena))
@@ -512,6 +511,7 @@ extern Ring GlobalsRememberedSummaryRing(Globals);
#define ArenaChunkTree(arena) RVALUE((arena)->chunkTree)
#define ArenaChunkRing(arena) RVALUE(&(arena)->chunkRing)
#define ArenaShield(arena) (&(arena)->shieldStruct)
+#define ArenaHistory(arena) (&(arena)->historyStruct)
extern Bool ArenaGrainSizeCheck(Size size);
#define AddrArenaGrainUp(addr, arena) AddrAlignUp(addr, ArenaGrainSize(arena))
@@ -634,7 +634,8 @@ extern Res PolicyAlloc(Tract *tractReturn, Arena arena, LocusPref pref,
Size size, Pool pool);
extern Bool PolicyShouldCollectWorld(Arena arena, double availableTime,
Clock now, Clock clocks_per_sec);
-extern Bool PolicyStartTrace(Trace *traceReturn, Arena arena);
+extern Bool PolicyStartTrace(Trace *traceReturn, Bool *collectWorldReturn,
+ Arena arena, Bool collectWorldAllowed);
extern Bool PolicyPoll(Arena arena);
extern Bool PolicyPollAgain(Arena arena, Clock start, Bool moreWork, Work tracedWork);
@@ -904,6 +905,10 @@ extern void (ShieldFlush)(Arena arena);
/* Location Dependency -- see */
+extern void HistoryInit(History history);
+extern void HistoryFinish(History);
+extern Res HistoryDescribe(History history, mps_lib_FILE *stream, Count depth);
+extern Bool HistoryCheck(History history);
extern void LDReset(mps_ld_t ld, Arena arena);
extern void LDAdd(mps_ld_t ld, Arena arena, Addr addr);
extern Bool LDIsStaleAny(mps_ld_t ld, Arena arena);
@@ -987,31 +992,17 @@ DECLARE_CLASS(Land, Land, Inst);
/* STATISTIC -- gather statistics (in some varieties)
*
- * The argument of STATISTIC is an expression; the expansion followed by
- * a semicolon is syntactically a statement.
- *
- * The argument of STATISTIC_STAT is a statement; the expansion followed by
- * a semicolon is syntactically a statement.
- *
- * STATISTIC_WRITE is inserted in WriteF arguments to output the values
- * of statistic fields.
- *
- * .statistic.whitehot: The implementation of STATISTIC for
- * non-statistical varieties passes the parameter to DISCARD to ensure
- * the parameter is syntactically an expression. The parameter is
- * passed as part of a comma-expression so that its type is not
- * important. This permits an expression of type void. */
+ * See .
+ */
#if defined(STATISTICS)
-#define STATISTIC(gather) BEGIN (gather); END
-#define STATISTIC_STAT(gather) BEGIN gather; END
+#define STATISTIC(gather) BEGIN gather; END
#define STATISTIC_WRITE(format, arg) (format), (arg),
#elif defined(STATISTICS_NONE)
-#define STATISTIC(gather) DISCARD(((gather), 0))
-#define STATISTIC_STAT DISCARD_STAT
+#define STATISTIC(gather) NOOP
#define STATISTIC_WRITE(format, arg)
#else /* !defined(STATISTICS) && !defined(STATISTICS_NONE) */
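
With STATISTIC_STAT gone, STATISTIC now accepts a whole statement, and
in non-stats builds the argument is discarded unexpanded rather than
being routed through DISCARD for syntax checking. A compressed sketch of
the pattern outside the MPS, with BEGIN/END and NOOP replaced by their
conventional do-while equivalents and an invented struct:

    #include <stdio.h>

    #ifdef STATISTICS
    #define STATISTIC(gather) do { gather; } while (0)  /* BEGIN gather; END */
    #else
    #define STATISTIC(gather) do {} while (0)           /* NOOP; arg dropped */
    #endif

    typedef struct Counter {
    #ifdef STATISTICS
      unsigned long hits;   /* present only in stats builds */
    #endif
      int payload;
    } Counter;

    static void touch(Counter *c)
    {
      c->payload = 0;
      /* Statements, not just expressions, may now be gathered; without
       * STATISTICS this line vanishes along with the hits field. */
      STATISTIC({ ++c->hits; if (c->hits % 1024 == 0) printf("hit\n"); });
    }

This is what lets VMCompact above wrap a whole block, and cbs.c write
STATISTIC(cbs->treeSize = 0), without the two-macro split.
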
diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h
index e1da4e7618b..065088e9cf0 100644
--- a/mps/code/mpmst.h
+++ b/mps/code/mpmst.h
@@ -428,17 +428,17 @@ typedef struct ScanStateStruct {
Rank rank; /* reference rank of scanning */
Bool wasMarked; /* design.mps.fix.protocol.was-ready */
RefSet fixedSummary; /* accumulated summary of fixed references */
- STATISTIC_DECL(Count fixRefCount); /* refs which pass zone check */
- STATISTIC_DECL(Count segRefCount); /* refs which refer to segs */
- STATISTIC_DECL(Count whiteSegRefCount); /* refs which refer to white segs */
- STATISTIC_DECL(Count nailCount); /* segments nailed by ambig refs */
- STATISTIC_DECL(Count snapCount); /* refs snapped to forwarded objs */
- STATISTIC_DECL(Count forwardedCount); /* objects preserved by moving */
+ STATISTIC_DECL(Count fixRefCount) /* refs which pass zone check */
+ STATISTIC_DECL(Count segRefCount) /* refs which refer to segs */
+ STATISTIC_DECL(Count whiteSegRefCount) /* refs which refer to white segs */
+ STATISTIC_DECL(Count nailCount) /* segments nailed by ambig refs */
+ STATISTIC_DECL(Count snapCount) /* refs snapped to forwarded objs */
+ STATISTIC_DECL(Count forwardedCount) /* objects preserved by moving */
Size forwardedSize; /* bytes preserved by moving */
- STATISTIC_DECL(Count preservedInPlaceCount); /* objects preserved in place */
+ STATISTIC_DECL(Count preservedInPlaceCount) /* objects preserved in place */
Size preservedInPlaceSize; /* bytes preserved in place */
- STATISTIC_DECL(Size copiedSize); /* bytes copied */
- STATISTIC_DECL(Size scannedSize); /* bytes scanned */
+ STATISTIC_DECL(Size copiedSize) /* bytes copied */
+ Size scannedSize; /* bytes scanned */
} ScanStateStruct;
@@ -459,35 +459,35 @@ typedef struct TraceStruct {
PoolFixMethod fix; /* fix method to apply to references */
void *fixClosure; /* closure information for fix method */
Chain chain; /* chain being incrementally collected */
- STATISTIC_DECL(Size preTraceArenaReserved); /* ArenaReserved before this trace */
+ STATISTIC_DECL(Size preTraceArenaReserved) /* ArenaReserved before this trace */
Size condemned; /* condemned bytes */
Size notCondemned; /* collectable but not condemned */
Size foundation; /* initial grey set size */
Work quantumWork; /* tracing work to be done in each poll */
- STATISTIC_DECL(Count greySegCount); /* number of grey segs */
- STATISTIC_DECL(Count greySegMax); /* max number of grey segs */
- STATISTIC_DECL(Count rootScanCount); /* number of roots scanned */
+ STATISTIC_DECL(Count greySegCount) /* number of grey segs */
+ STATISTIC_DECL(Count greySegMax) /* max number of grey segs */
+ STATISTIC_DECL(Count rootScanCount) /* number of roots scanned */
Count rootScanSize; /* total size of scanned roots */
- Size rootCopiedSize; /* bytes copied by scanning roots */
- STATISTIC_DECL(Count segScanCount); /* number of segs scanned */
+ STATISTIC_DECL(Size rootCopiedSize) /* bytes copied by scanning roots */
+ STATISTIC_DECL(Count segScanCount) /* number of segs scanned */
Count segScanSize; /* total size of scanned segments */
- Size segCopiedSize; /* bytes copied by scanning segments */
- STATISTIC_DECL(Count singleScanCount); /* number of single refs scanned */
- STATISTIC_DECL(Count singleScanSize); /* total size of single refs scanned */
- STATISTIC_DECL(Size singleCopiedSize); /* bytes copied by scanning single refs */
- STATISTIC_DECL(Count fixRefCount); /* refs which pass zone check */
- STATISTIC_DECL(Count segRefCount); /* refs which refer to segs */
- STATISTIC_DECL(Count whiteSegRefCount); /* refs which refer to white segs */
- STATISTIC_DECL(Count nailCount); /* segments nailed by ambig refs */
- STATISTIC_DECL(Count snapCount); /* refs snapped to forwarded objs */
- STATISTIC_DECL(Count readBarrierHitCount); /* read barrier faults */
- STATISTIC_DECL(Count pointlessScanCount); /* pointless seg scans */
- STATISTIC_DECL(Count forwardedCount); /* objects preserved by moving */
+ STATISTIC_DECL(Size segCopiedSize) /* bytes copied by scanning segments */
+ STATISTIC_DECL(Count singleScanCount) /* number of single refs scanned */
+ STATISTIC_DECL(Count singleScanSize) /* total size of single refs scanned */
+ STATISTIC_DECL(Size singleCopiedSize) /* bytes copied by scanning single refs */
+ STATISTIC_DECL(Count fixRefCount) /* refs which pass zone check */
+ STATISTIC_DECL(Count segRefCount) /* refs which refer to segs */
+ STATISTIC_DECL(Count whiteSegRefCount) /* refs which refer to white segs */
+ STATISTIC_DECL(Count nailCount) /* segments nailed by ambig refs */
+ STATISTIC_DECL(Count snapCount) /* refs snapped to forwarded objs */
+ STATISTIC_DECL(Count readBarrierHitCount) /* read barrier faults */
+ STATISTIC_DECL(Count pointlessScanCount) /* pointless seg scans */
+ STATISTIC_DECL(Count forwardedCount) /* objects preserved by moving */
Size forwardedSize; /* bytes preserved by moving */
- STATISTIC_DECL(Count preservedInPlaceCount); /* objects preserved in place */
+ STATISTIC_DECL(Count preservedInPlaceCount) /* objects preserved in place */
Size preservedInPlaceSize; /* bytes preserved in place */
- STATISTIC_DECL(Count reclaimCount); /* segments reclaimed */
- STATISTIC_DECL(Count reclaimSize); /* bytes reclaimed */
+ STATISTIC_DECL(Count reclaimCount) /* segments reclaimed */
+ STATISTIC_DECL(Count reclaimSize) /* bytes reclaimed */
} TraceStruct;
@@ -622,13 +622,13 @@ typedef struct LandStruct {
typedef struct CBSStruct {
LandStruct landStruct; /* superclass fields come first */
SplayTreeStruct splayTreeStruct;
- STATISTIC_DECL(Count treeSize);
+ STATISTIC_DECL(Count treeSize)
Pool blockPool; /* pool that manages blocks */
Size blockStructSize; /* size of block structure */
Bool ownPool; /* did we create blockPool? */
Size size; /* total size of ranges in CBS */
/* meters for sizes of search structures at each op */
- METER_DECL(treeSearch);
+ METER_DECL(treeSearch)
Sig sig; /* .class.end-sig */
} CBSStruct;
@@ -708,6 +708,21 @@ typedef struct ShieldStruct {
} ShieldStruct;
+/* History -- location dependency history
+ *
+ * See design.mps.arena.ld.
+ */
+
+#define HistorySig ((Sig)0x51981520) /* SIGnature HISTOry */
+
+typedef struct HistoryStruct {
+ Sig sig; /* design.mps.sig */
+ Epoch epoch; /* */
+ RefSet prehistory; /* */
+ RefSet history[LDHistoryLENGTH]; /* */
+} HistoryStruct;
+
+
/* ArenaStruct -- generic arena
*
* See .
@@ -788,14 +803,11 @@ typedef struct mps_arena_s {
Clock lastWorldCollect;
RingStruct greyRing[RankLIMIT]; /* ring of grey segments at each rank */
- STATISTIC_DECL(Count writeBarrierHitCount); /* write barrier hits */
+ STATISTIC_DECL(Count writeBarrierHitCount) /* write barrier hits */
RingStruct chainRing; /* ring of chains */
- /* location dependency fields () */
- Epoch epoch; /* */
- RefSet prehistory; /* */
- RefSet history[LDHistoryLENGTH]; /* */
-
+ struct HistoryStruct historyStruct;
+
Bool emergency; /* garbage collect in emergency mode? */
Word *stackAtArenaEnter; /* NULL or hot end of client stack, in the thread */
diff --git a/mps/code/mpmtypes.h b/mps/code/mpmtypes.h
index 281431c0947..5d8070eaa46 100644
--- a/mps/code/mpmtypes.h
+++ b/mps/code/mpmtypes.h
@@ -1,7 +1,7 @@
/* mpmtypes.h: MEMORY POOL MANAGER TYPES
*
* $Id$
- * Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
* Portions copyright (c) 2001 Global Graphics Software.
*
* .design:
@@ -105,6 +105,7 @@ typedef struct LandStruct *Land; /* */
typedef struct LandClassStruct *LandClass; /* */
typedef unsigned FindDelete; /* */
typedef struct ShieldStruct *Shield; /* design.mps.shield */
+typedef struct HistoryStruct *History; /* design.mps.arena.ld */
/* Arena*Method -- see */
@@ -282,6 +283,7 @@ typedef Res (*LandDescribeMethod)(Land land, mps_lib_FILE *stream, Count depth);
#define RefSetUNIV BS_UNIV(RefSet)
#define ZoneSetEMPTY BS_EMPTY(ZoneSet)
#define ZoneSetUNIV BS_UNIV(ZoneSet)
+#define ZoneShiftUNSET ((Shift)-1)
#define TraceSetEMPTY BS_EMPTY(TraceSet)
#define TraceSetUNIV ((TraceSet)((1u << TraceLIMIT) - 1))
#define RankSetEMPTY BS_EMPTY(RankSet)
@@ -423,14 +425,13 @@ typedef double WriteFD;
/* STATISTIC_DECL -- declare a field to accumulate statistics in
*
* The argument is a field declaration (a struct-declaration minus the
- * semicolon) for a single field (no commas). Currently, we always
- * leave them in, see design.mps.metrics.
+ * semicolon) for a single field (no commas).
*/
#if defined(STATISTICS)
-#define STATISTIC_DECL(field) field
+#define STATISTIC_DECL(field) field;
#elif defined(STATISTICS_NONE)
-#define STATISTIC_DECL(field) field
+#define STATISTIC_DECL(field)
#else
#error "No statistics configured."
#endif
@@ -441,7 +442,7 @@ typedef double WriteFD;
/* C. COPYRIGHT AND LICENSE
*
- * Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
+ * Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
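
The companion STATISTIC_DECL change above is why struct fields
throughout this diff lose their trailing semicolons: the macro now
supplies the semicolon itself, so the empty non-stats expansion leaves
no stray ';' at struct scope (which C89 does not allow). A minimal
illustration with an invented field:

    #ifdef STATISTICS
    #define STATISTIC_DECL(field) field;
    #else
    #define STATISTIC_DECL(field)
    #endif

    typedef struct FooStruct {
      STATISTIC_DECL(unsigned long hitCount)  /* no ';' -- the macro owns it */
      int alwaysPresent;                      /* ordinary field, unaffected */
    } FooStruct;

METER_DECL in abq.h and mpmst.h follows the same convention.
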
diff --git a/mps/code/mpsicv.c b/mps/code/mpsicv.c
index f0415f4e847..d026f2cd684 100644
--- a/mps/code/mpsicv.c
+++ b/mps/code/mpsicv.c
@@ -542,8 +542,13 @@ int main(int argc, char *argv[])
testlib_init(argc, argv);
- die(mps_arena_create(&arena, mps_arena_class_vm(), TEST_ARENA_SIZE),
- "arena_create");
+ MPS_ARGS_BEGIN(args) {
+ /* Randomize pause time as a regression test for job004011. */
+ MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, rnd_pause_time());
+ MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, TEST_ARENA_SIZE);
+ die(mps_arena_create_k(&arena, mps_arena_class_vm(), args),
+ "arena_create\n");
+ } MPS_ARGS_END(args);
die(mps_thread_reg(&thread, arena), "thread_reg");
if (rnd() % 2) {
diff --git a/mps/code/policy.c b/mps/code/policy.c
index a6c794bb462..8b72a74dc7e 100644
--- a/mps/code/policy.c
+++ b/mps/code/policy.c
@@ -266,40 +266,53 @@ static Res policyCondemnChain(double *mortalityReturn, Chain chain, Trace trace)
/* PolicyStartTrace -- consider starting a trace
+ *
+ * If collectWorldAllowed is TRUE, consider starting a collection of
+ * the world. Otherwise, consider only starting collections of individual
+ * chains or generations.
+ *
+ * If a collection of the world was started, set *collectWorldReturn
+ * to TRUE. Otherwise leave it unchanged.
*
* If a trace was started, update *traceReturn and return TRUE.
* Otherwise, leave *traceReturn unchanged and return FALSE.
*/
-Bool PolicyStartTrace(Trace *traceReturn, Arena arena)
+Bool PolicyStartTrace(Trace *traceReturn, Bool *collectWorldReturn,
+ Arena arena, Bool collectWorldAllowed)
{
Res res;
Trace trace;
- Size sFoundation, sCondemned, sSurvivors, sConsTrace;
- double tTracePerScan; /* tTrace/cScan */
- double dynamicDeferral;
- /* Compute dynamic criterion. See strategy.lisp-machine. */
- AVER(arena->topGen.mortality >= 0.0);
- AVER(arena->topGen.mortality <= 1.0);
- sFoundation = (Size)0; /* condemning everything, only roots @@@@ */
- /* @@@@ sCondemned should be scannable only */
- sCondemned = ArenaCommitted(arena) - ArenaSpareCommitted(arena);
- sSurvivors = (Size)(sCondemned * (1 - arena->topGen.mortality));
- tTracePerScan = sFoundation + (sSurvivors * (1 + TraceCopyScanRATIO));
- AVER(TraceWorkFactor >= 0);
- AVER(sSurvivors + tTracePerScan * TraceWorkFactor <= (double)SizeMAX);
- sConsTrace = (Size)(sSurvivors + tTracePerScan * TraceWorkFactor);
- dynamicDeferral = (double)ArenaAvail(arena) - (double)sConsTrace;
+ if (collectWorldAllowed) {
+ Size sFoundation, sCondemned, sSurvivors, sConsTrace;
+ double tTracePerScan; /* tTrace/cScan */
+ double dynamicDeferral;
- if (dynamicDeferral < 0.0) {
- /* Start full collection. */
- res = TraceStartCollectAll(&trace, arena, TraceStartWhyDYNAMICCRITERION);
- if (res != ResOK)
- goto failStart;
- *traceReturn = trace;
- return TRUE;
- } else {
+ /* Compute dynamic criterion. See strategy.lisp-machine. */
+ AVER(arena->topGen.mortality >= 0.0);
+ AVER(arena->topGen.mortality <= 1.0);
+ sFoundation = (Size)0; /* condemning everything, only roots @@@@ */
+ /* @@@@ sCondemned should be scannable only */
+ sCondemned = ArenaCommitted(arena) - ArenaSpareCommitted(arena);
+ sSurvivors = (Size)(sCondemned * (1 - arena->topGen.mortality));
+ tTracePerScan = sFoundation + (sSurvivors * (1 + TraceCopyScanRATIO));
+ AVER(TraceWorkFactor >= 0);
+ AVER(sSurvivors + tTracePerScan * TraceWorkFactor <= (double)SizeMAX);
+ sConsTrace = (Size)(sSurvivors + tTracePerScan * TraceWorkFactor);
+ dynamicDeferral = (double)ArenaAvail(arena) - (double)sConsTrace;
+
+ if (dynamicDeferral < 0.0) {
+ /* Start full collection. */
+ res = TraceStartCollectAll(&trace, arena, TraceStartWhyDYNAMICCRITERION);
+ if (res != ResOK)
+ goto failStart;
+ *collectWorldReturn = TRUE;
+ *traceReturn = trace;
+ return TRUE;
+ }
+ }
+ {
/* Find the chain most over its capacity. */
Ring node, nextNode;
double firstTime = 0.0;
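
A worked example of the dynamic criterion above (all numbers illustrative;
TraceCopyScanRATIO and TraceWorkFactor are assumed here to be 1.5 and 0.25):
with sCondemned = 100 MiB and mortality = 0.8, sSurvivors = 20 MiB,
tTracePerScan = 20 * (1 + 1.5) = 50 MiB, and
sConsTrace = 20 + 50 * 0.25 = 32.5 MiB, so a collection of the world starts
once ArenaAvail(arena) falls below 32.5 MiB.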
diff --git a/mps/code/pool.c b/mps/code/pool.c
index 53daf5e2d62..85c904d88aa 100644
--- a/mps/code/pool.c
+++ b/mps/code/pool.c
@@ -348,10 +348,10 @@ Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
/* PoolFix* -- fix a reference to an object in this pool
*
- * See for macro version; see .
+ * See .
*/
-Res (PoolFix)(Pool pool, ScanState ss, Seg seg, Addr *refIO)
+Res PoolFix(Pool pool, ScanState ss, Seg seg, Addr *refIO)
{
AVERT_CRITICAL(Pool, pool);
AVERT_CRITICAL(ScanState, ss);
@@ -362,7 +362,7 @@ Res (PoolFix)(Pool pool, ScanState ss, Seg seg, Addr *refIO)
/* Should only be fixing references to white segments. */
AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
- return PoolFix(pool, ss, seg, refIO);
+ return pool->fix(pool, ss, seg, refIO);
}
Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO)
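
For context: the parentheses in the deleted "Res (PoolFix)(...)" are the
usual C idiom for defining a function that shares its name with a macro,
and the apparently recursive call in the old body invoked that macro
(which presumably dispatched through pool->fix). A minimal sketch of the
idiom, with hypothetical names:

    #define Double(x) ((x) * 2)                /* macro version */
    int (Double)(int x) { return Double(x); }  /* function version; body uses the macro */

With the macro gone, the patch defines PoolFix as a plain function that
dispatches directly through pool->fix.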
diff --git a/mps/code/poolamc.c b/mps/code/poolamc.c
index 23c9dfeb612..bf83d5458e8 100644
--- a/mps/code/poolamc.c
+++ b/mps/code/poolamc.c
@@ -75,9 +75,13 @@ enum {
/* amcSegStruct -- AMC-specific fields appended to GCSegStruct
*
- * .seg.old: The "old" flag is FALSE if the segment has never been
- * collected, and so its size is accounted against the pool
- * generation's newSize; it is TRUE if the segment has been collected
+ * .seg.accounted-as-buffered: The "accountedAsBuffered" flag is TRUE
+ * if the segment has an attached buffer and is accounted against the
+ * pool generation's bufferedSize. But note that if this is FALSE, the
+ * segment might still have an attached buffer -- this happens if the
+ * segment was condemned while the buffer was attached.
+ *
+ * .seg.old: The "old" flag is TRUE if the segment has been collected
* at least once, and so its size is accounted against the pool
* generation's oldSize.
*
@@ -98,6 +102,7 @@ typedef struct amcSegStruct {
GCSegStruct gcSegStruct; /* superclass fields must come first */
amcGen gen; /* generation this segment belongs to */
Nailboard board; /* nailboard for this segment or NULL if none */
+ BOOLFIELD(accountedAsBuffered); /* .seg.accounted-as-buffered */
BOOLFIELD(old); /* .seg.old */
BOOLFIELD(deferred); /* .seg.deferred */
Sig sig; /* */
@@ -114,6 +119,7 @@ static Bool amcSegCheck(amcSeg amcseg)
CHECKD(Nailboard, amcseg->board);
CHECKL(SegNailed(MustBeA(Seg, amcseg)) != TraceSetEMPTY);
}
+ /* CHECKL(BoolCheck(amcseg->accountedAsBuffered)); */
/* CHECKL(BoolCheck(amcseg->old)); */
/* CHECKL(BoolCheck(amcseg->deferred)); */
return TRUE;
@@ -143,6 +149,7 @@ static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
amcseg->gen = amcgen;
amcseg->board = NULL;
+ amcseg->accountedAsBuffered = FALSE;
amcseg->old = FALSE;
amcseg->deferred = FALSE;
@@ -849,6 +856,8 @@ static void AMCFinish(Pool pool)
Seg seg = SegOfPoolRing(node);
amcGen gen = amcSegGen(seg);
amcSeg amcseg = MustBeA(amcSeg, seg);
+ AVERT(amcSeg, amcseg);
+ AVER(!amcseg->accountedAsBuffered);
PoolGenFree(&gen->pgen, seg,
0,
amcseg->old ? SegSize(seg) : 0,
@@ -957,7 +966,9 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
}
}
- PoolGenAccountForFill(pgen, SegSize(seg), MustBeA(amcSeg, seg)->deferred);
+ PoolGenAccountForFill(pgen, SegSize(seg));
+ MustBeA(amcSeg, seg)->accountedAsBuffered = TRUE;
+
*baseReturn = base;
*limitReturn = limit;
return ResOK;
@@ -975,6 +986,7 @@ static void AMCBufferEmpty(Pool pool, Buffer buffer,
Size size;
Arena arena;
Seg seg;
+ amcSeg amcseg;
AVERT(Buffer, buffer);
AVER(BufferIsReady(buffer));
@@ -999,10 +1011,13 @@ static void AMCBufferEmpty(Pool pool, Buffer buffer,
ShieldCover(arena, seg);
}
- /* The unused part of the buffer is not reused by AMC, so we pass 0
- * for the unused argument. This call therefore has no effect on the
- * accounting, but we call it anyway for consistency. */
- PoolGenAccountForEmpty(&amcSegGen(seg)->pgen, 0, MustBeA(amcSeg, seg)->deferred);
+ amcseg = MustBeA(amcSeg, seg);
+ if (amcseg->accountedAsBuffered) {
+ /* Account the entire buffer (including the padding object) as used. */
+ PoolGenAccountForEmpty(&amcSegGen(seg)->pgen, SegSize(seg), 0,
+ amcseg->deferred);
+ amcseg->accountedAsBuffered = FALSE;
+ }
}
@@ -1069,9 +1084,10 @@ static void AMCRampEnd(Pool pool, Buffer buf)
&& amcseg->deferred
&& SegWhite(seg) == TraceSetEMPTY)
{
- PoolGenUndefer(pgen,
- amcseg->old ? SegSize(seg) : 0,
- amcseg->old ? 0 : SegSize(seg));
+ if (!amcseg->accountedAsBuffered)
+ PoolGenUndefer(pgen,
+ amcseg->old ? SegSize(seg) : 0,
+ amcseg->old ? 0 : SegSize(seg));
amcseg->deferred = FALSE;
}
}
@@ -1129,7 +1145,7 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
BufferScanLimit(buffer),
BufferLimit(buffer));
}
- ++trace->nailCount;
+ STATISTIC(++trace->nailCount);
SegSetNailed(seg, TraceSetSingle(trace));
} else {
/* Segment is nailed already, cannot create a nailboard */
@@ -1161,8 +1177,14 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
gen = amcSegGen(seg);
AVERT(amcGen, gen);
if (!amcseg->old) {
- PoolGenAccountForAge(&gen->pgen, SegSize(seg), amcseg->deferred);
amcseg->old = TRUE;
+ if (amcseg->accountedAsBuffered) {
+ /* Note that the segment remains buffered but the buffer contents
+ * are accounted as old. See .seg.accounted-as-buffered. */
+ amcseg->accountedAsBuffered = FALSE;
+ PoolGenAccountForAge(&gen->pgen, SegSize(seg), 0, amcseg->deferred);
+ } else
+ PoolGenAccountForAge(&gen->pgen, 0, SegSize(seg), amcseg->deferred);
}
/* Ensure we are forwarding into the right generation. */
@@ -1513,7 +1535,7 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
res = amcSegCreateNailboard(seg, pool);
if(res != ResOK)
return res;
- ++ss->nailCount;
+ STATISTIC(++ss->nailCount);
SegSetNailed(seg, TraceSetUnion(SegNailed(seg), ss->traces));
}
amcFixInPlace(pool, seg, ss, refIO);
@@ -1571,7 +1593,7 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
AVER_CRITICAL(buffer != NULL);
length = AddrOffset(ref, clientQ); /* .exposed.seg */
- STATISTIC_STAT(++ss->forwardedCount);
+ STATISTIC(++ss->forwardedCount);
ss->forwardedSize += length;
do {
res = BUFFER_RESERVE(&newBase, buffer, length);
@@ -1598,7 +1620,7 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
ShieldCover(arena, toSeg);
} while (!BUFFER_COMMIT(buffer, newBase, length));
- ss->copiedSize += length;
+ STATISTIC(ss->copiedSize += length);
(*format->move)(ref, newRef); /* .exposed.seg */
@@ -1606,7 +1628,7 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
} else {
/* reference to broken heart (which should be snapped out -- */
/* consider adding to (non-existent) snap-out cache here) */
- STATISTIC_STAT(++ss->snapCount);
+ STATISTIC(++ss->snapCount);
}
/* .fix.update: update the reference to whatever the above code */
@@ -1628,7 +1650,7 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
Addr p, limit;
Arena arena;
Format format;
- Size bytesReclaimed = (Size)0;
+ STATISTIC_DECL(Size bytesReclaimed = (Size)0)
Count preservedInPlaceCount = (Count)0;
Size preservedInPlaceSize = (Size)0;
AMC amc = MustBeA(AMCZPool, pool);
@@ -1673,7 +1695,7 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
/* Replace run of forwarding pointers and unreachable objects
* with a padding object. */
(*format->pad)(padBase, padLength);
- bytesReclaimed += padLength;
+ STATISTIC(bytesReclaimed += padLength);
padLength = 0;
}
padBase = q;
@@ -1690,7 +1712,7 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
/* Replace final run of forwarding pointers and unreachable
* objects with a padding object. */
(*format->pad)(padBase, padLength);
- bytesReclaimed += padLength;
+ STATISTIC(bytesReclaimed += padLength);
}
ShieldCover(arena, seg);
@@ -1701,9 +1723,9 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
MustBeA(amcSeg, seg)->board = NULL;
}
- AVER(bytesReclaimed <= SegSize(seg));
- trace->reclaimSize += bytesReclaimed;
- trace->preservedInPlaceCount += preservedInPlaceCount;
+ STATISTIC(AVER(bytesReclaimed <= SegSize(seg)));
+ STATISTIC(trace->reclaimSize += bytesReclaimed);
+ STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount);
trace->preservedInPlaceSize += preservedInPlaceSize;
/* Free the seg if we can; fixes .nailboard.limitations.middle. */
@@ -1758,7 +1780,7 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg)
/* segs should have been nailed anyway). */
AVER(SegBuffer(seg) == NULL);
- trace->reclaimSize += SegSize(seg);
+ STATISTIC(trace->reclaimSize += SegSize(seg));
PoolGenFree(&gen->pgen, seg, 0, SegSize(seg), 0, MustBeA(amcSeg, seg)->deferred);
}
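
Taken together, the poolamc.c hunks give the new flag this lifecycle (a
summary of the code above, not new behaviour):

    AMCBufferFill:  set accountedAsBuffered, account SegSize(seg) as buffered;
    AMCWhiten:      if set, clear it and account the whole segment as old
                    (the buffer itself stays attached; see
                    .seg.accounted-as-buffered);
    AMCBufferEmpty: if still set, clear it and account SegSize(seg) as used.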
diff --git a/mps/code/poolams.c b/mps/code/poolams.c
index 4155da162b5..e281513229a 100644
--- a/mps/code/poolams.c
+++ b/mps/code/poolams.c
@@ -55,7 +55,8 @@ Bool AMSSegCheck(AMSSeg amsseg)
CHECKL(amsseg->grains == AMSGrains(amsseg->ams, SegSize(seg)));
CHECKL(amsseg->grains > 0);
- CHECKL(amsseg->grains == amsseg->freeGrains + amsseg->oldGrains + amsseg->newGrains);
+ CHECKL(amsseg->grains == amsseg->freeGrains + amsseg->bufferedGrains
+ + amsseg->oldGrains + amsseg->newGrains);
CHECKL(BoolCheck(amsseg->allocTableInUse));
if (!amsseg->allocTableInUse)
@@ -236,8 +237,9 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
amsseg->grains = size >> ams->grainShift;
amsseg->freeGrains = amsseg->grains;
- amsseg->oldGrains = (Count)0;
+ amsseg->bufferedGrains = (Count)0;
amsseg->newGrains = (Count)0;
+ amsseg->oldGrains = (Count)0;
amsseg->marksChanged = FALSE; /* */
amsseg->ambiguousFixes = FALSE;
@@ -382,8 +384,9 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
amsseg->grains = allGrains;
amsseg->freeGrains = amsseg->freeGrains + amssegHi->freeGrains;
- amsseg->oldGrains = amsseg->oldGrains + amssegHi->oldGrains;
+ amsseg->bufferedGrains = amsseg->bufferedGrains + amssegHi->bufferedGrains;
amsseg->newGrains = amsseg->newGrains + amssegHi->newGrains;
+ amsseg->oldGrains = amsseg->oldGrains + amssegHi->oldGrains;
/* other fields in amsseg are unaffected */
RingRemove(&amssegHi->segRing);
@@ -474,8 +477,9 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
AVER(amsseg->freeGrains >= hiGrains);
amsseg->freeGrains -= hiGrains;
amssegHi->freeGrains = hiGrains;
- amssegHi->oldGrains = (Count)0;
+ amssegHi->bufferedGrains = (Count)0;
amssegHi->newGrains = (Count)0;
+ amssegHi->oldGrains = (Count)0;
amssegHi->marksChanged = FALSE; /* */
amssegHi->ambiguousFixes = FALSE;
@@ -544,9 +548,10 @@ static Res AMSSegDescribe(Seg seg, mps_lib_FILE *stream, Count depth)
res = WriteF(stream, depth,
" AMS $P\n", (WriteFP)amsseg->ams,
" grains $W\n", (WriteFW)amsseg->grains,
- " freeGrains $W\n", (WriteFW)amsseg->freeGrains,
- " oldGrains $W\n", (WriteFW)amsseg->oldGrains,
- " newGrains $W\n", (WriteFW)amsseg->newGrains,
+ " freeGrains $W\n", (WriteFW)amsseg->freeGrains,
+ " buffferedGrains $W\n", (WriteFW)amsseg->bufferedGrains,
+ " newGrains $W\n", (WriteFW)amsseg->newGrains,
+ " oldGrains $W\n", (WriteFW)amsseg->oldGrains,
NULL);
if (res != ResOK)
return res;
@@ -728,8 +733,10 @@ static void AMSSegsDestroy(AMS ams)
RING_FOR(node, ring, next) {
Seg seg = SegOfPoolRing(node);
AMSSeg amsseg = Seg2AMSSeg(seg);
+ AVER(SegBuffer(seg) == NULL);
AVERT(AMSSeg, amsseg);
AVER(amsseg->ams == ams);
+ AVER(amsseg->bufferedGrains == 0);
AMSSegFreeCheck(amsseg);
PoolGenFree(ams->pgen, seg,
AMSGrainsSize(ams, amsseg->freeGrains),
@@ -933,7 +940,7 @@ static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn,
AVER(amsseg->freeGrains >= limit - base);
amsseg->freeGrains -= limit - base;
- amsseg->newGrains += limit - base;
+ amsseg->bufferedGrains += limit - base;
*baseReturn = base;
*limitReturn = limit;
return TRUE;
@@ -1005,7 +1012,8 @@ found:
DebugPoolFreeCheck(pool, baseAddr, limitAddr);
allocatedSize = AddrOffset(baseAddr, limitAddr);
- PoolGenAccountForFill(ams->pgen, allocatedSize, FALSE);
+ PoolGenAccountForFill(ams->pgen, allocatedSize);
+
*baseReturn = baseAddr;
*limitReturn = limitAddr;
return ResOK;
@@ -1023,7 +1031,7 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
Index initIndex, limitIndex;
Seg seg;
AMSSeg amsseg;
- Size size;
+ Count usedGrains, unusedGrains;
AVERT(Pool, pool);
ams = PoolAMS(pool);
@@ -1039,55 +1047,57 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
amsseg = Seg2AMSSeg(seg);
AVERT(AMSSeg, amsseg);
- if (init == limit)
- return;
-
- /* Tripped allocations might have scribbled on it, need to splat again. */
- DebugPoolFreeSplat(pool, init, limit);
-
initIndex = AMS_ADDR_INDEX(seg, init);
limitIndex = AMS_ADDR_INDEX(seg, limit);
+ AVER(initIndex <= limitIndex);
- if (amsseg->allocTableInUse) {
- /* check that it's allocated */
- AVER(BTIsSetRange(amsseg->allocTable, initIndex, limitIndex));
- BTResRange(amsseg->allocTable, initIndex, limitIndex);
- } else {
- /* check that it's allocated */
- AVER(limitIndex <= amsseg->firstFree);
- if (limitIndex == amsseg->firstFree) /* is it at the end? */ {
- amsseg->firstFree = initIndex;
- } else if (ams->shareAllocTable && amsseg->colourTablesInUse) {
- /* The nonwhiteTable is shared with allocTable and in use, so we
- * mustn't start using allocTable. In this case we know: 1. the
- * segment has been condemned (because colour tables are turned
- * on in AMSWhiten); 2. the segment has not yet been reclaimed
- * (because colour tables are turned off in AMSReclaim); 3. the
- * unused portion of the buffer is black (see AMSWhiten). So we
- * need to whiten the unused portion of the buffer. The
- * allocTable will be turned back on (if necessary) in
- * AMSReclaim, when we know that the nonwhite grains are exactly
- * the allocated grains.
- */
- } else {
- /* start using allocTable */
- amsseg->allocTableInUse = TRUE;
- BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
- if (amsseg->firstFree < amsseg->grains)
- BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
+ if (init < limit) {
+ /* Tripped allocations might have scribbled on it, need to splat again. */
+ DebugPoolFreeSplat(pool, init, limit);
+
+ if (amsseg->allocTableInUse) {
+ /* check that it's allocated */
+ AVER(BTIsSetRange(amsseg->allocTable, initIndex, limitIndex));
BTResRange(amsseg->allocTable, initIndex, limitIndex);
+ } else {
+ /* check that it's allocated */
+ AVER(limitIndex <= amsseg->firstFree);
+ if (limitIndex == amsseg->firstFree) /* is it at the end? */ {
+ amsseg->firstFree = initIndex;
+ } else if (ams->shareAllocTable && amsseg->colourTablesInUse) {
+ /* The nonwhiteTable is shared with allocTable and in use, so we
+ * mustn't start using allocTable. In this case we know: 1. the
+ * segment has been condemned (because colour tables are turned
+ * on in AMSWhiten); 2. the segment has not yet been reclaimed
+ * (because colour tables are turned off in AMSReclaim); 3. the
+ * unused portion of the buffer is black (see AMSWhiten). So we
+ * need to whiten the unused portion of the buffer. The
+ * allocTable will be turned back on (if necessary) in
+ * AMSReclaim, when we know that the nonwhite grains are exactly
+ * the allocated grains.
+ */
+ } else {
+ /* start using allocTable */
+ amsseg->allocTableInUse = TRUE;
+ BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
+ if (amsseg->firstFree < amsseg->grains)
+ BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
+ BTResRange(amsseg->allocTable, initIndex, limitIndex);
+ }
}
+
+ if (amsseg->colourTablesInUse)
+ AMS_RANGE_WHITEN(seg, initIndex, limitIndex);
}
- if (amsseg->colourTablesInUse)
- AMS_RANGE_WHITEN(seg, initIndex, limitIndex);
-
- amsseg->freeGrains += limitIndex - initIndex;
- /* Unused portion of the buffer must be new, since it's not condemned. */
- AVER(amsseg->newGrains >= limitIndex - initIndex);
- amsseg->newGrains -= limitIndex - initIndex;
- size = AddrOffset(init, limit);
- PoolGenAccountForEmpty(ams->pgen, size, FALSE);
+ unusedGrains = limitIndex - initIndex;
+ AVER(amsseg->bufferedGrains >= unusedGrains);
+ usedGrains = amsseg->bufferedGrains - unusedGrains;
+ amsseg->freeGrains += unusedGrains;
+ amsseg->bufferedGrains = 0;
+ amsseg->newGrains += usedGrains;
+ PoolGenAccountForEmpty(ams->pgen, AMSGrainsSize(ams, usedGrains),
+ AMSGrainsSize(ams, unusedGrains), FALSE);
}
@@ -1114,7 +1124,7 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
AMS ams;
AMSSeg amsseg;
Buffer buffer; /* the seg's buffer, if it has one */
- Count uncondemned;
+ Count agedGrains, uncondemnedGrains;
AVERT(Pool, pool);
ams = PoolAMS(pool);
@@ -1164,16 +1174,20 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
AMS_RANGE_BLACKEN(seg, scanLimitIndex, limitIndex);
amsRangeWhiten(seg, limitIndex, amsseg->grains);
/* We didn't condemn the buffer, subtract it from the count. */
- uncondemned = limitIndex - scanLimitIndex;
+ uncondemnedGrains = limitIndex - scanLimitIndex;
} else { /* condemn whole seg */
amsRangeWhiten(seg, 0, amsseg->grains);
- uncondemned = (Count)0;
+ uncondemnedGrains = (Count)0;
}
- /* The unused part of the buffer remains new: the rest becomes old. */
- PoolGenAccountForAge(ams->pgen, AMSGrainsSize(ams, amsseg->newGrains - uncondemned), FALSE);
- amsseg->oldGrains += amsseg->newGrains - uncondemned;
- amsseg->newGrains = uncondemned;
+ /* The unused part of the buffer remains buffered: the rest becomes old. */
+ AVER(amsseg->bufferedGrains >= uncondemnedGrains);
+ agedGrains = amsseg->bufferedGrains - uncondemnedGrains;
+ PoolGenAccountForAge(ams->pgen, AMSGrainsSize(ams, agedGrains),
+ AMSGrainsSize(ams, amsseg->newGrains), FALSE);
+ amsseg->oldGrains += agedGrains + amsseg->newGrains;
+ amsseg->bufferedGrains = uncondemnedGrains;
+ amsseg->newGrains = 0;
amsseg->marksChanged = FALSE; /* */
amsseg->ambiguousFixes = FALSE;
@@ -1506,7 +1520,7 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
if (ss->rank == RankWEAK) { /* then splat the reference */
*refIO = (Ref)0;
} else {
- ++ss->preservedInPlaceCount; /* Size updated on reclaim */
+ STATISTIC(++ss->preservedInPlaceCount); /* Size updated on reclaim */
if (SegRankSet(seg) == RankSetEMPTY && ss->rank != RankAMBIG) {
/* */
Addr clientNext, next;
@@ -1635,7 +1649,7 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
amsseg->oldGrains -= reclaimedGrains;
amsseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(ams->pgen, AMSGrainsSize(ams, reclaimedGrains), FALSE);
- trace->reclaimSize += AMSGrainsSize(ams, reclaimedGrains);
+ STATISTIC(trace->reclaimSize += AMSGrainsSize(ams, reclaimedGrains));
/* preservedInPlaceCount is updated on fix */
trace->preservedInPlaceSize += AMSGrainsSize(ams, amsseg->oldGrains);
@@ -1643,13 +1657,15 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
amsseg->colourTablesInUse = FALSE;
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
- if (amsseg->freeGrains == grains && SegBuffer(seg) == NULL)
+ if (amsseg->freeGrains == grains && SegBuffer(seg) == NULL) {
/* No survivors */
+ AVER(amsseg->bufferedGrains == 0);
PoolGenFree(ams->pgen, seg,
AMSGrainsSize(ams, amsseg->freeGrains),
AMSGrainsSize(ams, amsseg->oldGrains),
AMSGrainsSize(ams, amsseg->newGrains),
FALSE);
+ }
}
diff --git a/mps/code/poolams.h b/mps/code/poolams.h
index db01a94503f..e1dd091168c 100644
--- a/mps/code/poolams.h
+++ b/mps/code/poolams.h
@@ -61,8 +61,9 @@ typedef struct AMSSegStruct {
RingStruct segRing; /* ring that this seg belongs to */
Count grains; /* total grains */
Count freeGrains; /* free grains */
- Count oldGrains; /* grains allocated prior to last collection */
+ Count bufferedGrains; /* grains in buffers */
Count newGrains; /* grains allocated since last collection */
+ Count oldGrains; /* grains allocated prior to last collection */
Bool allocTableInUse; /* allocTable is used */
Index firstFree; /* 1st free grain, if allocTable is not used */
BT allocTable; /* set if grain is allocated */
diff --git a/mps/code/poolawl.c b/mps/code/poolawl.c
index c989e1107d1..6ea7105185b 100644
--- a/mps/code/poolawl.c
+++ b/mps/code/poolawl.c
@@ -125,8 +125,9 @@ typedef struct AWLSegStruct {
BT alloc;
Count grains;
Count freeGrains; /* free grains */
- Count oldGrains; /* grains allocated prior to last collection */
+ Count bufferedGrains; /* grains in buffers */
Count newGrains; /* grains allocated since last collection */
+ Count oldGrains; /* grains allocated prior to last collection */
Count singleAccesses; /* number of accesses processed singly */
awlStatSegStruct stats;
Sig sig;
@@ -143,7 +144,8 @@ static Bool AWLSegCheck(AWLSeg awlseg)
CHECKL(awlseg->scanned != NULL);
CHECKL(awlseg->alloc != NULL);
CHECKL(awlseg->grains > 0);
- CHECKL(awlseg->grains == awlseg->freeGrains + awlseg->oldGrains + awlseg->newGrains);
+ CHECKL(awlseg->grains == awlseg->freeGrains + awlseg->bufferedGrains
+ + awlseg->newGrains + awlseg->oldGrains);
return TRUE;
}
@@ -221,8 +223,9 @@ static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
BTResRange(awlseg->alloc, 0, bits);
SegSetRankAndSummary(seg, rankSet, RefSetUNIV);
awlseg->freeGrains = bits;
- awlseg->oldGrains = (Count)0;
+ awlseg->bufferedGrains = (Count)0;
awlseg->newGrains = (Count)0;
+ awlseg->oldGrains = (Count)0;
awlseg->singleAccesses = 0;
awlStatSegInit(awlseg);
@@ -586,7 +589,9 @@ static void AWLFinish(Pool pool)
RING_FOR(node, ring, nextNode) {
Seg seg = SegOfPoolRing(node);
AWLSeg awlseg = MustBeA(AWLSeg, seg);
-
+ AVER(SegBuffer(seg) == NULL);
+ AVERT(AWLSeg, awlseg);
+ AVER(awlseg->bufferedGrains == 0);
PoolGenFree(awl->pgen, seg,
AWLGrainsSize(awl, awlseg->freeGrains),
AWLGrainsSize(awl, awlseg->oldGrains),
@@ -651,8 +656,8 @@ found:
BTSetRange(awlseg->scanned, i, j);
AVER(awlseg->freeGrains >= j - i);
awlseg->freeGrains -= j - i;
- awlseg->newGrains += j - i;
- PoolGenAccountForFill(awl->pgen, AddrOffset(base, limit), FALSE);
+ awlseg->bufferedGrains += j - i;
+ PoolGenAccountForFill(awl->pgen, AddrOffset(base, limit));
}
*baseReturn = base;
*limitReturn = limit;
@@ -669,19 +674,24 @@ static void AWLBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
AWLSeg awlseg = MustBeA(AWLSeg, seg);
Addr segBase = SegBase(seg);
Index i, j;
+ Count usedGrains, unusedGrains;
AVER(init <= limit);
i = awlIndexOfAddr(segBase, awl, init);
j = awlIndexOfAddr(segBase, awl, limit);
AVER(i <= j);
- if (i < j) {
+ if (i < j)
BTResRange(awlseg->alloc, i, j);
- AVER(awlseg->newGrains >= j - i);
- awlseg->newGrains -= j - i;
- awlseg->freeGrains += j - i;
- PoolGenAccountForEmpty(awl->pgen, AddrOffset(init, limit), FALSE);
- }
+
+ unusedGrains = j - i;
+ AVER(awlseg->bufferedGrains >= unusedGrains);
+ usedGrains = awlseg->bufferedGrains - unusedGrains;
+ awlseg->freeGrains += unusedGrains;
+ awlseg->bufferedGrains = 0;
+ awlseg->newGrains += usedGrains;
+ PoolGenAccountForEmpty(awl->pgen, AWLGrainsSize(awl, usedGrains),
+ AWLGrainsSize(awl, unusedGrains), FALSE);
}
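
For example (numbers illustrative): emptying a buffer of 64 grains of which
40 were allocated gives unusedGrains = 24 and usedGrains = 40, so freeGrains
gains 24, newGrains gains 40, bufferedGrains returns to 0, and
PoolGenAccountForEmpty() moves the corresponding byte counts out of the
*buffered* account into *new* and *free* respectively.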
@@ -706,7 +716,7 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
AWL awl = MustBeA(AWLPool, pool);
AWLSeg awlseg = MustBeA(AWLSeg, seg);
Buffer buffer = SegBuffer(seg);
- Count uncondemned;
+ Count agedGrains, uncondemnedGrains;
/* All parameters checked by generic PoolWhiten. */
@@ -716,13 +726,13 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
if(buffer == NULL) {
awlRangeWhiten(awlseg, 0, awlseg->grains);
- uncondemned = (Count)0;
+ uncondemnedGrains = (Count)0;
} else {
/* Whiten everything except the buffer. */
Addr base = SegBase(seg);
Index scanLimitIndex = awlIndexOfAddr(base, awl, BufferScanLimit(buffer));
Index limitIndex = awlIndexOfAddr(base, awl, BufferLimit(buffer));
- uncondemned = limitIndex - scanLimitIndex;
+ uncondemnedGrains = limitIndex - scanLimitIndex;
awlRangeWhiten(awlseg, 0, scanLimitIndex);
awlRangeWhiten(awlseg, limitIndex, awlseg->grains);
@@ -735,9 +745,14 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
}
}
- PoolGenAccountForAge(awl->pgen, AWLGrainsSize(awl, awlseg->newGrains - uncondemned), FALSE);
- awlseg->oldGrains += awlseg->newGrains - uncondemned;
- awlseg->newGrains = uncondemned;
+ /* The unused part of the buffer remains buffered: the rest becomes old. */
+ AVER(awlseg->bufferedGrains >= uncondemnedGrains);
+ agedGrains = awlseg->bufferedGrains - uncondemnedGrains;
+ PoolGenAccountForAge(awl->pgen, AWLGrainsSize(awl, agedGrains),
+ AWLGrainsSize(awl, awlseg->newGrains), FALSE);
+ awlseg->oldGrains += agedGrains + awlseg->newGrains;
+ awlseg->bufferedGrains = uncondemnedGrains;
+ awlseg->newGrains = 0;
if (awlseg->oldGrains > 0) {
trace->condemned += AWLGrainsSize(awl, awlseg->oldGrains);
@@ -1065,18 +1080,20 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
awlseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(awl->pgen, AWLGrainsSize(awl, reclaimedGrains), FALSE);
- trace->reclaimSize += AWLGrainsSize(awl, reclaimedGrains);
- trace->preservedInPlaceCount += preservedInPlaceCount;
+ STATISTIC(trace->reclaimSize += AWLGrainsSize(awl, reclaimedGrains));
+ STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount);
trace->preservedInPlaceSize += preservedInPlaceSize;
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
- if (awlseg->freeGrains == awlseg->grains && buffer == NULL)
+ if (awlseg->freeGrains == awlseg->grains && buffer == NULL) {
/* No survivors */
+ AVER(awlseg->bufferedGrains == 0);
PoolGenFree(awl->pgen, seg,
AWLGrainsSize(awl, awlseg->freeGrains),
AWLGrainsSize(awl, awlseg->oldGrains),
AWLGrainsSize(awl, awlseg->newGrains),
FALSE);
+ }
}
diff --git a/mps/code/poollo.c b/mps/code/poollo.c
index ec5552f1a56..871dcd2c944 100644
--- a/mps/code/poollo.c
+++ b/mps/code/poollo.c
@@ -52,8 +52,9 @@ typedef struct LOSegStruct {
BT mark; /* mark bit table */
BT alloc; /* alloc bit table */
Count freeGrains; /* free grains */
- Count oldGrains; /* grains allocated prior to last collection */
+ Count bufferedGrains; /* grains in buffers */
Count newGrains; /* grains allocated since last collection */
+ Count oldGrains; /* grains allocated prior to last collection */
Sig sig; /* */
} LOSegStruct;
@@ -88,7 +89,8 @@ static Bool LOSegCheck(LOSeg loseg)
CHECKL(loseg->mark != NULL);
CHECKL(loseg->alloc != NULL);
/* Could check exactly how many bits are set in the alloc table. */
- CHECKL(loseg->freeGrains + loseg->oldGrains + loseg->newGrains
+ CHECKL(loseg->freeGrains + loseg->bufferedGrains + loseg->newGrains
+ + loseg->oldGrains
== SegSize(seg) >> lo->alignShift);
return TRUE;
}
@@ -128,8 +130,9 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
BTResRange(loseg->alloc, 0, grains);
BTSetRange(loseg->mark, 0, grains);
loseg->freeGrains = grains;
- loseg->oldGrains = (Count)0;
+ loseg->bufferedGrains = (Count)0;
loseg->newGrains = (Count)0;
+ loseg->oldGrains = (Count)0;
SetClassOfPoly(seg, CLASS(LOSeg));
loseg->sig = LOSegSig;
@@ -353,18 +356,20 @@ static void loSegReclaim(LOSeg loseg, Trace trace)
loseg->freeGrains += reclaimedGrains;
PoolGenAccountForReclaim(lo->pgen, LOGrainsSize(lo, reclaimedGrains), FALSE);
- trace->reclaimSize += LOGrainsSize(lo, reclaimedGrains);
- trace->preservedInPlaceCount += preservedInPlaceCount;
+ STATISTIC(trace->reclaimSize += LOGrainsSize(lo, reclaimedGrains));
+ STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount);
trace->preservedInPlaceSize += preservedInPlaceSize;
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
- if (!marked)
+ if (!marked) {
+ AVER(loseg->bufferedGrains == 0);
PoolGenFree(lo->pgen, seg,
LOGrainsSize(lo, loseg->freeGrains),
LOGrainsSize(lo, loseg->oldGrains),
LOGrainsSize(lo, loseg->newGrains),
FALSE);
+ }
}
/* This walks over _all_ objects in the heap, whether they are */
@@ -513,7 +518,9 @@ static void LOFinish(Pool pool)
RING_FOR(node, &pool->segRing, nextNode) {
Seg seg = SegOfPoolRing(node);
LOSeg loseg = MustBeA(LOSeg, seg);
+ AVER(SegBuffer(seg) == NULL);
AVERT(LOSeg, loseg);
+ AVER(loseg->bufferedGrains == 0);
PoolGenFree(lo->pgen, seg,
LOGrainsSize(lo, loseg->freeGrains),
LOGrainsSize(lo, loseg->oldGrains),
@@ -578,10 +585,10 @@ found:
BTSetRange(loseg->alloc, baseIndex, limitIndex);
AVER(loseg->freeGrains >= limitIndex - baseIndex);
loseg->freeGrains -= limitIndex - baseIndex;
- loseg->newGrains += limitIndex - baseIndex;
+ loseg->bufferedGrains += limitIndex - baseIndex;
}
- PoolGenAccountForFill(lo->pgen, AddrOffset(base, limit), FALSE);
+ PoolGenAccountForFill(lo->pgen, AddrOffset(base, limit));
*baseReturn = base;
*limitReturn = limit;
@@ -598,6 +605,7 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
Seg seg;
LOSeg loseg;
Index initIndex, limitIndex;
+ Count usedGrains, unusedGrains;
AVERT(Buffer, buffer);
AVER(BufferIsReady(buffer));
@@ -620,15 +628,18 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
initIndex = loIndexOfAddr(segBase, lo, init);
limitIndex = loIndexOfAddr(segBase, lo, limit);
- if(initIndex != limitIndex) {
- /* Free the unused portion of the buffer (this must be "new", since
- * it's not condemned). */
+ AVER(initIndex <= limitIndex);
+ if (initIndex < limitIndex)
loSegFree(loseg, initIndex, limitIndex);
- AVER(loseg->newGrains >= limitIndex - initIndex);
- loseg->newGrains -= limitIndex - initIndex;
- loseg->freeGrains += limitIndex - initIndex;
- PoolGenAccountForEmpty(lo->pgen, AddrOffset(init, limit), FALSE);
- }
+
+ unusedGrains = limitIndex - initIndex;
+ AVER(loseg->bufferedGrains >= unusedGrains);
+ usedGrains = loseg->bufferedGrains - unusedGrains;
+ loseg->freeGrains += unusedGrains;
+ loseg->bufferedGrains = 0;
+ loseg->newGrains += usedGrains;
+ PoolGenAccountForEmpty(lo->pgen, LOGrainsSize(lo, usedGrains),
+ LOGrainsSize(lo, unusedGrains), FALSE);
}
@@ -639,7 +650,7 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
LO lo = MustBeA(LOPool, pool);
LOSeg loseg = MustBeA(LOSeg, seg);
Buffer buffer;
- Count grains, uncondemned;
+ Count grains, agedGrains, uncondemnedGrains;
AVERT(Trace, trace);
AVER(SegWhite(seg) == TraceSetEMPTY);
@@ -652,19 +663,26 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
Addr base = SegBase(seg);
Index scanLimitIndex = loIndexOfAddr(base, lo, BufferScanLimit(buffer));
Index limitIndex = loIndexOfAddr(base, lo, BufferLimit(buffer));
- uncondemned = limitIndex - scanLimitIndex;
+ uncondemnedGrains = limitIndex - scanLimitIndex;
if (0 < scanLimitIndex)
BTCopyInvertRange(loseg->alloc, loseg->mark, 0, scanLimitIndex);
if (limitIndex < grains)
BTCopyInvertRange(loseg->alloc, loseg->mark, limitIndex, grains);
} else {
- uncondemned = (Count)0;
+ uncondemnedGrains = (Count)0;
BTCopyInvertRange(loseg->alloc, loseg->mark, 0, grains);
}
- PoolGenAccountForAge(lo->pgen, LOGrainsSize(lo, loseg->newGrains - uncondemned), FALSE);
- loseg->oldGrains += loseg->newGrains - uncondemned;
- loseg->newGrains = uncondemned;
+
+ /* The unused part of the buffer remains buffered: the rest becomes old. */
+ AVER(loseg->bufferedGrains >= uncondemnedGrains);
+ agedGrains = loseg->bufferedGrains - uncondemnedGrains;
+ PoolGenAccountForAge(lo->pgen, LOGrainsSize(lo, agedGrains),
+ LOGrainsSize(lo, loseg->newGrains), FALSE);
+ loseg->oldGrains += agedGrains + loseg->newGrains;
+ loseg->bufferedGrains = uncondemnedGrains;
+ loseg->newGrains = 0;
+
trace->condemned += LOGrainsSize(lo, loseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
diff --git a/mps/code/poolmv2.c b/mps/code/poolmv2.c
index 3b74ce7aa62..b11f6b65762 100644
--- a/mps/code/poolmv2.c
+++ b/mps/code/poolmv2.c
@@ -95,42 +95,42 @@ typedef struct MVTStruct
Size unavailable; /* bytes lost to fragmentation */
/* pool meters*/
- METER_DECL(segAllocs);
- METER_DECL(segFrees);
- METER_DECL(bufferFills);
- METER_DECL(bufferEmpties);
- METER_DECL(poolFrees);
- METER_DECL(poolSize);
- METER_DECL(poolAllocated);
- METER_DECL(poolAvailable);
- METER_DECL(poolUnavailable);
- METER_DECL(poolUtilization);
+ METER_DECL(segAllocs)
+ METER_DECL(segFrees)
+ METER_DECL(bufferFills)
+ METER_DECL(bufferEmpties)
+ METER_DECL(poolFrees)
+ METER_DECL(poolSize)
+ METER_DECL(poolAllocated)
+ METER_DECL(poolAvailable)
+ METER_DECL(poolUnavailable)
+ METER_DECL(poolUtilization)
/* abq meters */
- METER_DECL(finds);
- METER_DECL(overflows);
- METER_DECL(underflows);
- METER_DECL(refills);
- METER_DECL(refillPushes);
- METER_DECL(returns);
+ METER_DECL(finds)
+ METER_DECL(overflows)
+ METER_DECL(underflows)
+ METER_DECL(refills)
+ METER_DECL(refillPushes)
+ METER_DECL(returns)
/* fragmentation meters */
- METER_DECL(perfectFits);
- METER_DECL(firstFits);
- METER_DECL(secondFits);
- METER_DECL(failures);
+ METER_DECL(perfectFits)
+ METER_DECL(firstFits)
+ METER_DECL(secondFits)
+ METER_DECL(failures)
/* contingency meters */
- METER_DECL(emergencyContingencies);
- METER_DECL(fragLimitContingencies);
- METER_DECL(contingencySearches);
- METER_DECL(contingencyHardSearches);
+ METER_DECL(emergencyContingencies)
+ METER_DECL(fragLimitContingencies)
+ METER_DECL(contingencySearches)
+ METER_DECL(contingencyHardSearches)
/* splinter meters */
- METER_DECL(splinters);
- METER_DECL(splintersUsed);
- METER_DECL(splintersDropped);
- METER_DECL(sawdust);
+ METER_DECL(splinters)
+ METER_DECL(splintersUsed)
+ METER_DECL(splintersDropped)
+ METER_DECL(sawdust)
/* exception meters */
- METER_DECL(exceptions);
- METER_DECL(exceptionSplinters);
- METER_DECL(exceptionReturns);
+ METER_DECL(exceptions)
+ METER_DECL(exceptionSplinters)
+ METER_DECL(exceptionReturns)
Sig sig;
} MVTStruct;
diff --git a/mps/code/seg.c b/mps/code/seg.c
index fbdec1d1784..9143245832f 100644
--- a/mps/code/seg.c
+++ b/mps/code/seg.c
@@ -1174,24 +1174,22 @@ static void gcSegSetGreyInternal(Seg seg, TraceSet oldGrey, TraceSet grey)
RingRemove(&gcseg->greyRing);
}
- STATISTIC_STAT
- ({
- TraceId ti; Trace trace;
- TraceSet diff;
+ STATISTIC({
+ TraceId ti; Trace trace;
+ TraceSet diff;
- diff = TraceSetDiff(grey, oldGrey);
- TRACE_SET_ITER(ti, trace, diff, arena)
- ++trace->greySegCount;
- if (trace->greySegCount > trace->greySegMax)
- trace->greySegMax = trace->greySegCount;
- TRACE_SET_ITER_END(ti, trace, diff, arena);
-
- diff = TraceSetDiff(oldGrey, grey);
- TRACE_SET_ITER(ti, trace, diff, arena)
- --trace->greySegCount;
- TRACE_SET_ITER_END(ti, trace, diff, arena);
- });
+ diff = TraceSetDiff(grey, oldGrey);
+ TRACE_SET_ITER(ti, trace, diff, arena)
+ ++trace->greySegCount;
+ if (trace->greySegCount > trace->greySegMax)
+ trace->greySegMax = trace->greySegCount;
+ TRACE_SET_ITER_END(ti, trace, diff, arena);
+ diff = TraceSetDiff(oldGrey, grey);
+ TRACE_SET_ITER(ti, trace, diff, arena)
+ --trace->greySegCount;
+ TRACE_SET_ITER_END(ti, trace, diff, arena);
+ });
}
diff --git a/mps/code/segsmss.c b/mps/code/segsmss.c
index f816748114a..5055bf3b939 100644
--- a/mps/code/segsmss.c
+++ b/mps/code/segsmss.c
@@ -429,6 +429,7 @@ static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
AMSSeg amsseg;
Index baseIndex, limitIndex;
+ Count unallocatedGrains;
/* parameters checked by caller */
amsseg = Seg2AMSSeg(seg);
@@ -453,10 +454,13 @@ static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
BTResRange(amsseg->allocTable, baseIndex, limitIndex);
}
}
- amsseg->freeGrains += limitIndex - baseIndex;
- AVER(amsseg->newGrains >= limitIndex - baseIndex);
- amsseg->newGrains -= limitIndex - baseIndex;
- PoolGenAccountForEmpty(ams->pgen, AddrOffset(base, limit), FALSE);
+
+ unallocatedGrains = limitIndex - baseIndex;
+ AVER(amsseg->bufferedGrains >= unallocatedGrains);
+ amsseg->freeGrains += unallocatedGrains;
+ amsseg->bufferedGrains -= unallocatedGrains;
+ PoolGenAccountForEmpty(ams->pgen, 0, AMSGrainsSize(ams, unallocatedGrains),
+ FALSE);
}
@@ -469,6 +473,7 @@ static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
AMSSeg amsseg;
Index baseIndex, limitIndex;
+ Count allocatedGrains;
/* parameters checked by caller */
amsseg = Seg2AMSSeg(seg);
@@ -493,10 +498,12 @@ static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
}
}
- AVER(amsseg->freeGrains >= limitIndex - baseIndex);
- amsseg->freeGrains -= limitIndex - baseIndex;
- amsseg->newGrains += limitIndex - baseIndex;
- PoolGenAccountForFill(ams->pgen, AddrOffset(base, limit), FALSE);
+
+ allocatedGrains = limitIndex - baseIndex;
+ AVER(amsseg->freeGrains >= allocatedGrains);
+ amsseg->freeGrains -= allocatedGrains;
+ amsseg->bufferedGrains += allocatedGrains;
+ PoolGenAccountForFill(ams->pgen, AddrOffset(base, limit));
}
diff --git a/mps/code/testlib.c b/mps/code/testlib.c
index c4ca112df6f..05225b36b75 100644
--- a/mps/code/testlib.c
+++ b/mps/code/testlib.c
@@ -12,7 +12,7 @@
#include "mps.h"
#include "misc.h" /* for NOOP */
-#include <math.h> /* fmod, log */
+#include <math.h> /* fmod, log, HUGE_VAL */
#include <stdio.h> /* fflush, printf, stderr, sscanf, vfprintf */
#include <stdlib.h> /* abort, exit, getenv */
#include <time.h> /* time */
@@ -246,6 +246,15 @@ size_t rnd_align(size_t min, size_t max)
return min;
}
+double rnd_pause_time(void)
+{
+ double t = rnd_double();
+ if (t == 0.0)
+ return HUGE_VAL; /* Would prefer to use INFINITY but it's not in C89. */
+ else
+ return 1 / t - 1;
+}
+
rnd_state_t rnd_seed(void)
{
/* Initialize seed based on seconds since epoch and on processor
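
Since rnd_double() returns t uniformly distributed in [0, 1), the pause time
1/t - 1 ranges over (0, infinity) with median 1.0: t = 0.5 gives a pause of
1.0, t = 0.1 gives 9.0, values of t near 1 give pauses near 0, and the
t == 0 case maps to HUGE_VAL, an effectively unlimited pause.
(MPS_KEY_PAUSE_TIME is measured in seconds.)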
diff --git a/mps/code/testlib.h b/mps/code/testlib.h
index 0492aaf138b..7e4c651c0b6 100644
--- a/mps/code/testlib.h
+++ b/mps/code/testlib.h
@@ -265,6 +265,11 @@ extern size_t rnd_grain(size_t arena_size);
extern size_t rnd_align(size_t min, size_t max);
+/* rnd_pause_time -- random pause time */
+
+extern double rnd_pause_time(void);
+
+
/* randomize -- randomize the generator, or initialize to replay
*
* randomize(argc, argv) randomizes the rnd generator (using time(3))
diff --git a/mps/code/trace.c b/mps/code/trace.c
index 3a0fcf98a46..2672799dd38 100644
--- a/mps/code/trace.c
+++ b/mps/code/trace.c
@@ -272,13 +272,13 @@ static void traceUpdateCounts(Trace trace, ScanState ss,
switch(phase) {
case traceAccountingPhaseRootScan: {
trace->rootScanSize += ss->scannedSize;
- trace->rootCopiedSize += ss->copiedSize;
+ STATISTIC(trace->rootCopiedSize += ss->copiedSize);
STATISTIC(++trace->rootScanCount);
break;
}
case traceAccountingPhaseSegScan: {
trace->segScanSize += ss->scannedSize; /* see .work */
- trace->segCopiedSize += ss->copiedSize;
+ STATISTIC(trace->segCopiedSize += ss->copiedSize);
STATISTIC(++trace->segScanCount);
break;
}
@@ -676,7 +676,8 @@ failRootFlip:
* This code is written to be adaptable to allocating Trace objects
* dynamically. */
-static void TraceCreatePoolGen(GenDesc gen)
+ATTRIBUTE_UNUSED
+static void traceCreatePoolGen(GenDesc gen)
{
Ring n, nn;
RING_FOR(n, &gen->locusRing, nn) {
@@ -724,10 +725,10 @@ found:
STATISTIC(trace->greySegMax = (Count)0);
STATISTIC(trace->rootScanCount = (Count)0);
trace->rootScanSize = (Size)0;
- trace->rootCopiedSize = (Size)0;
+ STATISTIC(trace->rootCopiedSize = (Size)0);
STATISTIC(trace->segScanCount = (Count)0);
trace->segScanSize = (Size)0; /* see .work */
- trace->segCopiedSize = (Size)0;
+ STATISTIC(trace->segCopiedSize = (Size)0);
STATISTIC(trace->singleScanCount = (Count)0);
STATISTIC(trace->singleScanSize = (Size)0);
STATISTIC(trace->singleCopiedSize = (Size)0);
@@ -750,7 +751,7 @@ found:
EVENT3(TraceCreate, trace, arena, (EventFU)why);
- STATISTIC_STAT ({
+ STATISTIC({
/* Iterate over all chains, all GenDescs within a chain, and all
* PoolGens within a GenDesc. */
Ring node;
@@ -761,12 +762,12 @@ found:
Index i;
for (i = 0; i < chain->genCount; ++i) {
GenDesc gen = &chain->gens[i];
- TraceCreatePoolGen(gen);
+ traceCreatePoolGen(gen);
}
}
/* Now do topgen GenDesc, and all PoolGens within it. */
- TraceCreatePoolGen(&arena->topGen);
+ traceCreatePoolGen(&arena->topGen);
});
*traceReturn = trace;
@@ -824,26 +825,23 @@ void TraceDestroyFinished(Trace trace)
ChainEndGC(trace->chain, trace);
}
- STATISTIC_STAT(EVENT13
- (TraceStatScan, trace,
- trace->rootScanCount, trace->rootScanSize,
- trace->rootCopiedSize,
- trace->segScanCount, trace->segScanSize,
- trace->segCopiedSize,
- trace->singleScanCount, trace->singleScanSize,
- trace->singleCopiedSize,
- trace->readBarrierHitCount, trace->greySegMax,
- trace->pointlessScanCount));
- STATISTIC_STAT(EVENT10
- (TraceStatFix, trace,
- trace->fixRefCount, trace->segRefCount,
- trace->whiteSegRefCount,
- trace->nailCount, trace->snapCount,
- trace->forwardedCount, trace->forwardedSize,
- trace->preservedInPlaceCount,
- trace->preservedInPlaceSize));
- STATISTIC_STAT(EVENT3
- (TraceStatReclaim, trace,
+ STATISTIC(EVENT13(TraceStatScan, trace,
+ trace->rootScanCount, trace->rootScanSize,
+ trace->rootCopiedSize,
+ trace->segScanCount, trace->segScanSize,
+ trace->segCopiedSize,
+ trace->singleScanCount, trace->singleScanSize,
+ trace->singleCopiedSize,
+ trace->readBarrierHitCount, trace->greySegMax,
+ trace->pointlessScanCount));
+ STATISTIC(EVENT10(TraceStatFix, trace,
+ trace->fixRefCount, trace->segRefCount,
+ trace->whiteSegRefCount,
+ trace->nailCount, trace->snapCount,
+ trace->forwardedCount, trace->forwardedSize,
+ trace->preservedInPlaceCount,
+ trace->preservedInPlaceSize));
+ STATISTIC(EVENT3(TraceStatReclaim, trace,
trace->reclaimCount, trace->reclaimSize));
EVENT1(TraceDestroy, trace);
@@ -1160,19 +1158,18 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
traceSetUpdateCounts(ts, arena, ss, traceAccountingPhaseSegScan);
/* Count segments scanned pointlessly */
- STATISTIC_STAT
- ({
- TraceId ti; Trace trace;
- Count whiteSegRefCount = 0;
+ STATISTIC({
+ TraceId ti; Trace trace;
+ Count whiteSegRefCount = 0;
- TRACE_SET_ITER(ti, trace, ts, arena)
- whiteSegRefCount += trace->whiteSegRefCount;
- TRACE_SET_ITER_END(ti, trace, ts, arena);
- if(whiteSegRefCount == 0)
- TRACE_SET_ITER(ti, trace, ts, arena)
- ++trace->pointlessScanCount;
- TRACE_SET_ITER_END(ti, trace, ts, arena);
- });
+ TRACE_SET_ITER(ti, trace, ts, arena)
+ whiteSegRefCount += trace->whiteSegRefCount;
+ TRACE_SET_ITER_END(ti, trace, ts, arena);
+ if(whiteSegRefCount == 0)
+ TRACE_SET_ITER(ti, trace, ts, arena)
+ ++trace->pointlessScanCount;
+ TRACE_SET_ITER_END(ti, trace, ts, arena);
+ });
/* Following is true whether or not scan was total. */
/* See . */
@@ -1275,8 +1272,6 @@ void TraceSegAccess(Arena arena, Seg seg, AccessSet mode)
seg->defer = WB_DEFER_HIT;
if (readHit) {
- Trace trace;
- TraceId ti;
Rank rank;
TraceSet traces;
@@ -1297,7 +1292,9 @@ void TraceSegAccess(Arena arena, Seg seg, AccessSet mode)
/* can go ahead and access it. */
AVER(TraceSetInter(SegGrey(seg), traces) == TraceSetEMPTY);
- STATISTIC_STAT({
+ STATISTIC({
+ Trace trace;
+ TraceId ti;
TRACE_SET_ITER(ti, trace, traces, arena)
++trace->readBarrierHitCount;
TRACE_SET_ITER_END(ti, trace, traces, arena);
@@ -1382,13 +1379,12 @@ mps_res_t _mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io)
if (TraceSetInter(TractWhite(tract), ss->traces) == TraceSetEMPTY) {
/* Reference points to a tract that is not white for any of the
* active traces. See */
- STATISTIC_STAT
- ({
- if(TRACT_SEG(&seg, tract)) {
- ++ss->segRefCount;
- EVENT1(TraceFixSeg, seg);
- }
- });
+ STATISTIC({
+ if (TRACT_SEG(&seg, tract)) {
+ ++ss->segRefCount;
+ EVENT1(TraceFixSeg, seg);
+ }
+ });
goto done;
}
@@ -1689,7 +1685,7 @@ Res TraceStart(Trace trace, double mortality, double finishingTime)
res = RootsIterate(ArenaGlobals(arena), rootGrey, (void *)trace);
AVER(res == ResOK);
- STATISTIC_STAT(EVENT2(ArenaWriteFaults, arena, arena->writeBarrierHitCount));
+ STATISTIC(EVENT2(ArenaWriteFaults, arena, arena->writeBarrierHitCount));
/* Calculate the rate of scanning. */
{
@@ -1716,11 +1712,6 @@ Res TraceStart(Trace trace, double mortality, double finishingTime)
trace->foundation, trace->white,
trace->quantumWork);
- STATISTIC_STAT(EVENT7(TraceStatCondemn, trace,
- trace->condemned, trace->notCondemned,
- trace->foundation, trace->quantumWork,
- mortality, finishingTime));
-
trace->state = TraceUNFLIPPED;
TracePostStartMessage(trace);
@@ -1831,12 +1822,17 @@ failCondemn:
/* TracePoll -- Check if there's any tracing work to be done
*
* Consider starting a trace if none is running; advance the running
- * trace (if any) by one quantum. If there may be more work to do,
- * update *workReturn with a measure of the work done and return TRUE.
- * Otherwise return FALSE.
+ * trace (if any) by one quantum.
+ *
+ * The collectWorldReturn and collectWorldAllowed arguments are as for
+ * PolicyStartTrace.
+ *
+ * If there may be more work to do, update *workReturn with a measure
+ * of the work done and return TRUE. Otherwise return FALSE.
*/
-Bool TracePoll(Work *workReturn, Globals globals)
+Bool TracePoll(Work *workReturn, Bool *collectWorldReturn, Globals globals,
+ Bool collectWorldAllowed)
{
Trace trace;
Arena arena;
@@ -1849,7 +1845,8 @@ Bool TracePoll(Work *workReturn, Globals globals)
trace = ArenaTrace(arena, (TraceId)0);
} else {
/* No traces are running: consider starting one now. */
- if (!PolicyStartTrace(&trace, arena))
+ if (!PolicyStartTrace(&trace, collectWorldReturn, arena,
+ collectWorldAllowed))
return FALSE;
}
@@ -1905,9 +1902,11 @@ Res TraceDescribe(Trace trace, mps_lib_FILE *stream, Count depth)
" foundation $U\n", (WriteFU)trace->foundation,
" quantumWork $U\n", (WriteFU)trace->quantumWork,
" rootScanSize $U\n", (WriteFU)trace->rootScanSize,
- " rootCopiedSize $U\n", (WriteFU)trace->rootCopiedSize,
+ STATISTIC_WRITE(" rootCopiedSize $U\n",
+ (WriteFU)trace->rootCopiedSize)
" segScanSize $U\n", (WriteFU)trace->segScanSize,
- " segCopiedSize $U\n", (WriteFU)trace->segCopiedSize,
+ STATISTIC_WRITE(" segCopiedSize $U\n",
+ (WriteFU)trace->segCopiedSize)
" forwardedSize $U\n", (WriteFU)trace->forwardedSize,
" preservedInPlaceSize $U\n", (WriteFU)trace->preservedInPlaceSize,
"} Trace $P\n", (WriteFP)trace,
diff --git a/mps/code/tract.c b/mps/code/tract.c
index 328d34dd4f6..45fbb668c94 100644
--- a/mps/code/tract.c
+++ b/mps/code/tract.c
@@ -174,6 +174,7 @@ Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved,
Count pages;
Shift pageShift;
Size pageTableSize;
+ Addr allocBase;
void *p;
Res res;
@@ -196,6 +197,7 @@ Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved,
chunk->reserved = reserved;
size = ChunkSize(chunk);
+ /* .overhead.pages: Chunk overhead for the page allocation table. */
chunk->pages = pages = size >> pageShift;
res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN);
if (res != ResOK)
@@ -219,12 +221,14 @@ Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved,
/* Init allocTable after class init, because it might be mapped there. */
BTResRange(chunk->allocTable, 0, pages);
+ /* Check that there is some usable address space remaining in the chunk. */
+ allocBase = PageIndexBase(chunk, chunk->allocBase);
+ AVER(allocBase < chunk->limit);
+
/* Add the chunk's free address space to the arena's freeLand, so that
we can allocate from it. */
if (arena->hasFreeLand) {
- res = ArenaFreeLandInsert(arena,
- PageIndexBase(chunk, chunk->allocBase),
- chunk->limit);
+ res = ArenaFreeLandInsert(arena, allocBase, chunk->limit);
if (res != ResOK)
goto failLandInsert;
}
diff --git a/mps/code/zcoll.c b/mps/code/zcoll.c
index 459f87595f5..87bd39f5cad 100644
--- a/mps/code/zcoll.c
+++ b/mps/code/zcoll.c
@@ -804,8 +804,13 @@ static void testscriptA(const char *script)
printf(" Create arena, size = %lu.\n", arenasize);
/* arena */
- die(mps_arena_create(&arena, mps_arena_class_vm(), (size_t)arenasize),
- "arena_create");
+ MPS_ARGS_BEGIN(args) {
+ /* Randomize pause time as a regression test for job004011. */
+ MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, rnd_pause_time());
+ MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arenasize);
+ die(mps_arena_create_k(&arena, mps_arena_class_vm(), args),
+ "arena_create\n");
+ } MPS_ARGS_END(args);
/* thr: used to stop/restart multiple threads */
die(mps_thread_reg(&thr, arena), "thread");
diff --git a/mps/design/arena.txt b/mps/design/arena.txt
index 11059088829..426e6d6da4f 100644
--- a/mps/design/arena.txt
+++ b/mps/design/arena.txt
@@ -563,21 +563,25 @@ end of ``ArenaPoll()`` to the current polling time plus
Location dependencies
.....................
-_`.ld.epoch`: ``arena->epoch`` is the "current epoch". This is the
+_`.ld`: The ``historyStruct`` contains the fields used to maintain a
+history of garbage collection, and in particular of object motion, in
+order to implement location dependencies.
+
+_`.ld.epoch`: The ``epoch`` is the "current epoch". This is the
number of 'flips' of traces in the arena since the arena was created.
From the mutator's point of view locations change atomically at flip.
-_`.ld.history`: ``arena->history`` is a circular buffer of
+_`.ld.history`: The ``history`` is a circular buffer of
``LDHistoryLENGTH`` elements of type ``RefSet``. These are the
summaries of moved objects since the last ``LDHistoryLENGTH`` epochs.
If ``e`` is one of these recent epochs, then ::
- arena->history[e % LDHistoryLENGTH]
+ history->history[e % LDHistoryLENGTH]
is a summary of (the original locations of) objects moved since epoch
``e``.
-_`.ld.prehistory`: ``arena->prehistory`` is a ``RefSet`` summarizing
+_`.ld.prehistory`: The ``prehistory`` is a ``RefSet`` summarizing
the original locations of all objects ever moved. When considering
whether a really old location dependency is stale, it is compared with
this summary.
diff --git a/mps/design/diag.txt b/mps/design/diag.txt
index 15384db203b..90918307a6c 100644
--- a/mps/design/diag.txt
+++ b/mps/design/diag.txt
@@ -135,6 +135,51 @@ diagnostic system:
- the ``METER`` macros and meter subsystem.
+Statistics
+..........
+
+_`.stat`: The statistics system collects information about the
+behaviour and performance of the MPS that may be useful for MPS
+developers and customers, but which is not needed by the MPS itself
+for internal decision-making.
+
+_`.stat.remove`: The space needed for these statistics, and the code
+for maintaining them, can therefore be removed (compiled out) in some
+varieties.
+
+_`.stat.config`: Statistics are compiled in if ``CONFIG_STATS`` is
+defined (in the cool variety) and compiled out if
+``CONFIG_STATS_NONE`` is defined (in the hot and rash varieties).
+
+``STATISTIC_DECL(decl)``
+
+_`.stat.decl`: The ``STATISTIC_DECL`` macro is used to wrap the
+declaration of storage for a statistic. Note that the expansion
+supplies a terminating semi-colon, so a use of the macro must not be
+followed by a semi-colon. This is so that it can be used in structure
+declarations.
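+
+For example, a statistic field in a structure is declared like this
+(illustrative; ``scanCount`` is hypothetical)::
+
+    typedef struct ExampleStruct {
+        Size size;
+        STATISTIC_DECL(Count scanCount)  /* no semicolon */
+        Sig sig;
+    } ExampleStruct;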
+
+``STATISTIC(gather)``
+
+_`.stat.gather`: The ``STATISTIC`` macro is used to gather statistics.
+The argument is a statement and the expansion followed by a semicolon
+is syntactically a statement. The macro expends to ``NOOP`` in
+non-statistical varieties. (Note that it can't use ``DISCARD_STAT`` to
+check the syntax of the statement because it is expected to use fields
+that have been compiled away by ``STATISTIC_DECL``, and these will
+cause compilation errors.)
+
+_`.stat.gather.effect`: The argument to the ``STATISTIC`` macro is not
+executed in non-statistical varieties and must have no side effects,
+except for updates to fields that are declared with ``STATISTIC_DECL``,
+and telemetry output containing the values of such fields.
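+
+For example (taken from trace.c in this change)::
+
+    STATISTIC(++trace->rootScanCount);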
+
+``STATISTIC_WRITE(format, arg)``
+
+_`.stat.write`: The ``STATISTIC_WRITE`` macro is used in ``WriteF()``
+argument lists to output the values of statistics.
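+
+For example (taken from ``TraceDescribe()`` in this change)::
+
+    STATISTIC_WRITE(" rootCopiedSize $U\n",
+                    (WriteFU)trace->rootCopiedSize)
+
+Note the absence of a trailing comma: in statistical varieties the
+expansion must supply it for the surrounding ``WriteF()`` argument
+list to remain well formed, and in non-statistical varieties the
+whole argument pair is compiled away.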
+
+
Related systems
...............
diff --git a/mps/design/seg.txt b/mps/design/seg.txt
index 4e118bfbb96..912784c1bbd 100644
--- a/mps/design/seg.txt
+++ b/mps/design/seg.txt
@@ -207,7 +207,7 @@ before calling ``SegMerge()``:
represented if this is not so.
- _`.merge.inv.buffer`: One or other of ``segLo`` and ``segHi`` may
- attached to a buffer, but not both. Justification: the segment
+ be attached to a buffer, but not both. Justification: the segment
module does not support attachment of a single seg to 2 buffers.
- _`.merge.inv.similar`: ``segLo`` and ``segHi`` must be sufficiently
diff --git a/mps/design/strategy.txt b/mps/design/strategy.txt
index 97aa86a4a08..db6965c59e5 100644
--- a/mps/design/strategy.txt
+++ b/mps/design/strategy.txt
@@ -215,13 +215,14 @@ collected; it also uses the *total size* of the generation to compute
the mortality.
_`.accounting.check`: Computing the new size for a pool generation is
-far from straightforward: see job003772_ for some (former) errors in
-this code. In order to assist with checking that this has been
-computed correctly, the locus module uses a double-entry book-keeping
-system to account for every byte in each pool generation. This uses
-six accounts:
+far from straightforward: see job003772_ and job004007_ for some
+(former) errors in this code. In order to assist with checking that
+this has been computed correctly, the locus module uses a double-entry
+book-keeping system to account for every byte in each pool generation.
+This uses seven accounts:
.. _job003772: http://www.ravenbrook.com/project/mps/issue/job003772/
+.. _job004007: http://www.ravenbrook.com/project/mps/issue/job004007/
_`.account.total`: Memory acquired from the arena.
@@ -238,6 +239,10 @@ would complain.
_`.account.free`: Memory that is not in use (free or lost to
fragmentation).
+_`.account.buffered`: Memory in a buffer that was handed out to the
+client program via ``BufferFill()``, and which has not yet been
+condemned.
+
_`.account.new`: Memory in use by the client program, allocated
since the last time the generation was condemned.
@@ -267,15 +272,19 @@ accounted as *old* or *oldDeferred* (see `.accounting.op.reclaim`_).
Finally, debit *free*, credit *total*. (But see
`.account.total.negated`_.)
-_`.accounting.op.fill`: Allocate memory, for example by filling a
-buffer. Debit *free*, credit *new* or *newDeferred*.
+_`.accounting.op.fill`: Fill a buffer. Debit *free*, credit *buffered*.
-_`.accounting.op.empty`: Deallocate memory, for example by emptying
-the unused portion of a buffer. Debit *new* or *newDeferred*, credit
-*free*.
+_`.accounting.op.empty`: Empty a buffer. Debit *buffered*, credit
+*new* or *newDeferred* with the allocated part of the buffer, credit
+*free* with the unused part of the buffer.
-_`.accounting.op.age`: Condemn memory. Debit *new* or *newDeferred*,
-credit *old* or *oldDeferred*.
+_`.accounting.op.age`: Condemn memory. Debit *buffered* (if part or
+all of a buffer was condemned) and either *new* or *newDeferred*,
+credit *old* or *oldDeferred*. Note that the condemned part of the
+buffer remains part of the buffer until the buffer is emptied, but is
+now accounted as *old* or *oldDeferred*. The uncondemned part of the
+buffer, if any, remains accounted as *buffered* until it is either
+emptied or condemned in its turn.
_`.accounting.op.reclaim`: Reclaim dead memory. Debit *old* or
*oldDeferred*, credit *free*.
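+
+For example (sizes illustrative): filling a 64 KiB buffer debits
+*free* and credits *buffered* with 64 KiB. If the segment is later
+condemned while 24 KiB of the buffer remains uncondemned, *buffered*
+is debited by 40 KiB and *old* credited by 40 KiB, leaving 24 KiB
+accounted as *buffered*. Emptying the buffer then debits that 24 KiB
+from *buffered*, crediting *new* with whatever part was allocated and
+*free* with the unused remainder.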
@@ -477,22 +486,37 @@ runtime in collections. (This fraction is given by the
Starting a trace
................
-``Bool PolicyStartTrace(Trace *traceReturn, Arena arena)``
+``Bool PolicyStartTrace(Trace *traceReturn, Bool *collectWorldReturn, Arena arena, Bool collectWorldAllowed)``
_`.policy.start`: Consider starting a trace. If a trace was started,
update ``*traceReturn`` to point to the trace and return TRUE.
Otherwise, leave ``*traceReturn`` unchanged and return FALSE.
-_`.policy.start.impl`: This uses the "Lisp Machine" strategy, which
-tries to schedule collections of the world so that the collector just
-keeps pace with the mutator: that is, it starts a collection when the
+_`.policy.start.world`: If ``collectWorldAllowed`` is TRUE, consider
+starting a collection of the whole world, and if such a collection is
+started, set ``*collectWorldReturn`` to TRUE.
+
+This decision uses the "Lisp Machine" strategy, which tries to
+schedule collections of the world so that the collector just keeps
+pace with the mutator: that is, it starts a collection when the
predicted completion time of the collection is around the time when
the mutator is predicted to reach the current memory limit. See
[Pirinen]_.
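+
+In outline (the names of the predicted quantities here are invented;
+the real computation is in ``PolicyStartTrace()``), the decision
+compares two predicted times::
+
+    /* Start a collection of the world when it would only just
+     * complete before the mutator reaches the current limit. */
+    double collectionTime = committed / predictedCollectionRate;
+    double timeToLimit = (memoryLimit - committed) / predictedAllocationRate;
+    if (collectionTime >= timeToLimit)
+      startWorldCollection();    /* hypothetical */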
-_`.policy.start.chain`: If it is not yet time to schedule a collection
-of the world, ``PolicyStartTrace()`` considers collecting a set of
-zones corresponding to a set of generations on a chain.
+_`.policy.start.world.hack`: The ``collectWorldAllowed`` flag was
+added to fix job004011_ by ensuring that the MPS starts at most one
+collection of the world in each call to ``ArenaPoll()``. But this
+is fragile and inelegant. Ideally the MPS would be able to deduce that
+a collection of a set of generations can't possibly make progress
+(because nothing that refers to this set of generations has changed),
+and so not start such a collection.
+
+.. _job004011: http://www.ravenbrook.com/project/mps/issue/job004011/
+
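+A sketch of the intended calling pattern (simplified; the real loop
+is in ``ArenaPoll()``)::
+
+    Bool collectWorldAllowed = TRUE;
+    while (ArenaShouldCollect(arena)) {   /* hypothetical predicate */
+      Trace trace;
+      Bool collectWorld = FALSE;
+      if (!PolicyStartTrace(&trace, &collectWorld, arena, collectWorldAllowed))
+        break;
+      if (collectWorld)
+        collectWorldAllowed = FALSE;  /* at most one per poll */
+      /* ... do tracing work ... */
+    }
+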
+_`.policy.start.chain`: If ``collectWorldAllowed`` is FALSE, or if it
+is not yet time to schedule a collection of the world,
+``PolicyStartTrace()`` considers collecting a set of zones
+corresponding to a set of generations on a chain.
It picks these generations by calling ``ChainDeferral()`` for each
chain; this function indicates if the chain needs collecting, and if
diff --git a/mps/design/tests.txt b/mps/design/tests.txt
index 31cfffb0bf2..eb341fc9ed7 100644
--- a/mps/design/tests.txt
+++ b/mps/design/tests.txt
@@ -56,6 +56,26 @@ _`.test.zcoll`: Collection scheduling, and collection feedback.
_`.test.zmess`: Message lifecycle and finalization messages.
+Performance test
+----------------
+
+_`.test.ratio`: The ``testratio`` target checks that the hot variety
+is not too much slower than the rash variety. A failure of this test
+usually indicates that there are assertions on the
+critical path using ``AVER`` instead of ``AVER_CRITICAL`` (and so on).
+This works by running gcbench for the AMC pool class and djbench for
+the MVFF pool class, in the hot variety and the rash variety,
+computing the ratio of CPU time taken in the two varieties, and
+testing that the ratio falls below an acceptable limit.
+
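+For example, assertions in a function on the critical path should use
+the ``_CRITICAL`` variants, which are checked in the cool variety but
+compiled away in the hot variety::
+
+    /* Plain AVER would also be checked in the hot variety, and
+     * testratio would detect the slowdown relative to rash. */
+    AVERT_CRITICAL(Seg, seg);
+    AVER_CRITICAL(addr < SegLimit(seg));
+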
+Note that we don't use the elapsed time (as reported by the benchmark)
+because we want to be able to run this test on continuous integration
+machines that might be heavily loaded.
+
+This target is currently supported only on Unix platforms using GNU
+Makefiles.
+
+
Document History
----------------
diff --git a/mps/manual/source/release.rst b/mps/manual/source/release.rst
index b4ff232163b..d39467f495c 100644
--- a/mps/manual/source/release.rst
+++ b/mps/manual/source/release.rst
@@ -25,6 +25,17 @@ Other changes
.. _job003883: https://www.ravenbrook.com/project/mps/issue/job003883/
+#. Memory in :term:`allocation points` no longer contributes to the
+   decision to start a collection, avoiding wasted work repeatedly
+ collecting generations with very small capacities. See job004007_.
+
+ .. _job004007: https://www.ravenbrook.com/project/mps/issue/job004007/
+
+#. The MPS no longer considers collecting the world again without
+   allowing the :term:`client program` to run first. See job004011_.
+
+ .. _job004011: https://www.ravenbrook.com/project/mps/issue/job004011/
+
.. _release-notes-1.115:
diff --git a/mps/manual/source/topic/arena.rst b/mps/manual/source/topic/arena.rst
index d538402cba9..ba46a746b6f 100644
--- a/mps/manual/source/topic/arena.rst
+++ b/mps/manual/source/topic/arena.rst
@@ -314,11 +314,20 @@ Arena properties
.. c:function:: mps_word_t mps_collections(mps_arena_t arena)
- Return the number of :term:`flips` that have taken place in an
- :term:`arena` since it was created.
+   Return the number of garbage collections in which objects might
+   have moved (technically, the number of :term:`flips`) that have
+   taken place in an :term:`arena` since it was created.
``arena`` is the arena.
+ .. note::
+
+ If you are only using non-moving pool classes like
+ :ref:`pool-ams`, then :c:func:`mps_collections` will always
+ return 0. To find out about these collections, consider
+ enabling garbage collection messages: see
+ :c:func:`mps_message_type_gc`.
+
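+   For example, a client program might poll for new moving
+   collections like this (``log_printf`` is a hypothetical logging
+   function)::
+
+       static mps_word_t last_collections = 0;
+       mps_word_t n = mps_collections(arena);
+       if (n != last_collections) {
+           log_printf("%lu moving collections so far\n",
+                      (unsigned long)n);
+           last_collections = n;
+       }
+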
.. c:function:: size_t mps_arena_commit_limit(mps_arena_t arena)