1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-03-26 08:41:47 -07:00

Catch-up merge from master sources to mps/branch/2016-03-27/cbs-tidy.

Copied from Perforce
 Change: 190674
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Richard Brooksby 2016-04-04 19:58:53 +01:00
commit 69a25a90d0
27 changed files with 205 additions and 88 deletions

1
mps/.renamed-gitignore Symbolic link
View file

@ -0,0 +1 @@
.p4ignore

View file

@ -14,6 +14,8 @@ lii6ll
w3i3mv
w3i6mv
xci3gc
xci3ll
xci6gc
xci6ll
# Visual Studio junk
Debug

1
mps/code/.renamed-gitignore Symbolic link
View file

@ -0,0 +1 @@
.p4ignore

View file

@ -23,6 +23,7 @@
#define testArenaSIZE ((((size_t)3)<<24) - 4)
#define testSetSIZE 200
#define testLOOPS 10
#define MAX_ALIGN 64 /* TODO: Make this test work up to arena_grain_size? */
/* make -- allocate one object */
@ -169,13 +170,16 @@ static mps_pool_debug_option_s fenceOptions = {
*/
static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
size_t arena_grain_size,
mps_pool_debug_option_s *options)
{
mps_arena_t arena;
die(mps_arena_create_k(&arena, arena_class, arena_args), "mps_arena_create");
(void)arena_grain_size; /* TODO: test larger alignments up to this */
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
mps_align_t align = rnd_align(sizeof(void *), MAX_ALIGN);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE);
MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE);
@ -185,18 +189,19 @@ static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
mps_class_mvff(), args), "stress MVFF");
} MPS_ARGS_END(args);
/* IWBN to test MVFFDebug, but the MPS doesn't support debugging APs, */
/* yet (MV Debug works here, because it fakes it through PoolAlloc). */
/* IWBN to test MVFFDebug, but the MPS doesn't support debugging
APs, yet (MV Debug works here, because it fakes it through
PoolAlloc). See job003995. */
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
mps_align_t align = rnd_align(sizeof(void *), MAX_ALIGN);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, NULL, align, randomSizeAligned, "MV",
mps_class_mv(), args), "stress MV");
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = (mps_align_t)1 << (rnd() % 6);
mps_align_t align = rnd_align(sizeof(void *), MAX_ALIGN);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options);
die(stress(arena, options, align, randomSizeAligned, "MV debug",
@ -204,7 +209,7 @@ static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
} MPS_ARGS_END(args);
MPS_ARGS_BEGIN(args) {
mps_align_t align = sizeof(void *) << (rnd() % 4);
mps_align_t align = rnd_align(sizeof(void *), MAX_ALIGN);
MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align);
die(stress(arena, NULL, align, randomSizeAligned, "MVT",
mps_class_mvt(), args), "stress MVT");
@ -218,28 +223,33 @@ static void test(mps_arena_class_t arena_class, mps_arg_s arena_args[],
int main(int argc, char *argv[])
{
size_t arena_grain_size;
testlib_init(argc, argv);
arena_grain_size = rnd_grain(2 * testArenaSIZE);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 2 * testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(2*testArenaSIZE));
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size);
MPS_ARGS_ADD(args, MPS_KEY_COMMIT_LIMIT, testArenaSIZE);
test(mps_arena_class_vm(), args, &fenceOptions);
test(mps_arena_class_vm(), args, arena_grain_size, &fenceOptions);
} MPS_ARGS_END(args);
arena_grain_size = rnd_grain(2 * testArenaSIZE);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 2 * testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(2*testArenaSIZE));
test(mps_arena_class_vm(), args, &bothOptions);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size);
test(mps_arena_class_vm(), args, arena_grain_size, &bothOptions);
} MPS_ARGS_END(args);
arena_grain_size = rnd_grain(testArenaSIZE);
MPS_ARGS_BEGIN(args) {
MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_CL_BASE, malloc(testArenaSIZE));
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE));
test(mps_arena_class_cl(), args, &bothOptions);
MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, arena_grain_size);
test(mps_arena_class_cl(), args, arena_grain_size, &bothOptions);
} MPS_ARGS_END(args);
printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);

View file

@ -724,15 +724,15 @@ extern Addr (SegLimit)(Seg seg);
/* .bitfield.promote: The bit field accesses need to be cast to the */
/* right type, otherwise they'll be promoted to signed int, see */
/* standard.ansic.6.2.1.1. */
#define SegRankSet(seg) ((RankSet)(seg)->rankSet)
#define SegPM(seg) ((AccessSet)(seg)->pm)
#define SegSM(seg) ((AccessSet)(seg)->sm)
#define SegDepth(seg) ((unsigned)(seg)->depth)
#define SegGrey(seg) ((TraceSet)(seg)->grey)
#define SegWhite(seg) ((TraceSet)(seg)->white)
#define SegNailed(seg) ((TraceSet)(seg)->nailed)
#define SegPoolRing(seg) (&(seg)->poolRing)
#define SegOfPoolRing(node) (RING_ELT(Seg, poolRing, (node)))
#define SegRankSet(seg) RVALUE((RankSet)(seg)->rankSet)
#define SegPM(seg) RVALUE((AccessSet)(seg)->pm)
#define SegSM(seg) RVALUE((AccessSet)(seg)->sm)
#define SegDepth(seg) RVALUE((unsigned)(seg)->depth)
#define SegGrey(seg) RVALUE((TraceSet)(seg)->grey)
#define SegWhite(seg) RVALUE((TraceSet)(seg)->white)
#define SegNailed(seg) RVALUE((TraceSet)(seg)->nailed)
#define SegPoolRing(seg) RVALUE(&(seg)->poolRing)
#define SegOfPoolRing(node) RING_ELT(Seg, poolRing, (node))
#define SegOfGreyRing(node) (&(RING_ELT(GCSeg, greyRing, (node)) \
->segStruct))

View file

@ -1170,9 +1170,13 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
amsseg->newGrains = uncondemned;
amsseg->marksChanged = FALSE; /* <design/poolams/#marked.condemn> */
amsseg->ambiguousFixes = FALSE;
trace->condemned += AMSGrainsSize(ams, amsseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
if (amsseg->oldGrains > 0) {
trace->condemned += AMSGrainsSize(ams, amsseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
} else {
amsseg->colourTablesInUse = FALSE;
}
return ResOK;
}

View file

@ -791,8 +791,12 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
PoolGenAccountForAge(&awl->pgen, AWLGrainsSize(awl, awlseg->newGrains - uncondemned), FALSE);
awlseg->oldGrains += awlseg->newGrains - uncondemned;
awlseg->newGrains = uncondemned;
trace->condemned += AWLGrainsSize(awl, awlseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
if (awlseg->oldGrains > 0) {
trace->condemned += AWLGrainsSize(awl, awlseg->oldGrains);
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
}
return ResOK;
}

View file

@ -29,6 +29,7 @@
#include "dbgpool.h"
#include "poolmv.h"
#include "poolmfs.h"
#include "mpscmvff.h"
#include "mpm.h"
SRCID(poolmv, "$Id$");
@ -236,7 +237,10 @@ static Res MVInit(Pool pool, ArgList args)
if (ArgPick(&arg, args, MPS_KEY_MAX_SIZE))
maxSize = arg.val.size;
arena = PoolArena(pool);
AVERT(Align, align);
AVER(align <= ArenaGrainSize(arena));
AVER(extendBy > 0);
AVER(avgSize > 0);
AVER(avgSize <= extendBy);
@ -245,7 +249,6 @@ static Res MVInit(Pool pool, ArgList args)
pool->alignment = align;
mv = PoolMV(pool);
arena = PoolArena(pool);
/* At 100% fragmentation we will need one block descriptor for every other */
/* allocated block, or (extendBy/avgSize)/2 descriptors. See note 1. */

View file

@ -259,10 +259,10 @@ static Res MVTInit(Pool pool, ArgList args)
AVERT(Align, align);
/* This restriction on the alignment is necessary because of the use
* of a Freelist to store the free address ranges in low-memory
* situations. See <design/freelist/#impl.grain.align>.
*/
of a Freelist to store the free address ranges in low-memory
situations. See <design/freelist/#impl.grain.align>. */
AVER(AlignIsAligned(align, FreelistMinimumAlignment));
AVER(align <= ArenaGrainSize(arena));
AVER(0 < minSize);
AVER(minSize <= meanSize);
AVER(meanSize <= maxSize);

View file

@ -486,10 +486,10 @@ static Res MVFFInit(Pool pool, ArgList args)
AVER(spare <= 1.0); /* .arg.check */
AVERT(Align, align);
/* This restriction on the alignment is necessary because of the use
* of a Freelist to store the free address ranges in low-memory
* situations. <design/freelist/#impl.grain.align>.
*/
of a Freelist to store the free address ranges in low-memory
situations. <design/freelist/#impl.grain.align>. */
AVER(AlignIsAligned(align, FreelistMinimumAlignment));
AVER(align <= ArenaGrainSize(arena));
AVERT(Bool, slotHigh);
AVERT(Bool, arenaHigh);
AVERT(Bool, firstFit);

View file

@ -658,6 +658,52 @@ static void SNCWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
}
/* SNCTotalSize -- total memory allocated from the arena */
/* SNCTotalSize -- total memory allocated from the arena
 *
 * Walks the pool's segment ring and totals the size of every segment
 * the pool holds.
 */
static Size SNCTotalSize(Pool pool)
{
  SNC snc;
  Ring segRing, cursor, next;
  Size size = 0;

  AVERT(Pool, pool);
  snc = PoolSNC(pool);
  AVERT(SNC, snc);

  segRing = &pool->segRing;
  RING_FOR(cursor, segRing, next) {
    Seg seg = SegOfPoolRing(cursor);
    AVERT(Seg, seg);
    size += SegSize(seg);
  }

  return size;
}
/* SNCFreeSize -- free memory (unused by client program) */
/* SNCFreeSize -- free memory (unused by client program)
 *
 * Follows the pool's chain of free segments (headed at snc->freeSegs,
 * linked via sncSegNext) and totals their sizes.
 */
static Size SNCFreeSize(Pool pool)
{
  SNC snc;
  Seg freeSeg;
  Size size = 0;

  AVERT(Pool, pool);
  snc = PoolSNC(pool);
  AVERT(SNC, snc);

  for (freeSeg = snc->freeSegs; freeSeg != NULL; freeSeg = sncSegNext(freeSeg)) {
    AVERT(Seg, freeSeg);
    size += SegSize(freeSeg);
  }

  return size;
}
/* SNCPoolClass -- the class definition */
DEFINE_POOL_CLASS(SNCPoolClass, this)
@ -678,6 +724,8 @@ DEFINE_POOL_CLASS(SNCPoolClass, this)
this->framePopPending = SNCFramePopPending;
this->walk = SNCWalk;
this->bufferClass = SNCBufClassGet;
this->totalSize = SNCTotalSize;
this->freeSize = SNCFreeSize;
AVERT(PoolClass, this);
}

View file

@ -28,10 +28,6 @@ SRCID(seg, "$Id$");
#define SegGCSeg(seg) ((GCSeg)(seg))
/* SegPoolRing -- Pool ring accessor */
#define SegPoolRing(seg) (&(seg)->poolRing)
/* forward declarations */
@ -1463,6 +1459,21 @@ static Res gcSegMerge(Seg seg, Seg segHi,
grey = SegGrey(segHi); /* check greyness */
AVER(SegGrey(seg) == grey);
/* Assume that the write barrier shield is being used to implement
the remembered set only, and so we can merge the shield and
protection modes by unioning the segment summaries. See also
design.mps.seg.merge.inv.similar. */
summary = RefSetUnion(gcseg->summary, gcsegHi->summary);
SegSetSummary(seg, summary);
SegSetSummary(segHi, summary);
AVER(SegSM(seg) == SegSM(segHi));
if (SegPM(seg) != SegPM(segHi)) {
/* This shield won't cope with a partially-protected segment, so
flush the shield queue to bring both halves in sync. See also
design.mps.seg.split-merge.shield.re-flush. */
ShieldFlush(PoolArena(SegPool(seg)));
}
/* Merge the superclass fields via next-method call */
super = SEG_SUPERCLASS(GCSegClass);
res = super->merge(seg, segHi, base, mid, limit);
@ -1470,13 +1481,6 @@ static Res gcSegMerge(Seg seg, Seg segHi,
goto failSuper;
/* Update fields of gcseg. Finish gcsegHi. */
summary = RefSetUnion(gcseg->summary, gcsegHi->summary);
if (summary != gcseg->summary) {
gcSegSetSummary(seg, summary);
/* <design/seg/#split-merge.shield.re-flush> */
ShieldFlush(PoolArena(SegPool(seg)));
}
gcSegSetGreyInternal(segHi, grey, TraceSetEMPTY);
gcsegHi->summary = RefSetEMPTY;
gcsegHi->sig = SigInvalid;

View file

@ -508,10 +508,10 @@ static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
* Calls next method - but possibly splits or merges the chosen
* segment.
*
* .merge: A merge is performed when the next method returns
* the entire segment, this segment had previously been split
* from the segment below, and the segment below is appropriately
* similar (i.e. not already attached to a buffer and similarly grey)
* .merge: A merge is performed when the next method returns the
* entire segment, this segment had previously been split from the
* segment below, and the segment below is appropriately similar
* (i.e. not already attached to a buffer and similarly coloured)
*
* .split: If we're not merging, a split is performed if the next method
* returns the entire segment, and yet lower half of the segment would
@ -551,7 +551,9 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
if (SegLimit(seg) == limit && SegBase(seg) == base) {
if (amstseg->prev != NULL) {
Seg segLo = AMSTSeg2Seg(amstseg->prev);
if (SegBuffer(segLo) == NULL && SegGrey(segLo) == SegGrey(seg)) {
if (SegBuffer(segLo) == NULL &&
SegGrey(segLo) == SegGrey(seg) &&
SegWhite(segLo) == SegWhite(seg)) {
/* .merge */
Seg mergedSeg;
Res mres;

View file

@ -220,14 +220,30 @@ double rnd_double(void)
return rnd() / R_m_float;
}
/* sizelog2 -- floor of the base-2 logarithm of size
 *
 * Computed by bit-shifting rather than floating-point log(), so the
 * result is exact even for sizes that a double cannot represent
 * precisely (e.g. values near SIZE_MAX on 64-bit platforms), where
 * (unsigned)(log(size) / log(2.0)) could round to the wrong exponent.
 * For size == 0 the result is 0 (the loop never runs).
 */
static unsigned sizelog2(size_t size)
{
  unsigned result = 0;
  while (size > 1) {
    size >>= 1;
    ++result;
  }
  return result;
}
size_t rnd_grain(size_t arena_size)
{
/* The grain size must be small enough to allow for a complete set
* of zones in the initial chunk. */
size_t s = (size_t)(log((double)arena_size) / log(2.0));
size_t shift = MPS_WORD_SHIFT;
Insist(s > shift);
return (size_t)1 << (rnd() % (s - shift));
of zones in the initial chunk, but bigger than one word. */
Insist(arena_size >> MPS_WORD_SHIFT >= sizeof(void *));
return rnd_align(sizeof(void *), (size_t)1 << sizelog2(arena_size >> MPS_WORD_SHIFT));
}
/* rnd_align -- random alignment
 *
 * Returns a random power of two between min and max inclusive,
 * uniformly distributed over the exponents.  Both min and max must
 * themselves be powers of two with min <= max; this is checked.
 */
size_t rnd_align(size_t min, size_t max)
{
  unsigned log2min = sizelog2(min);
  unsigned log2max = sizelog2(max);
  Insist(min <= max);
  /* Shift (size_t)1, not 1uL: on LLP64 platforms unsigned long is
     narrower than size_t, so 1uL << log2 would be undefined behaviour
     for exponents >= 32 (shift count >= width of the type). */
  Insist((size_t)1 << log2min == min);
  Insist((size_t)1 << log2max == max);
  if (log2min < log2max)
    return min << (rnd() % (log2max - log2min + 1));
  else
    return min;
}
rnd_state_t rnd_seed(void)

View file

@ -260,6 +260,11 @@ extern double rnd_double(void);
extern size_t rnd_grain(size_t arena_size);
/* rnd_align -- random alignment */
extern size_t rnd_align(size_t min, size_t max);
/* randomize -- randomize the generator, or initialize to replay
*
* randomize(argc, argv) randomizes the rnd generator (using time(3))

View file

@ -339,7 +339,12 @@ static ZoneSet traceSetWhiteUnion(TraceSet ts, Arena arena)
}
/* TraceIsEmpty -- return TRUE if trace has no condemned segments */
/* TraceIsEmpty -- return TRUE if trace has no condemned segments
*
* .empty.size: If the trace has a condemned size of zero, then it has
* no white segments, because we don't allow pools to whiten segments
* with no white objects in.
*/
Bool TraceIsEmpty(Trace trace)
{
@ -354,6 +359,7 @@ Res TraceAddWhite(Trace trace, Seg seg)
{
Res res;
Pool pool;
Size condemnedBefore;
AVERT(Trace, trace);
AVERT(Seg, seg);
@ -362,18 +368,25 @@ Res TraceAddWhite(Trace trace, Seg seg)
pool = SegPool(seg);
AVERT(Pool, pool);
condemnedBefore = trace->condemned;
/* Give the pool the opportunity to turn the segment white. */
/* If it fails, unwind. */
res = PoolWhiten(pool, trace, seg);
if(res != ResOK)
return res;
/* Add the segment to the approximation of the white set if the */
/* pool made it white. */
if(TraceSetIsMember(SegWhite(seg), trace)) {
if (TraceSetIsMember(SegWhite(seg), trace)) {
/* Pools must not condemn empty segments, otherwise we can't tell
when a trace is empty and safe to destroy. See .empty.size. */
AVER(trace->condemned > condemnedBefore);
/* Add the segment to the approximation of the white set if the
pool made it white. */
trace->white = ZoneSetUnion(trace->white, ZoneSetOfSeg(trace->arena, seg));
/* if the pool is a moving GC, then condemned objects may move */
if(PoolHasAttr(pool, AttrMOVINGGC)) {
if (PoolHasAttr(pool, AttrMOVINGGC)) {
trace->mayMove = ZoneSetUnion(trace->mayMove,
ZoneSetOfSeg(trace->arena, seg));
}
@ -1532,11 +1545,13 @@ static Res traceCondemnAll(Trace trace)
Ring segNode, nextSegNode;
RING_FOR(segNode, PoolSegRing(pool), nextSegNode) {
Seg seg = SegOfPoolRing(segNode);
AVERT(Seg, seg);
res = TraceAddWhite(trace, seg);
if (res != ResOK)
goto failBegin;
}
}
}

View file

@ -0,0 +1 @@
.p4ignore

View file

@ -213,12 +213,10 @@ before calling ``SegMerge()``:
- _`.merge.inv.similar`: ``segLo`` and ``segHi`` must be sufficiently
similar. Two segments are sufficiently similar if they have
identical values for each of the following fields: ``class``,
``sm``, ``grey``, ``white``, ``nailed``, ``rankSet``. Justification:
there is no single choice of behaviour for cases where these fields
are not identical. The pool class must make it's own choices about
this if it wishes to permit more flexible merging. If so, it should
be a simple matter for the pool to arrange for the segments to look
sufficiently similar before calling ``SegMerge()``.
``grey``, ``white``, ``nailed``, ``rankSet``. Justification: There
has yet to be a need to implement default behaviour for these
cases. Pool classes should arrange for these values to be the same
before calling ``SegMerge()``.
_`.merge.state`: The merged segment will share the same state as
``segLo`` and ``segHi`` for those fields which are identical (see

View file

@ -0,0 +1 @@
.p4ignore

View file

@ -0,0 +1 @@
.p4ignore

View file

@ -0,0 +1 @@
.p4ignore

View file

@ -70,10 +70,11 @@ MV interface
optional :term:`keyword arguments`:
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc` or
:c:func:`mps_free`, it will be rounded up to the pool's alignment.
:c:macro:`MPS_PF_ALIGN`) is the :term:`alignment` of the
addresses allocated (and freed) in the pool. The minimum
alignment supported by pools of this class is 1 (one)
and the maximum is the arena grain size
(see :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE`).
* :c:macro:`MPS_KEY_EXTEND_BY` (type :c:type:`size_t`,
default 65536) is the :term:`size` of block that the pool will

View file

@ -115,12 +115,11 @@ MVFF interface
efficient if this is wrong, but nothing will break.
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc`
or :c:func:`mps_free`, it will be rounded up to the pool's
alignment. The minimum alignment supported by pools of this
class is ``sizeof(void *)``.
:c:macro:`MPS_PF_ALIGN`) is the :term:`alignment` of the
addresses allocated (and freed) in the pool. The minimum
alignment supported by pools of this class is ``sizeof(void *)``
and the maximum is the arena grain size
(see :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE`).
* :c:macro:`MPS_KEY_SPARE` (type :c:type:`double`, default 0.75)
is the maximum proportion of memory that the pool will keep

View file

@ -115,12 +115,11 @@ MVT interface
optional :term:`keyword arguments`:
* :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the
:term:`alignment` of addresses for allocation (and freeing) in
the pool. If an unaligned size is passed to :c:func:`mps_alloc` or
:c:func:`mps_free`, it will be rounded up to the pool's alignment.
The minimum alignment supported by pools of this class is
``sizeof(void *)``.
:c:macro:`MPS_PF_ALIGN`) is the :term:`alignment` of the
addresses allocated (and freed) in the pool. The minimum
alignment supported by pools of this class is ``sizeof(void *)``
and the maximum is the arena grain size
(see :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE`).
* :c:macro:`MPS_KEY_MIN_SIZE` (type :c:type:`size_t`, default is
:c:macro:`MPS_PF_ALIGN`) is the

View file

@ -143,13 +143,13 @@ Other changes
#. The MPS is less aggressive in its use of hardware memory protection
to maintain :term:`write barrier` to speed up future collections.
This is particularly important for OS X, where memory protection is
poorly implemented. See job003371_ and job003975_.
This is particularly important for OS X, where memory protection
operations are very expensive. See job003371_ and job003975_.
#. The MPS coalesces memory protection, reducing the number of system
calls. This drastically improves real run time on operating systems
where memory protection is poorly implemented, such as OS X, but
also has a significant effect on Linux. See job003371_ and
calls. This markedly improves real run time on operating systems
where memory protection operations are very expensive, such as OS
X, but also has a significant effect on Linux. See job003371_ and
job003975_.
.. _job003371: http://www.ravenbrook.com/project/mps/issue/job003371/

View file

@ -150,9 +150,9 @@ Client arenas
* :c:macro:`MPS_KEY_ARENA_GRAIN_SIZE` (type :c:type:`size_t`,
default 8192) is the granularity with which the arena will
manage memory internally. It must be a power of 2. Larger
granularity reduces overheads, but increases
:term:`fragmentation` and :term:`retention`.
manage memory internally. It must be a power of 2, and at least
``sizeof(void *)``. Larger granularity reduces overheads, but
increases :term:`fragmentation` and :term:`retention`.
* :c:macro:`MPS_KEY_PAUSE_TIME` (type :c:type:`double`, default
0.1) is the maximum time, in seconds, that operations within the

1
mps/tool/.renamed-gitignore Symbolic link
View file

@ -0,0 +1 @@
.p4ignore