mirror of
git://git.sv.gnu.org/emacs.git
synced 2026-03-12 09:51:05 -07:00
Catch-up merge from master sources at changelevel 194268 to branch/2018-06-18/job004070.
Copied from Perforce Change: 194274
This commit is contained in:
commit
ddffa73317
81 changed files with 1931 additions and 1952 deletions
|
|
@ -107,6 +107,7 @@ static void ArenaNoDestroy(Arena arena)
|
|||
DEFINE_CLASS(Inst, ArenaClass, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, ArenaClass, InstClass);
|
||||
AVERT(InstClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -132,6 +133,7 @@ DEFINE_CLASS(Arena, AbstractArena, klass)
|
|||
klass->pagesMarkAllocated = ArenaNoPagesMarkAllocated;
|
||||
klass->chunkPageMapped = ArenaNoChunkPageMapped;
|
||||
klass->sig = ArenaClassSig;
|
||||
AVERT(ArenaClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -154,6 +156,15 @@ Bool ArenaClassCheck(ArenaClass klass)
|
|||
CHECKL(FUNCHECK(klass->compact));
|
||||
CHECKL(FUNCHECK(klass->pagesMarkAllocated));
|
||||
CHECKL(FUNCHECK(klass->chunkPageMapped));
|
||||
|
||||
/* Check that arena classes override sets of related methods. */
|
||||
CHECKL((klass->init == ArenaAbsInit)
|
||||
== (klass->instClassStruct.finish == ArenaAbsFinish));
|
||||
CHECKL((klass->create == ArenaNoCreate)
|
||||
== (klass->destroy == ArenaNoDestroy));
|
||||
CHECKL((klass->chunkInit == ArenaNoChunkInit)
|
||||
== (klass->chunkFinish == ArenaNoChunkFinish));
|
||||
|
||||
CHECKS(ArenaClass, klass);
|
||||
return TRUE;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -458,6 +458,7 @@ DEFINE_CLASS(Arena, ClientArena, klass)
|
|||
klass->chunkInit = ClientChunkInit;
|
||||
klass->chunkFinish = ClientChunkFinish;
|
||||
klass->chunkPageMapped = ClientChunkPageMapped;
|
||||
AVERT(ArenaClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1219,6 +1219,7 @@ DEFINE_CLASS(Arena, VMArena, klass)
|
|||
klass->compact = VMCompact;
|
||||
klass->pagesMarkAllocated = VMPagesMarkAllocated;
|
||||
klass->chunkPageMapped = VMChunkPageMapped;
|
||||
AVERT(ArenaClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1014,6 +1014,13 @@ Bool BufferClassCheck(BufferClass klass)
|
|||
CHECKL(FUNCHECK(klass->rankSet));
|
||||
CHECKL(FUNCHECK(klass->setRankSet));
|
||||
CHECKL(FUNCHECK(klass->reassignSeg));
|
||||
|
||||
/* Check that buffer classes override sets of related methods. */
|
||||
CHECKL((klass->init == BufferAbsInit)
|
||||
== (klass->instClassStruct.finish == BufferAbsFinish));
|
||||
CHECKL((klass->attach == bufferTrivAttach)
|
||||
== (klass->detach == bufferTrivDetach));
|
||||
|
||||
CHECKS(BufferClass, klass);
|
||||
return TRUE;
|
||||
}
|
||||
|
|
@ -1026,6 +1033,7 @@ Bool BufferClassCheck(BufferClass klass)
|
|||
DEFINE_CLASS(Inst, BufferClass, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, BufferClass, InstClass);
|
||||
AVERT(InstClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Buffer, Buffer, klass)
|
||||
|
|
@ -1043,6 +1051,7 @@ DEFINE_CLASS(Buffer, Buffer, klass)
|
|||
klass->setRankSet = bufferNoSetRankSet;
|
||||
klass->reassignSeg = bufferNoReassignSeg;
|
||||
klass->sig = BufferClassSig;
|
||||
AVERT(BufferClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1248,6 +1257,7 @@ DEFINE_CLASS(Buffer, SegBuf, klass)
|
|||
klass->rankSet = segBufRankSet;
|
||||
klass->setRankSet = segBufSetRankSet;
|
||||
klass->reassignSeg = segBufReassignSeg;
|
||||
AVERT(BufferClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1304,6 +1314,7 @@ DEFINE_CLASS(Buffer, RankBuf, klass)
|
|||
INHERIT_CLASS(klass, RankBuf, SegBuf);
|
||||
klass->varargs = rankBufVarargs;
|
||||
klass->init = rankBufInit;
|
||||
AVERT(BufferClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1151,18 +1151,21 @@ DEFINE_CLASS(Land, CBS, klass)
|
|||
klass->findLast = cbsFindLast;
|
||||
klass->findLargest = cbsFindLargest;
|
||||
klass->findInZones = cbsFindInZones;
|
||||
AVERT(LandClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Land, CBSFast, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, CBSFast, CBS);
|
||||
klass->init = cbsInitFast;
|
||||
AVERT(LandClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Land, CBSZoned, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, CBSZoned, CBSFast);
|
||||
klass->init = cbsInitZoned;
|
||||
AVERT(LandClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -286,6 +286,7 @@ DEFINE_CLASS(Land, Failover, klass)
|
|||
klass->findLast = failoverFindLast;
|
||||
klass->findLargest = failoverFindLargest;
|
||||
klass->findInZones = failoverFindInZones;
|
||||
AVERT(LandClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* finalcv.c: FINALIZATION COVERAGE TEST
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
* Portions copyright (C) 2002 Global Graphics Software.
|
||||
*
|
||||
* DESIGN
|
||||
|
|
@ -143,6 +143,10 @@ static void test(mps_arena_t arena, mps_pool_class_t pool_class)
|
|||
/* store index in vector's slot */
|
||||
((mps_word_t *)p)[vectorSLOT] = dylan_int(i);
|
||||
|
||||
/* mps_definalize fails when there have been no calls to mps_finalize
|
||||
yet, or for an address that was not registered for finalization. */
|
||||
Insist(mps_definalize(arena, &p) == MPS_RES_FAIL);
|
||||
|
||||
die(mps_finalize(arena, &p), "finalize\n");
|
||||
root[i] = p; state[i] = rootSTATE;
|
||||
}
|
||||
|
|
@ -246,7 +250,7 @@ int main(int argc, char *argv[])
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -794,6 +794,7 @@ DEFINE_CLASS(Land, Freelist, klass)
|
|||
klass->findLast = freelistFindLast;
|
||||
klass->findLargest = freelistFindLargest;
|
||||
klass->findInZones = freelistFindInZones;
|
||||
AVERT(LandClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* global.c: ARENA-GLOBAL INTERFACES
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
* Portions copyright (C) 2002 Global Graphics Software.
|
||||
*
|
||||
* .sources: See <design/arena/>. design.mps.thread-safety is relevant
|
||||
|
|
@ -53,7 +53,9 @@ static void arenaReleaseRingLock(void)
|
|||
}
|
||||
|
||||
|
||||
/* GlobalsClaimAll -- claim all MPS locks <design/thread-safety/#fork.lock> */
|
||||
/* GlobalsClaimAll -- claim all MPS locks
|
||||
* <design/thread-safety/#sol.fork.lock>
|
||||
*/
|
||||
|
||||
void GlobalsClaimAll(void)
|
||||
{
|
||||
|
|
@ -674,7 +676,7 @@ Bool ArenaAccess(Addr addr, AccessSet mode, MutatorContext context)
|
|||
* thread. */
|
||||
mode &= SegPM(seg);
|
||||
if (mode != AccessSetEMPTY) {
|
||||
res = PoolAccess(SegPool(seg), seg, addr, mode, context);
|
||||
res = SegAccess(seg, arena, addr, mode, context);
|
||||
AVER(res == ResOK); /* Mutator can't continue unless this succeeds */
|
||||
} else {
|
||||
/* Protection was already cleared, for example by another thread
|
||||
|
|
@ -1117,7 +1119,7 @@ Bool ArenaEmergency(Arena arena)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -12,6 +12,12 @@
|
|||
SRCID(land, "$Id$");
|
||||
|
||||
|
||||
/* Forward declarations */
|
||||
|
||||
static Res landNoInsert(Range rangeReturn, Land land, Range range);
|
||||
static Res landNoDelete(Range rangeReturn, Land land, Range range);
|
||||
|
||||
|
||||
/* FindDeleteCheck -- check method for a FindDelete value */
|
||||
|
||||
Bool FindDeleteCheck(FindDelete findDelete)
|
||||
|
|
@ -409,6 +415,12 @@ Bool LandClassCheck(LandClass klass)
|
|||
CHECKL(FUNCHECK(klass->findLast));
|
||||
CHECKL(FUNCHECK(klass->findLargest));
|
||||
CHECKL(FUNCHECK(klass->findInZones));
|
||||
|
||||
/* Check that land classes override sets of related methods. */
|
||||
CHECKL((klass->init == LandAbsInit)
|
||||
== (klass->instClassStruct.finish == LandAbsFinish));
|
||||
CHECKL((klass->insert == landNoInsert) == (klass->delete == landNoDelete));
|
||||
|
||||
CHECKS(LandClass, klass);
|
||||
return TRUE;
|
||||
}
|
||||
|
|
@ -528,6 +540,7 @@ static Res LandAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
DEFINE_CLASS(Inst, LandClass, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, LandClass, InstClass);
|
||||
AVERT(InstClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Land, Land, klass)
|
||||
|
|
@ -547,6 +560,7 @@ DEFINE_CLASS(Land, Land, klass)
|
|||
klass->findLargest = landNoFind;
|
||||
klass->findInZones = landNoFindInZones;
|
||||
klass->sig = LandClassSig;
|
||||
AVERT(LandClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -29,20 +29,18 @@
|
|||
* implementation (lockli.c).
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC)
|
||||
#error "lockix.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC"
|
||||
#endif
|
||||
|
||||
#include "lock.h"
|
||||
|
||||
#include <pthread.h> /* see .feature.li in config.h */
|
||||
#include <semaphore.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include "lock.h"
|
||||
#include "mpmtypes.h"
|
||||
|
||||
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC)
|
||||
#error "lockix.c is Unix specific."
|
||||
#endif
|
||||
|
||||
SRCID(lockix, "$Id$");
|
||||
|
||||
#if defined(LOCK)
|
||||
|
|
@ -161,8 +159,8 @@ void (LockClaimRecursive)(Lock lock)
|
|||
/* pthread_mutex_lock will return: */
|
||||
/* 0 if we have just claimed the lock */
|
||||
/* EDEADLK if we own the lock already. */
|
||||
AVER((res == 0 && lock->claims == 0) ||
|
||||
(res == EDEADLK && lock->claims > 0));
|
||||
AVER((res == 0) == (lock->claims == 0));
|
||||
AVER((res == EDEADLK) == (lock->claims > 0));
|
||||
|
||||
++lock->claims;
|
||||
AVER(lock->claims > 0);
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* lockw3.c: RECURSIVE LOCKS IN WIN32
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* .design: These are implemented using critical sections.
|
||||
* See the section titled "Synchronization functions" in the Groups
|
||||
|
|
@ -23,8 +23,8 @@
|
|||
|
||||
#include "mpm.h"
|
||||
|
||||
#ifndef MPS_OS_W3
|
||||
#error "lockw3.c is specific to Win32 but MPS_OS_W3 not defined"
|
||||
#if !defined(MPS_OS_W3)
|
||||
#error "lockw3.c is specific to MPS_OS_W3"
|
||||
#endif
|
||||
|
||||
#include "mpswin.h"
|
||||
|
|
@ -185,7 +185,7 @@ void LockSetup(void)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -52,8 +52,6 @@ typedef struct GenDescStruct {
|
|||
|
||||
/* PoolGen -- descriptor of a generation in a pool */
|
||||
|
||||
typedef struct PoolGenStruct *PoolGen;
|
||||
|
||||
#define PoolGenSig ((Sig)0x519B009E) /* SIGnature POOl GEn */
|
||||
|
||||
typedef struct PoolGenStruct {
|
||||
|
|
|
|||
|
|
@ -204,6 +204,11 @@ extern Res PoolDescribe(Pool pool, mps_lib_FILE *stream, Count depth);
|
|||
#define PoolArenaRing(pool) (&(pool)->arenaRing)
|
||||
#define PoolOfArenaRing(node) RING_ELT(Pool, arenaRing, node)
|
||||
#define PoolHasAttr(pool, Attr) ((ClassOfPoly(Pool, pool)->attr & (Attr)) != 0)
|
||||
#define PoolSizeGrains(pool, size) ((size) >> (pool)->alignShift)
|
||||
#define PoolGrainsSize(pool, grains) ((grains) << (pool)->alignShift)
|
||||
#define PoolIndexOfAddr(base, pool, p) \
|
||||
(AddrOffset((base), (p)) >> (pool)->alignShift)
|
||||
#define PoolAddrOfIndex(base, pool, i) AddrAdd(base, PoolGrainsSize(pool, i))
|
||||
|
||||
extern Bool PoolFormat(Format *formatReturn, Pool pool);
|
||||
|
||||
|
|
@ -220,18 +225,8 @@ extern void PoolDestroy(Pool pool);
|
|||
extern BufferClass PoolDefaultBufferClass(Pool pool);
|
||||
extern Res PoolAlloc(Addr *pReturn, Pool pool, Size size);
|
||||
extern void PoolFree(Pool pool, Addr old, Size size);
|
||||
extern PoolGen PoolSegPoolGen(Pool pool, Seg seg);
|
||||
extern Res PoolTraceBegin(Pool pool, Trace trace);
|
||||
extern Res PoolAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
extern Res PoolWhiten(Pool pool, Trace trace, Seg seg);
|
||||
extern void PoolGrey(Pool pool, Trace trace, Seg seg);
|
||||
extern void PoolBlacken(Pool pool, TraceSet traceSet, Seg seg);
|
||||
extern Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg);
|
||||
extern Res PoolFix(Pool pool, ScanState ss, Seg seg, Addr *refIO);
|
||||
extern Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO);
|
||||
extern void PoolReclaim(Pool pool, Trace trace, Seg seg);
|
||||
extern void PoolWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *v, size_t s);
|
||||
extern void PoolFreeWalk(Pool pool, FreeBlockVisitor f, void *p);
|
||||
extern Size PoolTotalSize(Pool pool);
|
||||
extern Size PoolFreeSize(Pool pool);
|
||||
|
|
@ -242,6 +237,7 @@ extern Res PoolNoAlloc(Addr *pReturn, Pool pool, Size size);
|
|||
extern Res PoolTrivAlloc(Addr *pReturn, Pool pool, Size size);
|
||||
extern void PoolNoFree(Pool pool, Addr old, Size size);
|
||||
extern void PoolTrivFree(Pool pool, Addr old, Size size);
|
||||
extern PoolGen PoolNoSegPoolGen(Pool pool, Seg seg);
|
||||
extern Res PoolNoBufferFill(Addr *baseReturn, Addr *limitReturn,
|
||||
Pool pool, Buffer buffer, Size size);
|
||||
extern Res PoolTrivBufferFill(Addr *baseReturn, Addr *limitReturn,
|
||||
|
|
@ -253,21 +249,7 @@ extern void PoolTrivBufferEmpty(Pool pool, Buffer buffer,
|
|||
extern Res PoolAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth);
|
||||
extern Res PoolNoTraceBegin(Pool pool, Trace trace);
|
||||
extern Res PoolTrivTraceBegin(Pool pool, Trace trace);
|
||||
extern Res PoolNoAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
extern Res PoolSegAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
extern Res PoolSingleAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
extern Res PoolNoWhiten(Pool pool, Trace trace, Seg seg);
|
||||
extern Res PoolTrivWhiten(Pool pool, Trace trace, Seg seg);
|
||||
extern void PoolNoGrey(Pool pool, Trace trace, Seg seg);
|
||||
extern void PoolTrivGrey(Pool pool, Trace trace, Seg seg);
|
||||
extern void PoolNoBlacken(Pool pool, TraceSet traceSet, Seg seg);
|
||||
extern void PoolTrivBlacken(Pool pool, TraceSet traceSet, Seg seg);
|
||||
extern Res PoolNoScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg);
|
||||
extern Res PoolNoFix(Pool pool, ScanState ss, Seg seg, Ref *refIO);
|
||||
extern void PoolNoReclaim(Pool pool, Trace trace, Seg seg);
|
||||
extern void PoolNoRampBegin(Pool pool, Buffer buf, Bool collectAll);
|
||||
extern void PoolTrivRampBegin(Pool pool, Buffer buf, Bool collectAll);
|
||||
extern void PoolNoRampEnd(Pool pool, Buffer buf);
|
||||
|
|
@ -276,8 +258,6 @@ extern Res PoolNoFramePush(AllocFrame *frameReturn, Pool pool, Buffer buf);
|
|||
extern Res PoolTrivFramePush(AllocFrame *frameReturn, Pool pool, Buffer buf);
|
||||
extern Res PoolNoFramePop(Pool pool, Buffer buf, AllocFrame frame);
|
||||
extern Res PoolTrivFramePop(Pool pool, Buffer buf, AllocFrame frame);
|
||||
extern void PoolNoWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *p, size_t s);
|
||||
extern void PoolTrivFreeWalk(Pool pool, FreeBlockVisitor f, void *p);
|
||||
extern PoolDebugMixin PoolNoDebugMixin(Pool pool);
|
||||
extern BufferClass PoolNoBufferClass(void);
|
||||
|
|
@ -286,17 +266,14 @@ extern Size PoolNoSize(Pool pool);
|
|||
|
||||
/* Abstract Pool Classes Interface -- see <code/poolabs.c> */
|
||||
extern void PoolClassMixInBuffer(PoolClass klass);
|
||||
extern void PoolClassMixInScan(PoolClass klass);
|
||||
extern void PoolClassMixInFormat(PoolClass klass);
|
||||
extern void PoolClassMixInCollect(PoolClass klass);
|
||||
DECLARE_CLASS(Inst, PoolClass, InstClass);
|
||||
DECLARE_CLASS(Pool, AbstractPool, Inst);
|
||||
DECLARE_CLASS(Pool, AbstractBufferPool, AbstractPool);
|
||||
DECLARE_CLASS(Pool, AbstractSegBufPool, AbstractBufferPool);
|
||||
DECLARE_CLASS(Pool, AbstractScanPool, AbstractSegBufPool);
|
||||
typedef Pool AbstractCollectPool;
|
||||
#define AbstractCollectPoolCheck PoolCheck
|
||||
DECLARE_CLASS(Pool, AbstractCollectPool, AbstractScanPool);
|
||||
DECLARE_CLASS(Pool, AbstractCollectPool, AbstractSegBufPool);
|
||||
|
||||
|
||||
/* Message Interface -- see <design/message/> */
|
||||
|
|
@ -667,6 +644,21 @@ extern void SegSetRankSet(Seg seg, RankSet rankSet);
|
|||
extern void SegSetRankAndSummary(Seg seg, RankSet rankSet, RefSet summary);
|
||||
extern Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi);
|
||||
extern Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at);
|
||||
extern Res SegAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
extern Res SegWholeAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
extern Res SegSingleAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
extern Res SegWhiten(Seg seg, Trace trace);
|
||||
extern void SegGreyen(Seg seg, Trace trace);
|
||||
extern void SegBlacken(Seg seg, TraceSet traceSet);
|
||||
extern Res SegScan(Bool *totalReturn, Seg seg, ScanState ss);
|
||||
extern Res SegFix(Seg seg, ScanState ss, Addr *refIO);
|
||||
extern Res SegFixEmergency(Seg seg, ScanState ss, Addr *refIO);
|
||||
extern void SegReclaim(Seg seg, Trace trace);
|
||||
extern void SegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *v, size_t s);
|
||||
extern Res SegAbsDescribe(Inst seg, mps_lib_FILE *stream, Count depth);
|
||||
extern Res SegDescribe(Seg seg, mps_lib_FILE *stream, Count depth);
|
||||
extern void SegSetSummary(Seg seg, RefSet summary);
|
||||
|
|
|
|||
|
|
@ -57,21 +57,13 @@ typedef struct mps_pool_class_s {
|
|||
PoolInitMethod init; /* initialize the pool descriptor */
|
||||
PoolAllocMethod alloc; /* allocate memory from pool */
|
||||
PoolFreeMethod free; /* free memory to pool */
|
||||
PoolSegPoolGenMethod segPoolGen; /* get pool generation of segment */
|
||||
PoolBufferFillMethod bufferFill; /* out-of-line reserve */
|
||||
PoolBufferEmptyMethod bufferEmpty; /* out-of-line commit */
|
||||
PoolAccessMethod access; /* handles read/write accesses */
|
||||
PoolWhitenMethod whiten; /* whiten objects in a segment */
|
||||
PoolGreyMethod grey; /* grey non-white objects */
|
||||
PoolBlackenMethod blacken; /* blacken grey objects without scanning */
|
||||
PoolScanMethod scan; /* find references during tracing */
|
||||
PoolFixMethod fix; /* referent reachable during tracing */
|
||||
PoolFixMethod fixEmergency; /* as fix, no failure allowed */
|
||||
PoolReclaimMethod reclaim; /* reclaim dead objects after tracing */
|
||||
PoolRampBeginMethod rampBegin;/* begin a ramp pattern */
|
||||
PoolRampEndMethod rampEnd; /* end a ramp pattern */
|
||||
PoolFramePushMethod framePush; /* push an allocation frame */
|
||||
PoolFramePopMethod framePop; /* pop an allocation frame */
|
||||
PoolWalkMethod walk; /* walk over a segment */
|
||||
PoolFreeWalkMethod freewalk; /* walk over free blocks */
|
||||
PoolBufferClassMethod bufferClass; /* default BufferClass of pool */
|
||||
PoolDebugMixinMethod debugMixin; /* find the debug mixin, if any */
|
||||
|
|
@ -102,9 +94,9 @@ typedef struct mps_pool_s { /* generic structure */
|
|||
RingStruct bufferRing; /* allocation buffers are attached to pool */
|
||||
Serial bufferSerial; /* serial of next buffer */
|
||||
RingStruct segRing; /* segs are attached to pool */
|
||||
Align alignment; /* alignment for units */
|
||||
Format format; /* format only if class->attr&AttrFMT */
|
||||
PoolFixMethod fix; /* fix method */
|
||||
Align alignment; /* alignment for grains */
|
||||
Shift alignShift; /* log2(alignment) */
|
||||
Format format; /* format or NULL */
|
||||
} PoolStruct;
|
||||
|
||||
|
||||
|
|
@ -228,6 +220,15 @@ typedef struct SegClassStruct {
|
|||
SegSetRankSummaryMethod setRankSummary; /* change rank set & summary */
|
||||
SegMergeMethod merge; /* merge two adjacent segments */
|
||||
SegSplitMethod split; /* split a segment into two */
|
||||
SegAccessMethod access; /* handles read/write accesses */
|
||||
SegWhitenMethod whiten; /* whiten objects */
|
||||
SegGreyenMethod greyen; /* greyen non-white objects */
|
||||
SegBlackenMethod blacken; /* blacken grey objects without scanning */
|
||||
SegScanMethod scan; /* find references during tracing */
|
||||
SegFixMethod fix; /* referent reachable during tracing */
|
||||
SegFixMethod fixEmergency; /* as fix, no failure allowed */
|
||||
SegReclaimMethod reclaim; /* reclaim dead objects after tracing */
|
||||
SegWalkMethod walk; /* walk over a segment */
|
||||
Sig sig; /* .class.end-sig */
|
||||
} SegClassStruct;
|
||||
|
||||
|
|
@ -417,8 +418,7 @@ typedef struct ScanStateStruct {
|
|||
Sig sig; /* <design/sig/> */
|
||||
struct mps_ss_s ss_s; /* .ss <http://bash.org/?400459> */
|
||||
Arena arena; /* owning arena */
|
||||
PoolFixMethod fix; /* third stage fix function */
|
||||
void *fixClosure; /* closure data for fix */
|
||||
SegFixMethod fix; /* third stage fix function */
|
||||
TraceSet traces; /* traces to scan for */
|
||||
Rank rank; /* reference rank of scanning */
|
||||
Bool wasMarked; /* design.mps.fix.protocol.was-ready */
|
||||
|
|
@ -449,8 +449,7 @@ typedef struct TraceStruct {
|
|||
TraceState state; /* current state of trace */
|
||||
Rank band; /* current band */
|
||||
Bool firstStretch; /* in first stretch of band (see accessor) */
|
||||
PoolFixMethod fix; /* fix method to apply to references */
|
||||
void *fixClosure; /* closure information for fix method */
|
||||
SegFixMethod fix; /* fix method to apply to references */
|
||||
Chain chain; /* chain being incrementally collected */
|
||||
STATISTIC_DECL(Size preTraceArenaReserved) /* ArenaReserved before this trace */
|
||||
Size condemned; /* condemned bytes */
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* mpmtypes.h: MEMORY POOL MANAGER TYPES
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
* Portions copyright (c) 2001 Global Graphics Software.
|
||||
*
|
||||
* .design: <design/type/>
|
||||
|
|
@ -62,7 +62,7 @@ typedef unsigned TraceSet; /* <design/trace/> */
|
|||
typedef unsigned TraceState; /* <design/trace/> */
|
||||
typedef unsigned AccessSet; /* <design/type/#access-set> */
|
||||
typedef unsigned Attr; /* <design/type/#attr> */
|
||||
typedef int RootVar; /* <design/type/#rootvar> */
|
||||
typedef unsigned RootVar; /* <design/type/#rootvar> */
|
||||
|
||||
typedef Word *BT; /* <design/bt/> */
|
||||
typedef struct BootBlockStruct *BootBlock; /* <code/boot.c> */
|
||||
|
|
@ -86,7 +86,7 @@ typedef struct SegStruct *Seg; /* <code/seg.c> */
|
|||
typedef struct GCSegStruct *GCSeg; /* <code/seg.c> */
|
||||
typedef struct SegClassStruct *SegClass; /* <code/seg.c> */
|
||||
typedef struct LocusPrefStruct *LocusPref; /* <design/locus/>, <code/locus.c> */
|
||||
typedef int LocusPrefKind; /* <design/locus/>, <code/locus.c> */
|
||||
typedef unsigned LocusPrefKind; /* <design/locus/>, <code/locus.c> */
|
||||
typedef struct mps_arena_class_s *ArenaClass; /* <design/arena/> */
|
||||
typedef struct mps_arena_s *Arena; /* <design/arena/> */
|
||||
typedef Arena AbstractArena;
|
||||
|
|
@ -105,6 +105,7 @@ typedef struct LandClassStruct *LandClass; /* <design/land/> */
|
|||
typedef unsigned FindDelete; /* <design/land/> */
|
||||
typedef struct ShieldStruct *Shield; /* design.mps.shield */
|
||||
typedef struct HistoryStruct *History; /* design.mps.arena.ld */
|
||||
typedef struct PoolGenStruct *PoolGen; /* <design/strategy/> */
|
||||
|
||||
|
||||
/* Arena*Method -- see <code/mpmst.h#ArenaClassStruct> */
|
||||
|
|
@ -164,6 +165,17 @@ typedef Res (*SegMergeMethod)(Seg seg, Seg segHi,
|
|||
Addr base, Addr mid, Addr limit);
|
||||
typedef Res (*SegSplitMethod)(Seg seg, Seg segHi,
|
||||
Addr base, Addr mid, Addr limit);
|
||||
typedef Res (*SegAccessMethod)(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
typedef Res (*SegWhitenMethod)(Seg seg, Trace trace);
|
||||
typedef void (*SegGreyenMethod)(Seg seg, Trace trace);
|
||||
typedef void (*SegBlackenMethod)(Seg seg, TraceSet traceSet);
|
||||
typedef Res (*SegScanMethod)(Bool *totalReturn, Seg seg, ScanState ss);
|
||||
typedef Res (*SegFixMethod)(Seg seg, ScanState ss, Ref *refIO);
|
||||
typedef void (*SegReclaimMethod)(Seg seg, Trace trace);
|
||||
typedef void (*SegWalkMethod)(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *v, size_t s);
|
||||
|
||||
|
||||
/* Buffer*Method -- see <design/buffer/> */
|
||||
|
||||
|
|
@ -186,27 +198,17 @@ typedef void (*PoolVarargsMethod)(ArgStruct args[], va_list varargs);
|
|||
typedef Res (*PoolInitMethod)(Pool pool, Arena arena, PoolClass klass, ArgList args);
|
||||
typedef Res (*PoolAllocMethod)(Addr *pReturn, Pool pool, Size size);
|
||||
typedef void (*PoolFreeMethod)(Pool pool, Addr old, Size size);
|
||||
typedef PoolGen (*PoolSegPoolGenMethod)(Pool pool, Seg seg);
|
||||
typedef Res (*PoolBufferFillMethod)(Addr *baseReturn, Addr *limitReturn,
|
||||
Pool pool, Buffer buffer, Size size);
|
||||
typedef void (*PoolBufferEmptyMethod)(Pool pool, Buffer buffer,
|
||||
Addr init, Addr limit);
|
||||
typedef Res (*PoolAccessMethod)(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
typedef Res (*PoolWhitenMethod)(Pool pool, Trace trace, Seg seg);
|
||||
typedef void (*PoolGreyMethod)(Pool pool, Trace trace, Seg seg);
|
||||
typedef void (*PoolBlackenMethod)(Pool pool, TraceSet traceSet, Seg seg);
|
||||
typedef Res (*PoolScanMethod)(Bool *totalReturn, ScanState ss,
|
||||
Pool pool, Seg seg);
|
||||
typedef Res (*PoolFixMethod)(Pool pool, ScanState ss, Seg seg, Ref *refIO);
|
||||
typedef void (*PoolReclaimMethod)(Pool pool, Trace trace, Seg seg);
|
||||
typedef void (*PoolRampBeginMethod)(Pool pool, Buffer buf, Bool collectAll);
|
||||
typedef void (*PoolRampEndMethod)(Pool pool, Buffer buf);
|
||||
typedef Res (*PoolFramePushMethod)(AllocFrame *frameReturn,
|
||||
Pool pool, Buffer buf);
|
||||
typedef Res (*PoolFramePopMethod)(Pool pool, Buffer buf,
|
||||
AllocFrame frame);
|
||||
typedef void (*PoolWalkMethod)(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *v, size_t s);
|
||||
typedef void (*PoolFreeWalkMethod)(Pool pool, FreeBlockVisitor f, void *p);
|
||||
typedef BufferClass (*PoolBufferClassMethod)(void);
|
||||
typedef PoolDebugMixin (*PoolDebugMixinMethod)(Pool pool);
|
||||
|
|
@ -272,10 +274,9 @@ typedef Res (*LandFindInZonesMethod)(Bool *foundReturn, Range rangeReturn, Range
|
|||
#define TraceSetUNIV ((TraceSet)((1u << TraceLIMIT) - 1))
|
||||
#define RankSetEMPTY BS_EMPTY(RankSet)
|
||||
#define RankSetUNIV ((RankSet)((1u << RankLIMIT) - 1))
|
||||
#define AttrFMT ((Attr)(1<<0)) /* <design/type/#attr> */
|
||||
#define AttrGC ((Attr)(1<<1))
|
||||
#define AttrMOVINGGC ((Attr)(1<<2))
|
||||
#define AttrMASK (AttrFMT | AttrGC | AttrMOVINGGC)
|
||||
#define AttrGC ((Attr)(1<<0))
|
||||
#define AttrMOVINGGC ((Attr)(1<<1))
|
||||
#define AttrMASK (AttrGC | AttrMOVINGGC)
|
||||
|
||||
|
||||
/* Locus preferences */
|
||||
|
|
@ -426,7 +427,7 @@ typedef double WriteFD;
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -2,12 +2,16 @@
|
|||
*
|
||||
* $Id$
|
||||
*
|
||||
* Copyright (c) 2001 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
#include "mps.h"
|
||||
|
||||
#if !defined(MPS_OS_W3)
|
||||
#error "mpsiw3.c is specific to MPS_OS_W3"
|
||||
#endif
|
||||
|
||||
#include "mps.h"
|
||||
#include "mpswin.h"
|
||||
|
||||
SRCID(mpsiw3, "$Id$");
|
||||
|
|
@ -33,7 +37,7 @@ void mps_SEH_handler(void *p, size_t s)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2002 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
165
mps/code/pool.c
165
mps/code/pool.c
|
|
@ -16,7 +16,7 @@
|
|||
* Pool and PoolClass objects (create, destroy, check, various
|
||||
* accessors, and other miscellaneous functions).
|
||||
* .purpose.dispatch: Dispatch functions that implement the generic
|
||||
* function dispatch mechanism for Pool Classes (PoolAlloc, PoolFix,
|
||||
* function dispatch mechanism for Pool Classes (PoolAlloc, PoolFree,
|
||||
* etc.).
|
||||
*
|
||||
* SOURCES
|
||||
|
|
@ -45,21 +45,13 @@ Bool PoolClassCheck(PoolClass klass)
|
|||
CHECKL(FUNCHECK(klass->init));
|
||||
CHECKL(FUNCHECK(klass->alloc));
|
||||
CHECKL(FUNCHECK(klass->free));
|
||||
CHECKL(FUNCHECK(klass->segPoolGen));
|
||||
CHECKL(FUNCHECK(klass->bufferFill));
|
||||
CHECKL(FUNCHECK(klass->bufferEmpty));
|
||||
CHECKL(FUNCHECK(klass->access));
|
||||
CHECKL(FUNCHECK(klass->whiten));
|
||||
CHECKL(FUNCHECK(klass->grey));
|
||||
CHECKL(FUNCHECK(klass->blacken));
|
||||
CHECKL(FUNCHECK(klass->scan));
|
||||
CHECKL(FUNCHECK(klass->fix));
|
||||
CHECKL(FUNCHECK(klass->fixEmergency));
|
||||
CHECKL(FUNCHECK(klass->reclaim));
|
||||
CHECKL(FUNCHECK(klass->rampBegin));
|
||||
CHECKL(FUNCHECK(klass->rampEnd));
|
||||
CHECKL(FUNCHECK(klass->framePush));
|
||||
CHECKL(FUNCHECK(klass->framePop));
|
||||
CHECKL(FUNCHECK(klass->walk));
|
||||
CHECKL(FUNCHECK(klass->freewalk));
|
||||
CHECKL(FUNCHECK(klass->bufferClass));
|
||||
CHECKL(FUNCHECK(klass->debugMixin));
|
||||
|
|
@ -75,15 +67,6 @@ Bool PoolClassCheck(PoolClass klass)
|
|||
(klass->framePop == PoolNoFramePop));
|
||||
CHECKL((klass->rampBegin == PoolNoRampBegin) ==
|
||||
(klass->rampEnd == PoolNoRampEnd));
|
||||
|
||||
/* Check that pool classes that set attributes also override the
|
||||
methods they imply. */
|
||||
CHECKL(((klass->attr & AttrFMT) == 0) == (klass->walk == PoolNoWalk));
|
||||
if (klass != &CLASS_STATIC(AbstractCollectPool)) {
|
||||
CHECKL(((klass->attr & AttrGC) == 0) == (klass->fix == PoolNoFix));
|
||||
CHECKL(((klass->attr & AttrGC) == 0) == (klass->fixEmergency == PoolNoFix));
|
||||
CHECKL(((klass->attr & AttrGC) == 0) == (klass->reclaim == PoolNoReclaim));
|
||||
}
|
||||
|
||||
CHECKS(PoolClass, klass);
|
||||
return TRUE;
|
||||
|
|
@ -108,9 +91,10 @@ Bool PoolCheck(Pool pool)
|
|||
/* Cannot check pool->bufferSerial */
|
||||
CHECKD_NOSIG(Ring, &pool->segRing);
|
||||
CHECKL(AlignCheck(pool->alignment));
|
||||
/* Normally pool->format iff PoolHasAttr(pool, AttrFMT), but during
|
||||
pool initialization the class may not yet be set. */
|
||||
CHECKL(!PoolHasAttr(pool, AttrFMT) || pool->format != NULL);
|
||||
CHECKL(ShiftCheck(pool->alignShift));
|
||||
CHECKL(pool->alignment == PoolGrainsSize(pool, (Align)1));
|
||||
if (pool->format != NULL)
|
||||
CHECKD(Format, pool->format);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
|
@ -276,145 +260,14 @@ void PoolFree(Pool pool, Addr old, Size size)
|
|||
}
|
||||
|
||||
|
||||
Res PoolAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
/* PoolSegPoolGen -- get pool generation for a segment */
|
||||
|
||||
return Method(Pool, pool, access)(pool, seg, addr, mode, context);
|
||||
}
|
||||
|
||||
|
||||
/* PoolWhiten, PoolGrey, PoolBlacken -- change color of a segment in the pool */
|
||||
|
||||
Res PoolWhiten(Pool pool, Trace trace, Seg seg)
|
||||
PoolGen PoolSegPoolGen(Pool pool, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
AVER(PoolArena(pool) == trace->arena);
|
||||
AVER(SegPool(seg) == pool);
|
||||
return Method(Pool, pool, whiten)(pool, trace, seg);
|
||||
}
|
||||
|
||||
void PoolGrey(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
AVER(pool->arena == trace->arena);
|
||||
AVER(SegPool(seg) == pool);
|
||||
Method(Pool, pool, grey)(pool, trace, seg);
|
||||
}
|
||||
|
||||
void PoolBlacken(Pool pool, TraceSet traceSet, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(TraceSet, traceSet);
|
||||
AVERT(Seg, seg);
|
||||
AVER(SegPool(seg) == pool);
|
||||
Method(Pool, pool, blacken)(pool, traceSet, seg);
|
||||
}
|
||||
|
||||
|
||||
/* PoolScan -- scan a segment in the pool */
|
||||
|
||||
Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
{
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(ss->arena == pool->arena);
|
||||
|
||||
/* The segment must belong to the pool. */
|
||||
AVER(pool == SegPool(seg));
|
||||
|
||||
/* We check that either ss->rank is in the segment's
|
||||
* ranks, or that ss->rank is exact. The check is more complicated if
|
||||
* we actually have multiple ranks in a seg.
|
||||
* See <code/trace.c#scan.conservative> */
|
||||
AVER(ss->rank == RankEXACT || RankSetIsMember(SegRankSet(seg), ss->rank));
|
||||
|
||||
/* Should only scan segments which contain grey objects. */
|
||||
AVER(TraceSetInter(SegGrey(seg), ss->traces) != TraceSetEMPTY);
|
||||
|
||||
return Method(Pool, pool, scan)(totalReturn, ss, pool, seg);
|
||||
}
|
||||
|
||||
|
||||
/* PoolFix* -- fix a reference to an object in this pool
|
||||
*
|
||||
* See <design/pool/#req.fix>.
|
||||
*/
|
||||
|
||||
Res PoolFix(Pool pool, ScanState ss, Seg seg, Addr *refIO)
|
||||
{
|
||||
AVERT_CRITICAL(Pool, pool);
|
||||
AVERT_CRITICAL(ScanState, ss);
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVER_CRITICAL(pool == SegPool(seg));
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
|
||||
/* Should only be fixing references to white segments. */
|
||||
AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
|
||||
|
||||
return pool->fix(pool, ss, seg, refIO);
|
||||
}
|
||||
|
||||
Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO)
|
||||
{
|
||||
Res res;
|
||||
|
||||
AVERT_CRITICAL(Pool, pool);
|
||||
AVERT_CRITICAL(ScanState, ss);
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVER_CRITICAL(pool == SegPool(seg));
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
|
||||
/* Should only be fixing references to white segments. */
|
||||
AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
|
||||
|
||||
res = Method(Pool, pool, fixEmergency)(pool, ss, seg, refIO);
|
||||
AVER_CRITICAL(res == ResOK);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/* PoolReclaim -- reclaim a segment in the pool */
|
||||
|
||||
void PoolReclaim(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
AVERT_CRITICAL(Pool, pool);
|
||||
AVERT_CRITICAL(Trace, trace);
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVER_CRITICAL(pool->arena == trace->arena);
|
||||
AVER_CRITICAL(SegPool(seg) == pool);
|
||||
|
||||
/* There shouldn't be any grey things left for this trace. */
|
||||
AVER_CRITICAL(!TraceSetIsMember(SegGrey(seg), trace));
|
||||
/* Should only be reclaiming segments which are still white. */
|
||||
AVER_CRITICAL(TraceSetIsMember(SegWhite(seg), trace));
|
||||
|
||||
Method(Pool, pool, reclaim)(pool, trace, seg);
|
||||
}
|
||||
|
||||
|
||||
/* PoolWalk -- walk objects in this segment */
|
||||
|
||||
void PoolWalk(Pool pool, Seg seg, FormattedObjectsVisitor f, void *p, size_t s)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary values, hence can't be checked. */
|
||||
|
||||
Method(Pool, pool, walk)(pool, seg, f, p, s);
|
||||
return Method(Pool, pool, segPoolGen)(pool, seg);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -20,8 +20,7 @@
|
|||
* AbstractPoolClass - implements init, finish, describe
|
||||
* AbstractBufferPoolClass - implements the buffer protocol
|
||||
* AbstractSegBufPoolClass - uses SegBuf buffer class
|
||||
* AbstractScanPoolClass - implements basic scanning
|
||||
* AbstractCollectPoolClass - implements basic GC
|
||||
* AbstractCollectPoolClass - implements basic GC
|
||||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
|
@ -54,44 +53,12 @@ void PoolClassMixInBuffer(PoolClass klass)
|
|||
}
|
||||
|
||||
|
||||
/* PoolClassMixInScan -- mix in the protocol for scanning */
|
||||
|
||||
void PoolClassMixInScan(PoolClass klass)
|
||||
{
|
||||
/* Can't check klass because it's not initialized yet */
|
||||
klass->access = PoolSegAccess;
|
||||
klass->blacken = PoolTrivBlacken;
|
||||
klass->grey = PoolTrivGrey;
|
||||
/* scan is part of the scanning protocol, but there is no useful
|
||||
default method */
|
||||
klass->scan = PoolNoScan;
|
||||
}
|
||||
|
||||
|
||||
/* PoolClassMixInFormat -- mix in the protocol for formatted pools */
|
||||
|
||||
void PoolClassMixInFormat(PoolClass klass)
|
||||
{
|
||||
/* Can't check klass because it's not initialized yet */
|
||||
klass->attr |= AttrFMT;
|
||||
/* walk is part of the format protocol, but there is no useful
|
||||
default method */
|
||||
klass->walk = PoolNoWalk;
|
||||
}
|
||||
|
||||
|
||||
/* PoolClassMixInCollect -- mix in the protocol for GC */
|
||||
|
||||
void PoolClassMixInCollect(PoolClass klass)
|
||||
{
|
||||
/* Can't check klass because it's not initialized yet */
|
||||
klass->attr |= AttrGC;
|
||||
klass->whiten = PoolTrivWhiten;
|
||||
/* fix, fixEmergency and reclaim are part of the collection
|
||||
protocol, but there are no useful default methods for them */
|
||||
klass->fix = PoolNoFix;
|
||||
klass->fixEmergency = PoolNoFix;
|
||||
klass->reclaim = PoolNoReclaim;
|
||||
klass->rampBegin = PoolTrivRampBegin;
|
||||
klass->rampEnd = PoolTrivRampEnd;
|
||||
}
|
||||
|
|
@ -102,8 +69,6 @@ void PoolClassMixInCollect(PoolClass klass)
|
|||
|
||||
/* PoolAbsInit -- initialize an abstract pool instance */
|
||||
|
||||
static Res PoolAutoSetFix(Pool pool, ScanState ss, Seg seg, Ref *refIO);
|
||||
|
||||
Res PoolAbsInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
||||
{
|
||||
ArgStruct arg;
|
||||
|
|
@ -122,8 +87,8 @@ Res PoolAbsInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
RingInit(&pool->segRing);
|
||||
pool->bufferSerial = (Serial)0;
|
||||
pool->alignment = MPS_PF_ALIGN;
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
pool->format = NULL;
|
||||
pool->fix = PoolAutoSetFix;
|
||||
|
||||
if (ArgPick(&arg, args, MPS_KEY_FORMAT)) {
|
||||
Format format = arg.val.format;
|
||||
|
|
@ -182,6 +147,7 @@ void PoolAbsFinish(Inst inst)
|
|||
DEFINE_CLASS(Inst, PoolClass, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, PoolClass, InstClass);
|
||||
AVERT(InstClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Pool, AbstractPool, klass)
|
||||
|
|
@ -197,65 +163,39 @@ DEFINE_CLASS(Pool, AbstractPool, klass)
|
|||
klass->free = PoolNoFree;
|
||||
klass->bufferFill = PoolNoBufferFill;
|
||||
klass->bufferEmpty = PoolNoBufferEmpty;
|
||||
klass->access = PoolNoAccess;
|
||||
klass->whiten = PoolNoWhiten;
|
||||
klass->grey = PoolNoGrey;
|
||||
klass->blacken = PoolNoBlacken;
|
||||
klass->scan = PoolNoScan;
|
||||
klass->fix = PoolNoFix;
|
||||
klass->fixEmergency = PoolNoFix;
|
||||
klass->reclaim = PoolNoReclaim;
|
||||
klass->rampBegin = PoolNoRampBegin;
|
||||
klass->rampEnd = PoolNoRampEnd;
|
||||
klass->framePush = PoolNoFramePush;
|
||||
klass->framePop = PoolNoFramePop;
|
||||
klass->walk = PoolNoWalk;
|
||||
klass->segPoolGen = PoolNoSegPoolGen;
|
||||
klass->freewalk = PoolTrivFreeWalk;
|
||||
klass->bufferClass = PoolNoBufferClass;
|
||||
klass->debugMixin = PoolNoDebugMixin;
|
||||
klass->totalSize = PoolNoSize;
|
||||
klass->freeSize = PoolNoSize;
|
||||
klass->sig = PoolClassSig;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Pool, AbstractBufferPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AbstractBufferPool, AbstractPool);
|
||||
PoolClassMixInBuffer(klass);
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Pool, AbstractSegBufPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AbstractSegBufPool, AbstractBufferPool);
|
||||
klass->bufferClass = SegBufClassGet;
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Pool, AbstractScanPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AbstractScanPool, AbstractSegBufPool);
|
||||
PoolClassMixInScan(klass);
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Pool, AbstractCollectPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AbstractCollectPool, AbstractScanPool);
|
||||
INHERIT_CLASS(klass, AbstractCollectPool, AbstractSegBufPool);
|
||||
PoolClassMixInCollect(klass);
|
||||
}
|
||||
|
||||
|
||||
/* PoolAutoSetFix -- set fix method on first call
|
||||
*
|
||||
* The pool structure has a shortcut to the class fix method to avoid
|
||||
* an indirection on the critical path. This is the default value of
|
||||
* that shortcut, which replaces itself on the first call. This
|
||||
* avoids some tricky initialization.
|
||||
*/
|
||||
|
||||
static Res PoolAutoSetFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
{
|
||||
AVERC(AbstractCollectPool, pool);
|
||||
pool->fix = ClassOfPoly(Pool, pool)->fix;
|
||||
return pool->fix(pool, ss, seg, refIO);
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -297,6 +237,14 @@ void PoolTrivFree(Pool pool, Addr old, Size size)
|
|||
NOOP; /* trivial free has no effect */
|
||||
}
|
||||
|
||||
PoolGen PoolNoSegPoolGen(Pool pool, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(pool == SegPool(seg));
|
||||
NOTREACHED;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
Res PoolNoBufferFill(Addr *baseReturn, Addr *limitReturn,
|
||||
Pool pool, Buffer buffer, Size size)
|
||||
|
|
@ -373,6 +321,7 @@ Res PoolAbsDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
"arena $P ($U)\n",
|
||||
(WriteFP)pool->arena, (WriteFU)pool->arena->serial,
|
||||
"alignment $W\n", (WriteFW)pool->alignment,
|
||||
"alignShift $W\n", (WriteFW)pool->alignShift,
|
||||
NULL);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
|
|
@ -411,224 +360,6 @@ Res PoolTrivTraceBegin(Pool pool, Trace trace)
|
|||
return ResOK;
|
||||
}
|
||||
|
||||
/* NoAccess
|
||||
*
|
||||
* Should be used (for the access method) by Pool Classes which do
|
||||
* not expect to ever have pages which the mutator will fault on.
|
||||
* That is, no protected pages, or only pages which are inaccessible
|
||||
* by the mutator are protected.
|
||||
*/
|
||||
Res PoolNoAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
UNUSED(mode);
|
||||
UNUSED(context);
|
||||
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
|
||||
/* SegAccess
|
||||
*
|
||||
* See also PoolSingleAccess
|
||||
*
|
||||
* Should be used (for the access method) by Pool Classes which intend
|
||||
* to handle page faults by scanning the entire segment and lowering
|
||||
* the barrier.
|
||||
*/
|
||||
Res PoolSegAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVER(SegPool(seg) == pool);
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
|
||||
UNUSED(addr);
|
||||
UNUSED(context);
|
||||
TraceSegAccess(PoolArena(pool), seg, mode);
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* SingleAccess
|
||||
*
|
||||
* See also ArenaRead, and PoolSegAccess.
|
||||
*
|
||||
* Handles page faults by attempting emulation. If the faulting
|
||||
* instruction cannot be emulated then this function returns ResFAIL.
|
||||
*
|
||||
* Due to the assumptions made below, pool classes should only use
|
||||
* this function if all words in an object are tagged or traceable.
|
||||
*
|
||||
* .single-access.assume.ref: It currently assumes that the address
|
||||
* being faulted on contains a plain reference or a tagged non-reference.
|
||||
* .single-access.improve.format: Later this will be abstracted
|
||||
* through the cleint object format interface, so that
|
||||
* no such assumption is necessary.
|
||||
*/
|
||||
Res PoolSingleAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
Arena arena;
|
||||
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVER(SegPool(seg) == pool);
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
|
||||
arena = PoolArena(pool);
|
||||
|
||||
if (MutatorContextCanStepInstruction(context)) {
|
||||
Ref ref;
|
||||
Res res;
|
||||
|
||||
ShieldExpose(arena, seg);
|
||||
|
||||
if(mode & SegSM(seg) & AccessREAD) {
|
||||
/* Read access. */
|
||||
/* .single-access.assume.ref */
|
||||
/* .single-access.improve.format */
|
||||
ref = *(Ref *)addr;
|
||||
/* .tagging: Check that the reference is aligned to a word boundary */
|
||||
/* (we assume it is not a reference otherwise). */
|
||||
if(WordIsAligned((Word)ref, sizeof(Word))) {
|
||||
Rank rank;
|
||||
/* See the note in TraceRankForAccess */
|
||||
/* (<code/trace.c#scan.conservative>). */
|
||||
|
||||
rank = TraceRankForAccess(arena, seg);
|
||||
TraceScanSingleRef(arena->flippedTraces, rank, arena,
|
||||
seg, (Ref *)addr);
|
||||
}
|
||||
}
|
||||
res = MutatorContextStepInstruction(context);
|
||||
AVER(res == ResOK);
|
||||
|
||||
/* Update SegSummary according to the possibly changed reference. */
|
||||
ref = *(Ref *)addr;
|
||||
/* .tagging: ought to check the reference for a tag. But
|
||||
* this is conservative. */
|
||||
SegSetSummary(seg, RefSetAdd(arena, SegSummary(seg), ref));
|
||||
|
||||
ShieldCover(arena, seg);
|
||||
|
||||
return ResOK;
|
||||
} else {
|
||||
/* couldn't single-step instruction */
|
||||
return ResFAIL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Res PoolTrivWhiten(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
|
||||
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
|
||||
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
Res PoolNoWhiten(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
|
||||
void PoolNoGrey(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
NOTREACHED;
|
||||
}
|
||||
|
||||
void PoolTrivGrey(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
|
||||
/* If we had a (partially) white seg, then other parts of the */
|
||||
/* same seg might need to get greyed. In fact, all current pools */
|
||||
/* only ever Whiten a whole seg, so we never need to Greyen any */
|
||||
/* part of an already Whitened seg. So we hereby exclude white */
|
||||
/* segs. */
|
||||
/* @@@@ This should not really be called 'trivial'! */
|
||||
if(!TraceSetIsMember(SegWhite(seg), trace))
|
||||
SegSetGrey(seg, TraceSetSingle(trace));
|
||||
}
|
||||
|
||||
|
||||
void PoolNoBlacken(Pool pool, TraceSet traceSet, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(TraceSet, traceSet);
|
||||
AVERT(Seg, seg);
|
||||
NOTREACHED;
|
||||
}
|
||||
|
||||
void PoolTrivBlacken(Pool pool, TraceSet traceSet, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(TraceSet, traceSet);
|
||||
AVERT(Seg, seg);
|
||||
|
||||
/* The trivial blacken method does nothing; for pool classes which do */
|
||||
/* not keep additional colour information. */
|
||||
NOOP;
|
||||
}
|
||||
|
||||
|
||||
Res PoolNoScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
{
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
Res PoolNoFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Seg, seg);
|
||||
AVER(refIO != NULL);
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
void PoolNoReclaim(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
NOTREACHED;
|
||||
}
|
||||
|
||||
|
||||
void PoolNoRampBegin(Pool pool, Buffer buf, Bool collectAll)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
|
|
@ -701,20 +432,6 @@ Res PoolTrivFramePop(Pool pool, Buffer buf, AllocFrame frame)
|
|||
}
|
||||
|
||||
|
||||
void PoolNoWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary, hence can't be checked */
|
||||
UNUSED(p);
|
||||
UNUSED(s);
|
||||
|
||||
NOTREACHED;
|
||||
}
|
||||
|
||||
|
||||
void PoolTrivFreeWalk(Pool pool, FreeBlockVisitor f, void *p)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
|
|
|
|||
|
|
@ -23,10 +23,16 @@ typedef Bool (*amcPinnedFunction)(AMC amc, Nailboard board, Addr base, Addr limi
|
|||
|
||||
/* forward declarations */
|
||||
|
||||
static Res amcSegWhiten(Seg seg, Trace trace);
|
||||
static Res amcSegScan(Bool *totalReturn, Seg seg, ScanState ss);
|
||||
static void amcSegReclaim(Seg seg, Trace trace);
|
||||
static Bool amcSegHasNailboard(Seg seg);
|
||||
static Nailboard amcSegNailboard(Seg seg);
|
||||
static Bool AMCCheck(AMC amc);
|
||||
static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO);
|
||||
static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO);
|
||||
static Res amcSegFixEmergency(Seg seg, ScanState ss, Ref *refIO);
|
||||
static void amcSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s);
|
||||
|
||||
/* local class declations */
|
||||
|
||||
|
|
@ -162,6 +168,20 @@ static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
|||
}
|
||||
|
||||
|
||||
/* amcSegFinish -- finish an AMC segment */
|
||||
|
||||
static void amcSegFinish(Inst inst)
|
||||
{
|
||||
Seg seg = MustBeA(Seg, inst);
|
||||
amcSeg amcseg = MustBeA(amcSeg, seg);
|
||||
|
||||
amcseg->sig = SigInvalid;
|
||||
|
||||
/* finish the superclass fields last */
|
||||
NextMethod(Inst, amcSeg, finish)(inst);
|
||||
}
|
||||
|
||||
|
||||
/* AMCSegSketch -- summarise the segment state for a human reader
|
||||
*
|
||||
* Write a short human-readable text representation of the segment
|
||||
|
|
@ -334,8 +354,16 @@ DEFINE_CLASS(Seg, amcSeg, klass)
|
|||
INHERIT_CLASS(klass, amcSeg, GCSeg);
|
||||
SegClassMixInNoSplitMerge(klass); /* no support for this (yet) */
|
||||
klass->instClassStruct.describe = AMCSegDescribe;
|
||||
klass->instClassStruct.finish = amcSegFinish;
|
||||
klass->size = sizeof(amcSegStruct);
|
||||
klass->init = AMCSegInit;
|
||||
klass->whiten = amcSegWhiten;
|
||||
klass->scan = amcSegScan;
|
||||
klass->fix = amcSegFix;
|
||||
klass->fixEmergency = amcSegFixEmergency;
|
||||
klass->reclaim = amcSegReclaim;
|
||||
klass->walk = amcSegWalk;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -527,6 +555,7 @@ DEFINE_CLASS(Buffer, amcBuf, klass)
|
|||
klass->instClassStruct.finish = AMCBufFinish;
|
||||
klass->size = sizeof(amcBufStruct);
|
||||
klass->init = AMCBufInit;
|
||||
AVERT(BufferClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -622,9 +651,10 @@ static Res amcGenDescribe(amcGen gen, mps_lib_FILE *stream, Count depth)
|
|||
|
||||
/* amcSegCreateNailboard -- create nailboard for segment */
|
||||
|
||||
static Res amcSegCreateNailboard(Seg seg, Pool pool)
|
||||
static Res amcSegCreateNailboard(Seg seg)
|
||||
{
|
||||
amcSeg amcseg = MustBeA(amcSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
Nailboard board;
|
||||
Arena arena;
|
||||
Res res;
|
||||
|
|
@ -731,6 +761,7 @@ static Res amcInitComm(Pool pool, Arena arena, PoolClass klass,
|
|||
AVER(pool->format != NULL);
|
||||
|
||||
pool->alignment = pool->format->alignment;
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
amc->rankSet = rankSet;
|
||||
|
||||
RingInit(&amc->genRing);
|
||||
|
|
@ -1101,18 +1132,30 @@ static void AMCRampEnd(Pool pool, Buffer buf)
|
|||
}
|
||||
|
||||
|
||||
/* AMCWhiten -- condemn the segment for the trace
|
||||
/* amcSegPoolGen -- get pool generation for a segment */
|
||||
|
||||
static PoolGen amcSegPoolGen(Pool pool, Seg seg)
|
||||
{
|
||||
amcSeg amcseg = MustBeA(amcSeg, seg);
|
||||
AVERT(Pool, pool);
|
||||
AVER(pool == SegPool(seg));
|
||||
return &amcseg->gen->pgen;
|
||||
}
|
||||
|
||||
|
||||
/* amcSegWhiten -- condemn the segment for the trace
|
||||
*
|
||||
* If the segment has a mutator buffer on it, we nail the buffer,
|
||||
* because we can't scan or reclaim uncommitted buffers.
|
||||
*/
|
||||
static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
|
||||
static Res amcSegWhiten(Seg seg, Trace trace)
|
||||
{
|
||||
Size condemned = 0;
|
||||
amcGen gen;
|
||||
AMC amc = MustBeA(AMCZPool, pool);
|
||||
Buffer buffer;
|
||||
amcSeg amcseg = MustBeA(amcSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
AMC amc = MustBeA(AMCZPool, pool);
|
||||
Res res;
|
||||
|
||||
AVERT(Trace, trace);
|
||||
|
|
@ -1141,7 +1184,7 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
|
|||
/* There is an active buffer, make sure it's nailed. */
|
||||
if(!amcSegHasNailboard(seg)) {
|
||||
if(SegNailed(seg) == TraceSetEMPTY) {
|
||||
res = amcSegCreateNailboard(seg, pool);
|
||||
res = amcSegCreateNailboard(seg);
|
||||
if(res != ResOK) {
|
||||
/* Can't create nailboard, don't condemn. */
|
||||
return ResOK;
|
||||
|
|
@ -1170,7 +1213,7 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
|
|||
/* Move the buffer's base up to the scan limit, so that we can
|
||||
* detect allocation that happens during the trace, and
|
||||
* account for it correctly in AMCBufferEmpty and
|
||||
* amcReclaimNailed. */
|
||||
* amcSegReclaimNailed. */
|
||||
buffer->base = bufferScanLimit;
|
||||
/* We didn't condemn the buffer, subtract it from the count. */
|
||||
/* Relies on unsigned arithmetic wrapping round */
|
||||
|
|
@ -1216,16 +1259,15 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
|
|||
}
|
||||
|
||||
|
||||
/* amcScanNailedRange -- make one scanning pass over a range of
|
||||
/* amcSegScanNailedRange -- make one scanning pass over a range of
|
||||
* addresses in a nailed segment.
|
||||
*
|
||||
* *totalReturn is set to FALSE if not all the objects between base and
|
||||
* limit have been scanned. It is not touched otherwise.
|
||||
*/
|
||||
static Res amcScanNailedRange(Bool *totalReturn, Bool *moreReturn,
|
||||
ScanState ss,
|
||||
AMC amc, Nailboard board,
|
||||
Addr base, Addr limit)
|
||||
static Res amcSegScanNailedRange(Bool *totalReturn, Bool *moreReturn,
|
||||
ScanState ss, AMC amc, Nailboard board,
|
||||
Addr base, Addr limit)
|
||||
{
|
||||
Format format;
|
||||
Size headerSize;
|
||||
|
|
@ -1256,7 +1298,7 @@ static Res amcScanNailedRange(Bool *totalReturn, Bool *moreReturn,
|
|||
}
|
||||
|
||||
|
||||
/* amcScanNailedOnce -- make one scanning pass over a nailed segment
|
||||
/* amcSegScanNailedOnce -- make one scanning pass over a nailed segment
|
||||
*
|
||||
* *totalReturn is set to TRUE iff all objects in segment scanned.
|
||||
* *moreReturn is set to FALSE only if there are no more objects
|
||||
|
|
@ -1265,8 +1307,8 @@ static Res amcScanNailedRange(Bool *totalReturn, Bool *moreReturn,
|
|||
* also if during emergency fixing any new marks got added to the
|
||||
* nailboard.
|
||||
*/
|
||||
static Res amcScanNailedOnce(Bool *totalReturn, Bool *moreReturn,
|
||||
ScanState ss, Seg seg, AMC amc)
|
||||
static Res amcSegScanNailedOnce(Bool *totalReturn, Bool *moreReturn,
|
||||
ScanState ss, Seg seg, AMC amc)
|
||||
{
|
||||
Addr p, limit;
|
||||
Nailboard board;
|
||||
|
|
@ -1286,8 +1328,8 @@ static Res amcScanNailedOnce(Bool *totalReturn, Bool *moreReturn,
|
|||
AVER(p == limit);
|
||||
goto returnGood;
|
||||
}
|
||||
res = amcScanNailedRange(totalReturn, moreReturn,
|
||||
ss, amc, board, p, limit);
|
||||
res = amcSegScanNailedRange(totalReturn, moreReturn,
|
||||
ss, amc, board, p, limit);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
p = limit;
|
||||
|
|
@ -1295,8 +1337,8 @@ static Res amcScanNailedOnce(Bool *totalReturn, Bool *moreReturn,
|
|||
|
||||
limit = SegLimit(seg);
|
||||
/* @@@@ Shouldn't p be set to BufferLimit here?! */
|
||||
res = amcScanNailedRange(totalReturn, moreReturn,
|
||||
ss, amc, board, p, limit);
|
||||
res = amcSegScanNailedRange(totalReturn, moreReturn,
|
||||
ss, amc, board, p, limit);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
|
||||
|
|
@ -1308,17 +1350,17 @@ returnGood:
|
|||
}
|
||||
|
||||
|
||||
/* amcScanNailed -- scan a nailed segment */
|
||||
/* amcSegScanNailed -- scan a nailed segment */
|
||||
|
||||
static Res amcScanNailed(Bool *totalReturn, ScanState ss, Pool pool,
|
||||
Seg seg, AMC amc)
|
||||
static Res amcSegScanNailed(Bool *totalReturn, ScanState ss, Pool pool,
|
||||
Seg seg, AMC amc)
|
||||
{
|
||||
Bool total, moreScanning;
|
||||
size_t loops = 0;
|
||||
|
||||
do {
|
||||
Res res;
|
||||
res = amcScanNailedOnce(&total, &moreScanning, ss, seg, amc);
|
||||
res = amcSegScanNailedOnce(&total, &moreScanning, ss, seg, amc);
|
||||
if(res != ResOK) {
|
||||
*totalReturn = FALSE;
|
||||
return res;
|
||||
|
|
@ -1354,27 +1396,29 @@ static Res amcScanNailed(Bool *totalReturn, ScanState ss, Pool pool,
|
|||
}
|
||||
|
||||
|
||||
/* AMCScan -- scan a single seg, turning it black
|
||||
/* amcSegScan -- scan a single seg, turning it black
|
||||
*
|
||||
* See <design/poolamc/#seg-scan>.
|
||||
*/
|
||||
static Res AMCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
static Res amcSegScan(Bool *totalReturn, Seg seg, ScanState ss)
|
||||
{
|
||||
Addr base, limit;
|
||||
Format format;
|
||||
AMC amc = MustBeA(AMCZPool, pool);
|
||||
Pool pool;
|
||||
AMC amc;
|
||||
Res res;
|
||||
Buffer buffer;
|
||||
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(ScanState, ss);
|
||||
|
||||
|
||||
pool = SegPool(seg);
|
||||
amc = MustBeA(AMCZPool, pool);
|
||||
format = pool->format;
|
||||
|
||||
if(amcSegHasNailboard(seg)) {
|
||||
return amcScanNailed(totalReturn, ss, pool, seg, amc);
|
||||
return amcSegScanNailed(totalReturn, ss, pool, seg, amc);
|
||||
}
|
||||
|
||||
EVENT3(AMCScanBegin, amc, seg, ss);
|
||||
|
|
@ -1418,7 +1462,7 @@ static Res AMCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
}
|
||||
|
||||
|
||||
/* amcFixInPlace -- fix an reference without moving the object
|
||||
/* amcSegFixInPlace -- fix a reference without moving the object
|
||||
*
|
||||
* Usually this function is used for ambiguous references, but during
|
||||
* emergency tracing may be used for references of any rank.
|
||||
|
|
@ -1426,12 +1470,10 @@ static Res AMCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
* If the segment has a nailboard then we use that to record the fix.
|
||||
* Otherwise we simply grey and nail the entire segment.
|
||||
*/
|
||||
static void amcFixInPlace(Pool pool, Seg seg, ScanState ss, Ref *refIO)
|
||||
static void amcSegFixInPlace(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
Addr ref;
|
||||
|
||||
UNUSED(pool);
|
||||
|
||||
ref = (Addr)*refIO;
|
||||
/* An ambiguous reference can point before the header. */
|
||||
AVER(SegBase(seg) <= ref);
|
||||
|
|
@ -1459,25 +1501,23 @@ static void amcFixInPlace(Pool pool, Seg seg, ScanState ss, Ref *refIO)
|
|||
}
|
||||
|
||||
|
||||
/* AMCFixEmergency -- fix a reference, without allocating
|
||||
/* amcSegFixEmergency -- fix a reference, without allocating
|
||||
*
|
||||
* See <design/poolamc/#emergency.fix>.
|
||||
*/
|
||||
static Res AMCFixEmergency(Pool pool, ScanState ss, Seg seg,
|
||||
Ref *refIO)
|
||||
static Res amcSegFixEmergency(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
Arena arena;
|
||||
Addr newRef;
|
||||
Pool pool;
|
||||
|
||||
AVERC(AMCZPool, pool);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(ScanState, ss);
|
||||
AVER(refIO != NULL);
|
||||
|
||||
pool = SegPool(seg);
|
||||
arena = PoolArena(pool);
|
||||
|
||||
ss->wasMarked = TRUE;
|
||||
|
||||
if(ss->rank == RankAMBIG)
|
||||
goto fixInPlace;
|
||||
|
||||
|
|
@ -1494,18 +1534,19 @@ static Res AMCFixEmergency(Pool pool, ScanState ss, Seg seg,
|
|||
}
|
||||
|
||||
fixInPlace: /* see <design/poolamc/>.Nailboard.emergency */
|
||||
amcFixInPlace(pool, seg, ss, refIO);
|
||||
amcSegFixInPlace(seg, ss, refIO);
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* AMCFix -- fix a reference to the pool
|
||||
/* amcSegFix -- fix a reference to the segment
|
||||
*
|
||||
* See <design/poolamc/#fix>.
|
||||
*/
|
||||
static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
Arena arena;
|
||||
Pool pool;
|
||||
AMC amc;
|
||||
Res res;
|
||||
Format format; /* cache of pool->format */
|
||||
|
|
@ -1523,16 +1564,11 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
Trace trace;
|
||||
|
||||
/* <design/trace/#fix.noaver> */
|
||||
AVERT_CRITICAL(Pool, pool);
|
||||
AVERT_CRITICAL(ScanState, ss);
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
EVENT0(AMCFix);
|
||||
|
||||
/* For the moment, assume that the object was already marked. */
|
||||
/* (See <design/fix/#protocol.was-marked>.) */
|
||||
ss->wasMarked = TRUE;
|
||||
|
||||
/* If the reference is ambiguous, set up the datastructures for */
|
||||
/* managing a nailed segment. This involves marking the segment */
|
||||
/* as nailed, and setting up a per-word mark table */
|
||||
|
|
@ -1542,20 +1578,21 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
/* rather than "!amcSegHasNailboard(seg)" because this avoids */
|
||||
/* setting up a new nailboard when the segment was nailed, but */
|
||||
/* had no nailboard. This must be avoided because otherwise */
|
||||
/* assumptions in AMCFixEmergency will be wrong (essentially */
|
||||
/* assumptions in amcSegFixEmergency will be wrong (essentially */
|
||||
/* we will lose some pointer fixes because we introduced a */
|
||||
/* nailboard). */
|
||||
if(SegNailed(seg) == TraceSetEMPTY) {
|
||||
res = amcSegCreateNailboard(seg, pool);
|
||||
res = amcSegCreateNailboard(seg);
|
||||
if(res != ResOK)
|
||||
return res;
|
||||
STATISTIC(++ss->nailCount);
|
||||
SegSetNailed(seg, TraceSetUnion(SegNailed(seg), ss->traces));
|
||||
}
|
||||
amcFixInPlace(pool, seg, ss, refIO);
|
||||
amcSegFixInPlace(seg, ss, refIO);
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
pool = SegPool(seg);
|
||||
amc = MustBeA_CRITICAL(AMCZPool, pool);
|
||||
AVERT_CRITICAL(AMC, amc);
|
||||
format = pool->format;
|
||||
|
|
@ -1598,8 +1635,7 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
/* Object is not preserved yet (neither moved, nor nailed) */
|
||||
/* so should be preserved by forwarding. */
|
||||
|
||||
/* <design/fix/#protocol.was-marked> */
|
||||
ss->wasMarked = FALSE;
|
||||
ss->wasMarked = FALSE; /* <design/fix/#was-marked.not> */
|
||||
|
||||
/* Get the forwarding buffer from the object's generation. */
|
||||
gen = amcSegGen(seg);
|
||||
|
|
@ -1660,9 +1696,9 @@ returnRes:
|
|||
}
|
||||
|
||||
|
||||
/* amcReclaimNailed -- reclaim what you can from a nailed segment */
|
||||
/* amcSegReclaimNailed -- reclaim what you can from a nailed segment */
|
||||
|
||||
static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
|
||||
static void amcSegReclaimNailed(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
Addr p, limit;
|
||||
Arena arena;
|
||||
|
|
@ -1768,19 +1804,18 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
|
|||
}
|
||||
|
||||
|
||||
/* AMCReclaim -- recycle a segment if it is still white
|
||||
/* amcSegReclaim -- recycle a segment if it is still white
|
||||
*
|
||||
* See <design/poolamc/#reclaim>.
|
||||
*/
|
||||
static void AMCReclaim(Pool pool, Trace trace, Seg seg)
|
||||
static void amcSegReclaim(Seg seg, Trace trace)
|
||||
{
|
||||
amcSeg amcseg = MustBeA_CRITICAL(amcSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
AMC amc = MustBeA_CRITICAL(AMCZPool, pool);
|
||||
amcGen gen;
|
||||
amcSeg amcseg;
|
||||
|
||||
AVERT_CRITICAL(Trace, trace);
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
amcseg = MustBeA_CRITICAL(amcSeg, seg);
|
||||
gen = amcSegGen(seg);
|
||||
AVERT_CRITICAL(amcGen, gen);
|
||||
|
||||
|
|
@ -1798,7 +1833,7 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg)
|
|||
}
|
||||
|
||||
if(SegNailed(seg) != TraceSetEMPTY) {
|
||||
amcReclaimNailed(pool, trace, seg);
|
||||
amcSegReclaimNailed(pool, trace, seg);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -1813,16 +1848,13 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg)
|
|||
}
|
||||
|
||||
|
||||
/* AMCWalk -- Apply function to (black) objects in segment */
|
||||
/* amcSegWalk -- Apply function to (black) objects in segment */
|
||||
|
||||
static void AMCWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
static void amcSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
Addr object, nextObject, limit;
|
||||
Format format;
|
||||
|
||||
AVERC(AMCZPool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Format, format);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary closures so can't be checked */
|
||||
|
||||
|
|
@ -1836,15 +1868,16 @@ static void AMCWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
|||
if(SegWhite(seg) == TraceSetEMPTY && SegGrey(seg) == TraceSetEMPTY
|
||||
&& SegNailed(seg) == TraceSetEMPTY)
|
||||
{
|
||||
format = pool->format;
|
||||
Addr object, nextObject, limit;
|
||||
Pool pool = SegPool(seg);
|
||||
|
||||
limit = AddrAdd(SegBufferScanLimit(seg), format->headerSize);
|
||||
object = AddrAdd(SegBase(seg), format->headerSize);
|
||||
while(object < limit) {
|
||||
/* Check not a broken heart. */
|
||||
AVER((*format->isMoved)(object) == NULL);
|
||||
(*f)(object, pool->format, pool, p, s);
|
||||
nextObject = (*pool->format->skip)(object);
|
||||
(*f)(object, format, pool, p, s);
|
||||
nextObject = (*format->skip)(object);
|
||||
AVER(nextObject > object);
|
||||
object = nextObject;
|
||||
}
|
||||
|
|
@ -1859,8 +1892,12 @@ static void amcWalkAll(Pool pool, FormattedObjectsVisitor f, void *p, size_t s)
|
|||
{
|
||||
Arena arena;
|
||||
Ring ring, next, node;
|
||||
Format format = NULL;
|
||||
Bool b;
|
||||
|
||||
AVER(IsA(AMCZPool, pool));
|
||||
b = PoolFormat(&format, pool);
|
||||
AVER(b);
|
||||
|
||||
arena = PoolArena(pool);
|
||||
ring = PoolSegRing(pool);
|
||||
|
|
@ -1869,7 +1906,7 @@ static void amcWalkAll(Pool pool, FormattedObjectsVisitor f, void *p, size_t s)
|
|||
Seg seg = SegOfPoolRing(node);
|
||||
|
||||
ShieldExpose(arena, seg);
|
||||
AMCWalk(pool, seg, f, p, s);
|
||||
amcSegWalk(seg, format, f, p, s);
|
||||
ShieldCover(arena, seg);
|
||||
}
|
||||
}
|
||||
|
|
@ -1976,7 +2013,6 @@ static Res AMCDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
DEFINE_CLASS(Pool, AMCZPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AMCZPool, AbstractSegBufPool);
|
||||
PoolClassMixInFormat(klass);
|
||||
PoolClassMixInCollect(klass);
|
||||
klass->instClassStruct.describe = AMCDescribe;
|
||||
klass->instClassStruct.finish = AMCFinish;
|
||||
|
|
@ -1986,16 +2022,13 @@ DEFINE_CLASS(Pool, AMCZPool, klass)
|
|||
klass->init = AMCZInit;
|
||||
klass->bufferFill = AMCBufferFill;
|
||||
klass->bufferEmpty = AMCBufferEmpty;
|
||||
klass->whiten = AMCWhiten;
|
||||
klass->fix = AMCFix;
|
||||
klass->fixEmergency = AMCFixEmergency;
|
||||
klass->reclaim = AMCReclaim;
|
||||
klass->rampBegin = AMCRampBegin;
|
||||
klass->rampEnd = AMCRampEnd;
|
||||
klass->walk = AMCWalk;
|
||||
klass->segPoolGen = amcSegPoolGen;
|
||||
klass->bufferClass = amcBufClassGet;
|
||||
klass->totalSize = AMCTotalSize;
|
||||
klass->freeSize = AMCFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -2004,9 +2037,8 @@ DEFINE_CLASS(Pool, AMCZPool, klass)
|
|||
DEFINE_CLASS(Pool, AMCPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AMCPool, AMCZPool);
|
||||
PoolClassMixInScan(klass);
|
||||
klass->init = AMCInit;
|
||||
klass->scan = AMCScan;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -26,6 +26,14 @@ SRCID(poolams, "$Id$");
|
|||
#define AMSSig ((Sig)0x519A3599) /* SIGnature AMS */
|
||||
#define AMSSegSig ((Sig)0x519A3559) /* SIGnature AMS SeG */
|
||||
|
||||
static void amsSegBlacken(Seg seg, TraceSet traceSet);
|
||||
static Res amsSegWhiten(Seg seg, Trace trace);
|
||||
static Res amsSegScan(Bool *totalReturn, Seg seg, ScanState ss);
|
||||
static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO);
|
||||
static void amsSegReclaim(Seg seg, Trace trace);
|
||||
static void amsSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s);
|
||||
|
||||
|
||||
/* AMSDebugStruct -- structure for a debug subclass */
|
||||
|
||||
|
|
@ -46,13 +54,14 @@ typedef struct AMSDebugStruct *AMSDebug;
|
|||
|
||||
Bool AMSSegCheck(AMSSeg amsseg)
|
||||
{
|
||||
Seg seg = AMSSeg2Seg(amsseg);
|
||||
Seg seg = MustBeA(Seg, amsseg);
|
||||
Pool pool = SegPool(seg);
|
||||
CHECKS(AMSSeg, amsseg);
|
||||
CHECKD(GCSeg, &amsseg->gcSegStruct);
|
||||
CHECKU(AMS, amsseg->ams);
|
||||
CHECKL(AMSPool(amsseg->ams) == SegPool(seg));
|
||||
|
||||
CHECKL(amsseg->grains == AMSGrains(amsseg->ams, SegSize(seg)));
|
||||
CHECKL(amsseg->grains == PoolSizeGrains(pool, SegSize(seg)));
|
||||
CHECKL(amsseg->grains > 0);
|
||||
CHECKL(amsseg->grains == amsseg->freeGrains + amsseg->bufferedGrains
|
||||
+ amsseg->oldGrains + amsseg->newGrains);
|
||||
|
|
@ -105,11 +114,13 @@ void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockVisitor f, void *p)
|
|||
next, amsseg->grains, 1);
|
||||
if (!found)
|
||||
break;
|
||||
(*f)(AMS_INDEX_ADDR(seg, base), AMS_INDEX_ADDR(seg, limit), pool, p);
|
||||
(*f)(PoolAddrOfIndex(SegBase(seg), pool, base),
|
||||
PoolAddrOfIndex(SegBase(seg), pool, limit), pool, p);
|
||||
next = limit + 1;
|
||||
}
|
||||
} else if (amsseg->firstFree < amsseg->grains)
|
||||
(*f)(AMS_INDEX_ADDR(seg, amsseg->firstFree), SegLimit(seg), pool, p);
|
||||
(*f)(PoolAddrOfIndex(SegBase(seg), pool, amsseg->firstFree),
|
||||
SegLimit(seg), pool, p);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -234,7 +245,7 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
|||
arena = PoolArena(pool);
|
||||
/* no useful checks for base and size */
|
||||
|
||||
amsseg->grains = size >> ams->grainShift;
|
||||
amsseg->grains = PoolSizeGrains(pool, size);
|
||||
amsseg->freeGrains = amsseg->grains;
|
||||
amsseg->bufferedGrains = (Count)0;
|
||||
amsseg->newGrains = (Count)0;
|
||||
|
|
@ -316,6 +327,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
|
|||
{
|
||||
Count loGrains, hiGrains, allGrains;
|
||||
AMSSeg amsseg, amssegHi;
|
||||
Pool pool;
|
||||
Arena arena;
|
||||
AMS ams;
|
||||
BT allocTable, nongreyTable, nonwhiteTable; /* .table-names */
|
||||
|
|
@ -328,15 +340,16 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
|
|||
AVERT(AMSSeg, amsseg);
|
||||
AVERT(AMSSeg, amssegHi);
|
||||
/* other parameters are checked by next-method */
|
||||
arena = PoolArena(SegPool(seg));
|
||||
ams = PoolAMS(SegPool(seg));
|
||||
pool = SegPool(seg);
|
||||
arena = PoolArena(pool);
|
||||
ams = PoolAMS(pool);
|
||||
|
||||
loGrains = amsseg->grains;
|
||||
hiGrains = amssegHi->grains;
|
||||
allGrains = loGrains + hiGrains;
|
||||
|
||||
/* checks for .grain-align */
|
||||
AVER(allGrains == AddrOffset(base, limit) >> ams->grainShift);
|
||||
AVER(allGrains == PoolSizeGrains(pool, AddrOffset(base, limit)));
|
||||
/* checks for .empty */
|
||||
AVER(amssegHi->freeGrains == hiGrains);
|
||||
AVER(!amssegHi->marksChanged);
|
||||
|
|
@ -397,6 +410,7 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
|
|||
{
|
||||
Count loGrains, hiGrains, allGrains;
|
||||
AMSSeg amsseg, amssegHi;
|
||||
Pool pool;
|
||||
Arena arena;
|
||||
AMS ams;
|
||||
BT allocTableLo, nongreyTableLo, nonwhiteTableLo; /* .table-names */
|
||||
|
|
@ -409,11 +423,12 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
|
|||
amssegHi = Seg2AMSSeg(segHi);
|
||||
AVERT(AMSSeg, amsseg);
|
||||
/* other parameters are checked by next-method */
|
||||
arena = PoolArena(SegPool(seg));
|
||||
ams = PoolAMS(SegPool(seg));
|
||||
pool = SegPool(seg);
|
||||
arena = PoolArena(pool);
|
||||
ams = PoolAMS(pool);
|
||||
|
||||
loGrains = AMSGrains(ams, AddrOffset(base, mid));
|
||||
hiGrains = AMSGrains(ams, AddrOffset(mid, limit));
|
||||
loGrains = PoolSizeGrains(pool, AddrOffset(base, mid));
|
||||
hiGrains = PoolSizeGrains(pool, AddrOffset(mid, limit));
|
||||
allGrains = loGrains + hiGrains;
|
||||
|
||||
/* checks for .grain-align */
|
||||
|
|
@ -496,7 +511,9 @@ failCreateTablesLo:
|
|||
|
||||
#define WRITE_BUFFER_LIMIT(i, accessor, code) \
|
||||
BEGIN \
|
||||
if (hasBuffer && (i) == AMS_ADDR_INDEX(seg, accessor(buffer))) { \
|
||||
if (hasBuffer && \
|
||||
(i) == PoolIndexOfAddr(SegBase(seg), SegPool(seg), accessor(buffer))) \
|
||||
{ \
|
||||
Res _res = WriteF(stream, 0, code, NULL); \
|
||||
if (_res != ResOK) return _res; \
|
||||
} \
|
||||
|
|
@ -605,6 +622,13 @@ DEFINE_CLASS(Seg, AMSSeg, klass)
|
|||
klass->init = AMSSegInit;
|
||||
klass->merge = AMSSegMerge;
|
||||
klass->split = AMSSegSplit;
|
||||
klass->whiten = amsSegWhiten;
|
||||
klass->blacken = amsSegBlacken;
|
||||
klass->scan = amsSegScan;
|
||||
klass->fix = amsSegFix;
|
||||
klass->fixEmergency = amsSegFix;
|
||||
klass->reclaim = amsSegReclaim;
|
||||
klass->walk = amsSegWalk;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
|
@ -695,6 +719,7 @@ failSize:
|
|||
|
||||
static void AMSSegsDestroy(AMS ams)
|
||||
{
|
||||
Pool pool = AMSPool(ams);
|
||||
Ring ring, node, next; /* for iterating over the segments */
|
||||
|
||||
ring = PoolSegRing(AMSPool(ams));
|
||||
|
|
@ -707,9 +732,9 @@ static void AMSSegsDestroy(AMS ams)
|
|||
AVER(amsseg->bufferedGrains == 0);
|
||||
AMSSegFreeCheck(amsseg);
|
||||
PoolGenFree(ams->pgen, seg,
|
||||
AMSGrainsSize(ams, amsseg->freeGrains),
|
||||
AMSGrainsSize(ams, amsseg->oldGrains),
|
||||
AMSGrainsSize(ams, amsseg->newGrains),
|
||||
PoolGrainsSize(pool, amsseg->freeGrains),
|
||||
PoolGrainsSize(pool, amsseg->oldGrains),
|
||||
PoolGrainsSize(pool, amsseg->newGrains),
|
||||
FALSE);
|
||||
}
|
||||
}
|
||||
|
|
@ -782,7 +807,7 @@ static Res AMSInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
/* Ensure a format was supplied in the argument list. */
|
||||
AVER(pool->format != NULL);
|
||||
pool->alignment = pool->format->alignment;
|
||||
ams->grainShift = SizeLog2(PoolAlignment(pool));
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
/* .ambiguous.noshare: If the pool is required to support ambiguous */
|
||||
/* references, the alloc and white tables cannot be shared. */
|
||||
ams->shareAllocTable = !supportAmbiguous;
|
||||
|
|
@ -844,7 +869,7 @@ void AMSFinish(Inst inst)
|
|||
static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn,
|
||||
Seg seg, Size size)
|
||||
{
|
||||
AMS ams;
|
||||
Pool pool;
|
||||
AMSSeg amsseg;
|
||||
Size grains;
|
||||
Bool canAlloc; /* can we allocate in this segment? */
|
||||
|
|
@ -855,13 +880,12 @@ static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn,
|
|||
/* seg has already been checked, in AMSBufferFill. */
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
|
||||
ams = amsseg->ams;
|
||||
AVERT(AMS, ams);
|
||||
pool = SegPool(seg);
|
||||
|
||||
AVER(size > 0);
|
||||
AVER(SizeIsAligned(size, PoolAlignment(AMSPool(ams))));
|
||||
AVER(SizeIsAligned(size, PoolAlignment(pool)));
|
||||
|
||||
grains = AMSGrains(ams, size);
|
||||
grains = PoolSizeGrains(pool, size);
|
||||
AVER(grains > 0);
|
||||
if (grains > amsseg->grains)
|
||||
return FALSE;
|
||||
|
|
@ -931,7 +955,7 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
|
|||
seg = SegOfPoolRing(node);
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
AVERT_CRITICAL(AMSSeg, amsseg);
|
||||
if (amsseg->freeGrains >= AMSGrains(ams, size)) {
|
||||
if (amsseg->freeGrains >= PoolSizeGrains(pool, size)) {
|
||||
if (SegRankSet(seg) == rankSet
|
||||
&& !SegHasBuffer(seg)
|
||||
/* Can't use a white or grey segment, see d.m.p.fill.colour. */
|
||||
|
|
@ -953,7 +977,8 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
|
|||
|
||||
found:
|
||||
AVER(b);
|
||||
baseAddr = AMS_INDEX_ADDR(seg, base); limitAddr = AMS_INDEX_ADDR(seg, limit);
|
||||
baseAddr = PoolAddrOfIndex(SegBase(seg), pool, base);
|
||||
limitAddr = PoolAddrOfIndex(SegBase(seg), pool, limit);
|
||||
DebugPoolFreeCheck(pool, baseAddr, limitAddr);
|
||||
allocatedSize = AddrOffset(baseAddr, limitAddr);
|
||||
|
||||
|
|
@ -992,8 +1017,8 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
|
|||
amsseg = Seg2AMSSeg(seg);
|
||||
AVERT(AMSSeg, amsseg);
|
||||
|
||||
initIndex = AMS_ADDR_INDEX(seg, init);
|
||||
limitIndex = AMS_ADDR_INDEX(seg, limit);
|
||||
initIndex = PoolIndexOfAddr(SegBase(seg), pool, init);
|
||||
limitIndex = PoolIndexOfAddr(SegBase(seg), pool, limit);
|
||||
AVER(initIndex <= limitIndex);
|
||||
|
||||
if (init < limit) {
|
||||
|
|
@ -1012,14 +1037,13 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
|
|||
} else if (ams->shareAllocTable && amsseg->colourTablesInUse) {
|
||||
/* The nonwhiteTable is shared with allocTable and in use, so we
|
||||
* mustn't start using allocTable. In this case we know: 1. the
|
||||
* segment has been condemned (because colour tables are turned
|
||||
* on in AMSWhiten); 2. the segment has not yet been reclaimed
|
||||
* (because colour tables are turned off in AMSReclaim); 3. the
|
||||
* unused portion of the buffer is black (see AMSWhiten). So we
|
||||
* need to whiten the unused portion of the buffer. The
|
||||
* allocTable will be turned back on (if necessary) in
|
||||
* AMSReclaim, when we know that the nonwhite grains are exactly
|
||||
* the allocated grains.
|
||||
* segment has been condemned (because colour tables are turned on
|
||||
* in amsSegWhiten); 2. the segment has not yet been reclaimed
|
||||
* (because colour tables are turned off in amsSegReclaim); 3. the
|
||||
* unused portion of the buffer is black (see amsSegWhiten). So we
|
||||
* need to whiten the unused portion of the buffer. The allocTable
|
||||
* will be turned back on (if necessary) in amsSegReclaim, when we
|
||||
* know that the nonwhite grains are exactly the allocated grains.
|
||||
*/
|
||||
} else {
|
||||
/* start using allocTable */
|
||||
|
|
@ -1041,15 +1065,25 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
|
|||
amsseg->freeGrains += unusedGrains;
|
||||
amsseg->bufferedGrains = 0;
|
||||
amsseg->newGrains += usedGrains;
|
||||
PoolGenAccountForEmpty(ams->pgen, AMSGrainsSize(ams, usedGrains),
|
||||
AMSGrainsSize(ams, unusedGrains), FALSE);
|
||||
PoolGenAccountForEmpty(ams->pgen, PoolGrainsSize(pool, usedGrains),
|
||||
PoolGrainsSize(pool, unusedGrains), FALSE);
|
||||
}
|
||||
|
||||
|
||||
/* amsRangeWhiten -- Condemn a part of an AMS segment
|
||||
/* amsSegPoolGen -- get pool generation for an AMS segment */
|
||||
|
||||
static PoolGen amsSegPoolGen(Pool pool, Seg seg)
|
||||
{
|
||||
AMS ams = MustBeA(AMSPool, pool);
|
||||
AVERT(Seg, seg);
|
||||
return ams->pgen;
|
||||
}
|
||||
|
||||
|
||||
/* amsSegRangeWhiten -- Condemn a part of an AMS segment
|
||||
* Allow calling it with base = limit, to simplify the callers.
|
||||
*/
|
||||
static void amsRangeWhiten(Seg seg, Index base, Index limit)
|
||||
static void amsSegRangeWhiten(Seg seg, Index base, Index limit)
|
||||
{
|
||||
if (base != limit) {
|
||||
AMSSeg amsseg = Seg2AMSSeg(seg);
|
||||
|
|
@ -1062,24 +1096,17 @@ static void amsRangeWhiten(Seg seg, Index base, Index limit)
|
|||
}
|
||||
|
||||
|
||||
/* AMSWhiten -- the pool class segment condemning method */
|
||||
/* amsSegWhiten -- the pool class segment condemning method */
|
||||
|
||||
static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
|
||||
static Res amsSegWhiten(Seg seg, Trace trace)
|
||||
{
|
||||
AMS ams;
|
||||
AMSSeg amsseg;
|
||||
Buffer buffer; /* the seg's buffer, if it has one */
|
||||
Count agedGrains, uncondemnedGrains;
|
||||
|
||||
AVERT(Pool, pool);
|
||||
ams = PoolAMS(pool);
|
||||
AVERT(AMS, ams);
|
||||
AMSSeg amsseg = MustBeA(AMSSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
PoolGen pgen = PoolSegPoolGen(pool, seg);
|
||||
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
AVERT(AMSSeg, amsseg);
|
||||
|
||||
/* <design/poolams/#colour.single> */
|
||||
AVER(SegWhite(seg) == TraceSetEMPTY);
|
||||
|
|
@ -1096,7 +1123,7 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
|
|||
}
|
||||
|
||||
/* Start using allocTable as the white table, if so configured. */
|
||||
if (ams->shareAllocTable) {
|
||||
if (amsseg->ams->shareAllocTable) {
|
||||
if (amsseg->allocTableInUse) {
|
||||
/* During the collection, it can't use allocTable for AMS_ALLOCED, so */
|
||||
/* make it use firstFree. */
|
||||
|
|
@ -1110,25 +1137,25 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
|
|||
|
||||
if (SegBuffer(&buffer, seg)) { /* <design/poolams/#condemn.buffer> */
|
||||
Index scanLimitIndex, limitIndex;
|
||||
scanLimitIndex = AMS_ADDR_INDEX(seg, BufferScanLimit(buffer));
|
||||
limitIndex = AMS_ADDR_INDEX(seg, BufferLimit(buffer));
|
||||
scanLimitIndex = PoolIndexOfAddr(SegBase(seg), pool, BufferScanLimit(buffer));
|
||||
limitIndex = PoolIndexOfAddr(SegBase(seg), pool, BufferLimit(buffer));
|
||||
|
||||
amsRangeWhiten(seg, 0, scanLimitIndex);
|
||||
amsSegRangeWhiten(seg, 0, scanLimitIndex);
|
||||
if (scanLimitIndex < limitIndex)
|
||||
AMS_RANGE_BLACKEN(seg, scanLimitIndex, limitIndex);
|
||||
amsRangeWhiten(seg, limitIndex, amsseg->grains);
|
||||
amsSegRangeWhiten(seg, limitIndex, amsseg->grains);
|
||||
/* We didn't condemn the buffer, subtract it from the count. */
|
||||
uncondemnedGrains = limitIndex - scanLimitIndex;
|
||||
} else { /* condemn whole seg */
|
||||
amsRangeWhiten(seg, 0, amsseg->grains);
|
||||
amsSegRangeWhiten(seg, 0, amsseg->grains);
|
||||
uncondemnedGrains = (Count)0;
|
||||
}
|
||||
|
||||
/* The unused part of the buffer remains buffered: the rest becomes old. */
|
||||
AVER(amsseg->bufferedGrains >= uncondemnedGrains);
|
||||
agedGrains = amsseg->bufferedGrains - uncondemnedGrains;
|
||||
PoolGenAccountForAge(ams->pgen, AMSGrainsSize(ams, agedGrains),
|
||||
AMSGrainsSize(ams, amsseg->newGrains), FALSE);
|
||||
PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains),
|
||||
PoolGrainsSize(pool, amsseg->newGrains), FALSE);
|
||||
amsseg->oldGrains += agedGrains + amsseg->newGrains;
|
||||
amsseg->bufferedGrains = uncondemnedGrains;
|
||||
amsseg->newGrains = 0;
|
||||
|
|
@ -1136,8 +1163,8 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
|
|||
amsseg->ambiguousFixes = FALSE;
|
||||
|
||||
if (amsseg->oldGrains > 0) {
|
||||
GenDescCondemned(ams->pgen->gen, trace,
|
||||
AMSGrainsSize(ams, amsseg->oldGrains));
|
||||
GenDescCondemned(pgen->gen, trace,
|
||||
PoolGrainsSize(pool, amsseg->oldGrains));
|
||||
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
|
||||
} else {
|
||||
amsseg->colourTablesInUse = FALSE;
|
||||
|
|
@ -1160,16 +1187,16 @@ typedef Res (*AMSObjectFunction)(
|
|||
((f) != NULL) /* that's the best we can do */
|
||||
|
||||
|
||||
/* amsIterate -- applies a function to each object in a segment
|
||||
/* semSegIterate -- applies a function to each object in a segment
|
||||
*
|
||||
* amsIterate(seg, f, closure) applies f to all the objects in the
|
||||
* semSegIterate(seg, f, closure) applies f to all the objects in the
|
||||
* segment. It skips the buffer, if any (from BufferScanLimit to
|
||||
* BufferLimit). */
|
||||
|
||||
static Res amsIterate(Seg seg, AMSObjectFunction f, void *closure)
|
||||
static Res semSegIterate(Seg seg, AMSObjectFunction f, void *closure)
|
||||
{
|
||||
Res res;
|
||||
AMS ams;
|
||||
Pool pool;
|
||||
AMSSeg amsseg;
|
||||
Format format;
|
||||
Align alignment;
|
||||
|
|
@ -1184,15 +1211,15 @@ static Res amsIterate(Seg seg, AMSObjectFunction f, void *closure)
|
|||
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
AVERT(AMSSeg, amsseg);
|
||||
ams = amsseg->ams;
|
||||
AVERT(AMS, ams);
|
||||
format = AMSPool(ams)->format;
|
||||
pool = SegPool(seg);
|
||||
AVERT(Pool, pool);
|
||||
format = pool->format;
|
||||
AVERT(Format, format);
|
||||
alignment = PoolAlignment(AMSPool(ams));
|
||||
alignment = PoolAlignment(pool);
|
||||
|
||||
/* If we're using the alloc table as a white table, we can't use it to */
|
||||
/* determine where there are objects. */
|
||||
AVER(!(ams->shareAllocTable && amsseg->colourTablesInUse));
|
||||
AVER(!amsseg->ams->shareAllocTable || !amsseg->colourTablesInUse);
|
||||
|
||||
p = SegBase(seg);
|
||||
limit = SegLimit(seg);
|
||||
|
|
@ -1208,7 +1235,7 @@ static Res amsIterate(Seg seg, AMSObjectFunction f, void *closure)
|
|||
|| (p < BufferScanLimit(buffer))
|
||||
|| (p >= BufferLimit(buffer))); /* not in the buffer */
|
||||
|
||||
i = AMS_ADDR_INDEX(seg, p);
|
||||
i = PoolIndexOfAddr(SegBase(seg), pool, p);
|
||||
if (!AMS_ALLOCED(seg, i)) { /* no object here */
|
||||
if (amsseg->allocTableInUse) {
|
||||
Index dummy, nextIndex;
|
||||
|
|
@ -1219,7 +1246,7 @@ static Res amsIterate(Seg seg, AMSObjectFunction f, void *closure)
|
|||
i, amsseg->grains, 1);
|
||||
AVER(more);
|
||||
AVER(dummy == i);
|
||||
next = AMS_INDEX_ADDR(seg, nextIndex);
|
||||
next = PoolAddrOfIndex(SegBase(seg), pool, nextIndex);
|
||||
} else {
|
||||
/* If there's no allocTable, this is the free block at the end. */
|
||||
next = limit;
|
||||
|
|
@ -1247,7 +1274,7 @@ static Res amsIterate(Seg seg, AMSObjectFunction f, void *closure)
|
|||
|
||||
/* amsScanObject -- scan a single object
|
||||
*
|
||||
* This is the object function passed to amsIterate by AMSScan. */
|
||||
* This is the object function passed to semSegIterate by amsSegScan. */
|
||||
|
||||
struct amsScanClosureStruct {
|
||||
ScanState ss;
|
||||
|
|
@ -1264,7 +1291,7 @@ static Res amsScanObject(Seg seg, Index i, Addr p, Addr next, void *clos)
|
|||
Res res;
|
||||
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
/* seg & amsseg have already been checked, in amsIterate. */
|
||||
/* seg & amsseg have already been checked, in semSegIterate. */
|
||||
AVER(i < amsseg->grains);
|
||||
AVER(p != 0);
|
||||
AVER(p < next);
|
||||
|
|
@ -1285,7 +1312,7 @@ static Res amsScanObject(Seg seg, Index i, Addr p, Addr next, void *clos)
|
|||
if (res != ResOK)
|
||||
return res;
|
||||
if (!closure->scanAllObjects) {
|
||||
Index j = AMS_ADDR_INDEX(seg, next);
|
||||
Index j = PoolIndexOfAddr(SegBase(seg), SegPool(seg), next);
|
||||
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
|
||||
AMS_GREY_BLACKEN(seg, i);
|
||||
if (i+1 < j)
|
||||
|
|
@ -1297,29 +1324,23 @@ static Res amsScanObject(Seg seg, Index i, Addr p, Addr next, void *clos)
|
|||
}
|
||||
|
||||
|
||||
/* AMSScan -- the pool class segment scanning method
|
||||
/* amsSegScan -- the segment scanning method
|
||||
*
|
||||
* See <design/poolams/#scan>
|
||||
*/
|
||||
Res AMSScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
static Res amsSegScan(Bool *totalReturn, Seg seg, ScanState ss)
|
||||
{
|
||||
Res res;
|
||||
AMS ams;
|
||||
Arena arena;
|
||||
AMSSeg amsseg;
|
||||
AMSSeg amsseg = MustBeA(AMSSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
AMS ams = MustBeA(AMSPool, pool);
|
||||
Arena arena = PoolArena(pool);
|
||||
struct amsScanClosureStruct closureStruct;
|
||||
Format format;
|
||||
Align alignment;
|
||||
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Pool, pool);
|
||||
ams = PoolAMS(pool);
|
||||
AVERT(AMS, ams);
|
||||
arena = PoolArena(pool);
|
||||
AVERT(Seg, seg);
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
AVERT(AMSSeg, amsseg);
|
||||
|
||||
/* Check that we're not in the grey mutator phase (see */
|
||||
/* <design/poolams/#not-req.grey>). */
|
||||
|
|
@ -1331,7 +1352,7 @@ Res AMSScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
/* @@@@ This isn't quite right for multiple traces. */
|
||||
if (closureStruct.scanAllObjects) {
|
||||
/* The whole seg (except the buffer) is grey for some trace. */
|
||||
res = amsIterate(seg, amsScanObject, &closureStruct);
|
||||
res = semSegIterate(seg, amsScanObject, &closureStruct);
|
||||
if (res != ResOK) {
|
||||
*totalReturn = FALSE;
|
||||
return res;
|
||||
|
|
@ -1347,7 +1368,7 @@ Res AMSScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
amsseg->marksChanged = FALSE; /* <design/poolams/#marked.scan> */
|
||||
/* <design/poolams/#ambiguous.middle> */
|
||||
if (amsseg->ambiguousFixes) {
|
||||
res = amsIterate(seg, amsScanObject, &closureStruct);
|
||||
res = semSegIterate(seg, amsScanObject, &closureStruct);
|
||||
if (res != ResOK) {
|
||||
/* <design/poolams/#marked.scan.fail> */
|
||||
amsseg->marksChanged = TRUE;
|
||||
|
|
@ -1362,7 +1383,7 @@ Res AMSScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
&& AMSFindGrey(&i, &j, seg, j, amsseg->grains)) {
|
||||
Addr clientP, clientNext;
|
||||
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
|
||||
p = AMS_INDEX_ADDR(seg, i);
|
||||
p = PoolAddrOfIndex(SegBase(seg), pool, i);
|
||||
clientP = AddrAdd(p, format->headerSize);
|
||||
if (format->skip != NULL) {
|
||||
clientNext = (*format->skip)(clientP);
|
||||
|
|
@ -1371,7 +1392,7 @@ Res AMSScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
clientNext = AddrAdd(clientP, alignment);
|
||||
next = AddrAdd(p, alignment);
|
||||
}
|
||||
j = AMS_ADDR_INDEX(seg, next);
|
||||
j = PoolIndexOfAddr(SegBase(seg), pool, next);
|
||||
res = FormatScan(format, ss, clientP, clientNext);
|
||||
if (res != ResOK) {
|
||||
/* <design/poolams/#marked.scan.fail> */
|
||||
|
|
@ -1395,20 +1416,18 @@ Res AMSScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
}
|
||||
|
||||
|
||||
/* AMSFix -- the pool class fixing method */
|
||||
/* amsSegFix -- the segment fixing method */
|
||||
|
||||
static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
AMSSeg amsseg;
|
||||
AMSSeg amsseg = MustBeA_CRITICAL(AMSSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
Index i; /* the index of the fixed grain */
|
||||
Addr base;
|
||||
Ref clientRef;
|
||||
Format format;
|
||||
|
||||
AVERT_CRITICAL(Pool, pool);
|
||||
AVER_CRITICAL(TESTT(AMS, PoolAMS(pool)));
|
||||
AVERT_CRITICAL(ScanState, ss);
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
|
||||
format = pool->format;
|
||||
|
|
@ -1442,12 +1461,10 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
return ResOK;
|
||||
}
|
||||
|
||||
i = AMS_ADDR_INDEX(seg, base);
|
||||
i = PoolIndexOfAddr(SegBase(seg), pool, base);
|
||||
AVER_CRITICAL(i < amsseg->grains);
|
||||
AVER_CRITICAL(!AMS_IS_INVALID_COLOUR(seg, i));
|
||||
|
||||
ss->wasMarked = TRUE;
|
||||
|
||||
/* Not a real reference if unallocated. */
|
||||
if (!AMS_ALLOCED(seg, i)) {
|
||||
AVER(ss->rank == RankAMBIG);
|
||||
|
|
@ -1466,7 +1483,7 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
case RankFINAL:
|
||||
case RankWEAK:
|
||||
if (AMS_IS_WHITE(seg, i)) {
|
||||
ss->wasMarked = FALSE;
|
||||
ss->wasMarked = FALSE; /* <design/fix/#was-marked.not> */
|
||||
if (ss->rank == RankWEAK) { /* then splat the reference */
|
||||
*refIO = (Ref)0;
|
||||
} else {
|
||||
|
|
@ -1481,7 +1498,7 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
next = AddrSub(clientNext, format->headerSize);
|
||||
/* Part of the object might be grey, because of ambiguous */
|
||||
/* fixes, but that's OK, because scan will ignore that. */
|
||||
AMS_RANGE_WHITE_BLACKEN(seg, i, AMS_ADDR_INDEX(seg, next));
|
||||
AMS_RANGE_WHITE_BLACKEN(seg, i, PoolIndexOfAddr(SegBase(seg), pool, next));
|
||||
} else { /* turn it grey */
|
||||
AMS_WHITE_GREYEN(seg, i);
|
||||
SegSetGrey(seg, TraceSetUnion(SegGrey(seg), ss->traces));
|
||||
|
|
@ -1499,18 +1516,17 @@ static Res AMSFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
}
|
||||
|
||||
|
||||
/* AMSBlacken -- the pool class blackening method
|
||||
/* amsSegBlacken -- the segment blackening method
|
||||
*
|
||||
* Turn all grey objects black. */
|
||||
|
||||
|
||||
static Res amsBlackenObject(Seg seg, Index i, Addr p, Addr next, void *clos)
|
||||
static Res amsSegBlackenObject(Seg seg, Index i, Addr p, Addr next, void *clos)
|
||||
{
|
||||
UNUSED(p);
|
||||
AVER(clos == NULL);
|
||||
AVER(clos == UNUSED_POINTER);
|
||||
/* Do what amsScanObject does, minus the scanning. */
|
||||
if (AMS_IS_GREY(seg, i)) {
|
||||
Index j = AMS_ADDR_INDEX(seg, next);
|
||||
Index j = PoolIndexOfAddr(SegBase(seg), SegPool(seg), next);
|
||||
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
|
||||
AMS_GREY_BLACKEN(seg, i);
|
||||
if (i+1 < j)
|
||||
|
|
@ -1519,15 +1535,10 @@ static Res amsBlackenObject(Seg seg, Index i, Addr p, Addr next, void *clos)
|
|||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
static void AMSBlacken(Pool pool, TraceSet traceSet, Seg seg)
|
||||
static void amsSegBlacken(Seg seg, TraceSet traceSet)
|
||||
{
|
||||
AMS ams;
|
||||
Res res;
|
||||
|
||||
AVERT(Pool, pool);
|
||||
ams = PoolAMS(pool);
|
||||
AVERT(AMS, ams);
|
||||
AVERT(TraceSet, traceSet);
|
||||
AVERT(Seg, seg);
|
||||
|
||||
|
|
@ -1537,29 +1548,25 @@ static void AMSBlacken(Pool pool, TraceSet traceSet, Seg seg)
|
|||
AVERT(AMSSeg, amsseg);
|
||||
AVER(amsseg->marksChanged); /* there must be something grey */
|
||||
amsseg->marksChanged = FALSE;
|
||||
res = amsIterate(seg, amsBlackenObject, NULL);
|
||||
res = semSegIterate(seg, amsSegBlackenObject, UNUSED_POINTER);
|
||||
AVER(res == ResOK);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* AMSReclaim -- the pool class reclamation method */
|
||||
/* amsSegReclaim -- the segment reclamation method */
|
||||
|
||||
static void AMSReclaim(Pool pool, Trace trace, Seg seg)
|
||||
static void amsSegReclaim(Seg seg, Trace trace)
|
||||
{
|
||||
AMS ams;
|
||||
AMSSeg amsseg;
|
||||
AMSSeg amsseg = MustBeA(AMSSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
PoolGen pgen = PoolSegPoolGen(pool, seg);
|
||||
Count nowFree, grains, reclaimedGrains;
|
||||
Size preservedInPlaceSize;
|
||||
PoolDebugMixin debug;
|
||||
|
||||
AVERT(Pool, pool);
|
||||
ams = PoolAMS(pool);
|
||||
AVERT(AMS, ams);
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
/* It's a white seg, so it must have colour tables. */
|
||||
AVER(amsseg->colourTablesInUse);
|
||||
AVER(!amsseg->marksChanged); /* there must be nothing grey */
|
||||
|
|
@ -1572,7 +1579,8 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
|
|||
|
||||
while(j < grains && AMS_FIND_WHITE_RANGE(&i, &j, seg, j, grains)) {
|
||||
AVER(!AMS_IS_INVALID_COLOUR(seg, i));
|
||||
DebugPoolFreeSplat(pool, AMS_INDEX_ADDR(seg, i), AMS_INDEX_ADDR(seg, j));
|
||||
DebugPoolFreeSplat(pool, PoolAddrOfIndex(SegBase(seg), pool, i),
|
||||
PoolAddrOfIndex(SegBase(seg), pool, j));
|
||||
++j; /* we know next grain is not white */
|
||||
}
|
||||
}
|
||||
|
|
@ -1586,7 +1594,7 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
|
|||
|| BTIsResRange(amsseg->nonwhiteTable,
|
||||
amsseg->firstFree, grains));
|
||||
} else {
|
||||
if (ams->shareAllocTable) {
|
||||
if (amsseg->ams->shareAllocTable) {
|
||||
/* Stop using allocTable as the white table. */
|
||||
amsseg->allocTableInUse = TRUE;
|
||||
} else {
|
||||
|
|
@ -1599,11 +1607,11 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
|
|||
AVER(amsseg->oldGrains >= reclaimedGrains);
|
||||
amsseg->oldGrains -= reclaimedGrains;
|
||||
amsseg->freeGrains += reclaimedGrains;
|
||||
PoolGenAccountForReclaim(ams->pgen, AMSGrainsSize(ams, reclaimedGrains), FALSE);
|
||||
STATISTIC(trace->reclaimSize += AMSGrainsSize(ams, reclaimedGrains));
|
||||
PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
|
||||
STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains));
|
||||
/* preservedInPlaceCount is updated on fix */
|
||||
preservedInPlaceSize = AMSGrainsSize(ams, amsseg->oldGrains);
|
||||
GenDescSurvived(ams->pgen->gen, trace, 0, preservedInPlaceSize);
|
||||
preservedInPlaceSize = PoolGrainsSize(pool, amsseg->oldGrains);
|
||||
GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize);
|
||||
|
||||
/* Ensure consistency of segment even if are just about to free it */
|
||||
amsseg->colourTablesInUse = FALSE;
|
||||
|
|
@ -1612,37 +1620,28 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
|
|||
if (amsseg->freeGrains == grains && !SegHasBuffer(seg)) {
|
||||
/* No survivors */
|
||||
AVER(amsseg->bufferedGrains == 0);
|
||||
PoolGenFree(ams->pgen, seg,
|
||||
AMSGrainsSize(ams, amsseg->freeGrains),
|
||||
AMSGrainsSize(ams, amsseg->oldGrains),
|
||||
AMSGrainsSize(ams, amsseg->newGrains),
|
||||
PoolGenFree(pgen, seg,
|
||||
PoolGrainsSize(pool, amsseg->freeGrains),
|
||||
PoolGrainsSize(pool, amsseg->oldGrains),
|
||||
PoolGrainsSize(pool, amsseg->newGrains),
|
||||
FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* AMSWalk -- walk formatted objects in AMC pool */
|
||||
/* amsSegWalk -- walk formatted objects in AMC segment */
|
||||
|
||||
static void AMSWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
static void amsSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
AMS ams;
|
||||
AMSSeg amsseg;
|
||||
AMSSeg amsseg = MustBeA(AMSSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
Addr object, base, limit;
|
||||
Format format;
|
||||
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Format, format);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary closures and can't be checked */
|
||||
|
||||
ams = PoolAMS(pool);
|
||||
AVERT(AMS, ams);
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
AVERT(AMSSeg, amsseg);
|
||||
|
||||
format = pool->format;
|
||||
|
||||
base = SegBase(seg);
|
||||
object = base;
|
||||
limit = SegLimit(seg);
|
||||
|
|
@ -1664,7 +1663,7 @@ static void AMSWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
|||
/* either before the buffer, or after it, never in it */
|
||||
AVER(object < BufferGetInit(buffer) || BufferLimit(buffer) <= object);
|
||||
}
|
||||
i = AMS_ADDR_INDEX(seg, object);
|
||||
i = PoolIndexOfAddr(SegBase(seg), pool, object);
|
||||
if (!AMS_ALLOCED(seg, i)) {
|
||||
/* This grain is free */
|
||||
object = AddrAdd(object, PoolAlignment(pool));
|
||||
|
|
@ -1748,12 +1747,6 @@ static Res AMSDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
if (res != ResOK)
|
||||
return res;
|
||||
|
||||
res = WriteF(stream, depth + 2,
|
||||
"grain shift $U\n", (WriteFU)ams->grainShift,
|
||||
NULL);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
|
||||
res = WriteF(stream, depth + 2,
|
||||
"segments: * black + grey - white . alloc ! bad\n"
|
||||
"buffers: [ base < scan limit | init > alloc ] limit\n",
|
||||
|
|
@ -1780,7 +1773,6 @@ static Res AMSDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
DEFINE_CLASS(Pool, AMSPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AMSPool, AbstractCollectPool);
|
||||
PoolClassMixInFormat(klass);
|
||||
klass->instClassStruct.describe = AMSDescribe;
|
||||
klass->instClassStruct.finish = AMSFinish;
|
||||
klass->size = sizeof(AMSStruct);
|
||||
|
|
@ -1789,13 +1781,7 @@ DEFINE_CLASS(Pool, AMSPool, klass)
|
|||
klass->bufferClass = RankBufClassGet;
|
||||
klass->bufferFill = AMSBufferFill;
|
||||
klass->bufferEmpty = AMSBufferEmpty;
|
||||
klass->whiten = AMSWhiten;
|
||||
klass->blacken = AMSBlacken;
|
||||
klass->scan = AMSScan;
|
||||
klass->fix = AMSFix;
|
||||
klass->fixEmergency = AMSFix;
|
||||
klass->reclaim = AMSReclaim;
|
||||
klass->walk = AMSWalk;
|
||||
klass->segPoolGen = amsSegPoolGen;
|
||||
klass->freewalk = AMSFreeWalk;
|
||||
klass->totalSize = AMSTotalSize;
|
||||
klass->freeSize = AMSFreeSize;
|
||||
|
|
@ -1826,6 +1812,7 @@ DEFINE_CLASS(Pool, AMSDebugPool, klass)
|
|||
klass->size = sizeof(AMSDebugStruct);
|
||||
klass->varargs = AMSDebugVarargs;
|
||||
klass->debugMixin = AMSDebugMixin;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1853,7 +1840,6 @@ Bool AMSCheck(AMS ams)
|
|||
CHECKC(AMSPool, ams);
|
||||
CHECKD(Pool, AMSPool(ams));
|
||||
CHECKL(IsA(AMSPool, ams));
|
||||
CHECKL(PoolAlignment(AMSPool(ams)) == AMSGrainsSize(ams, (Size)1));
|
||||
CHECKL(PoolAlignment(AMSPool(ams)) == AMSPool(ams)->format->alignment);
|
||||
if (ams->pgen != NULL) {
|
||||
CHECKL(ams->pgen == &ams->pgenStruct);
|
||||
|
|
|
|||
|
|
@ -41,7 +41,6 @@ typedef Res (*AMSSegSizePolicyFunction)(Size *sizeReturn,
|
|||
|
||||
typedef struct AMSStruct {
|
||||
PoolStruct poolStruct; /* generic pool structure */
|
||||
Shift grainShift; /* log2 of grain size */
|
||||
PoolGenStruct pgenStruct; /* generation representing the pool */
|
||||
PoolGen pgen; /* NULL or pointer to pgenStruct field */
|
||||
Size size; /* total segment size of the pool */
|
||||
|
|
@ -83,22 +82,6 @@ typedef struct AMSSegStruct {
|
|||
#define AMSPool(ams) (&(ams)->poolStruct)
|
||||
|
||||
|
||||
/* macros for abstracting index/address computations */
|
||||
/* <design/poolams/#addr-index.slow> */
|
||||
|
||||
/* only use when size is a multiple of the grain size */
|
||||
#define AMSGrains(ams, size) ((size) >> (ams)->grainShift)
|
||||
|
||||
#define AMSGrainsSize(ams, grains) ((grains) << (ams)->grainShift)
|
||||
|
||||
#define AMSSegShift(seg) (Seg2AMSSeg(seg)->ams->grainShift)
|
||||
|
||||
#define AMS_ADDR_INDEX(seg, addr) \
|
||||
((Index)(AddrOffset(SegBase(seg), addr) >> AMSSegShift(seg)))
|
||||
#define AMS_INDEX_ADDR(seg, index) \
|
||||
AddrAdd(SegBase(seg), (Size)(index) << AMSSegShift(seg))
|
||||
|
||||
|
||||
/* colour ops */
|
||||
|
||||
#define AMS_IS_WHITE(seg, index) \
|
||||
|
|
@ -172,8 +155,6 @@ extern Res AMSInitInternal(AMS ams, Arena arena, PoolClass klass,
|
|||
extern void AMSFinish(Inst inst);
|
||||
extern Bool AMSCheck(AMS ams);
|
||||
|
||||
extern Res AMSScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg);
|
||||
|
||||
#define AMSChain(ams) ((ams)->chain)
|
||||
|
||||
extern void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockVisitor f, void *p);
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* poolawl.c: AUTOMATIC WEAK LINKED POOL CLASS
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
*
|
||||
* DESIGN
|
||||
|
|
@ -48,6 +48,17 @@ SRCID(poolawl, "$Id$");
|
|||
|
||||
#define AWLSig ((Sig)0x519B7A37) /* SIGnature PooL AWL */
|
||||
|
||||
static Res awlSegAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context);
|
||||
static Res awlSegWhiten(Seg seg, Trace trace);
|
||||
static void awlSegGreyen(Seg seg, Trace trace);
|
||||
static void awlSegBlacken(Seg seg, TraceSet traceSet);
|
||||
static Res awlSegScan(Bool *totalReturn, Seg seg, ScanState ss);
|
||||
static Res awlSegFix(Seg seg, ScanState ss, Ref *refIO);
|
||||
static void awlSegReclaim(Seg seg, Trace trace);
|
||||
static void awlSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s);
|
||||
|
||||
|
||||
/* awlStat* -- Statistics gathering about instruction emulation
|
||||
*
|
||||
|
|
@ -83,17 +94,14 @@ typedef Addr (*FindDependentFunction)(Addr object);
|
|||
|
||||
typedef struct AWLPoolStruct {
|
||||
PoolStruct poolStruct;
|
||||
Shift alignShift;
|
||||
PoolGenStruct pgenStruct; /* generation representing the pool */
|
||||
PoolGen pgen; /* NULL or pointer to pgenStruct */
|
||||
Count succAccesses; /* number of successive single accesses */
|
||||
FindDependentFunction findDependent; /* to find a dependent object */
|
||||
awlStatTotalStruct stats;
|
||||
Sig sig;
|
||||
Sig sig; /* <code/misc.h#sig> */
|
||||
} AWLPoolStruct, *AWL;
|
||||
|
||||
#define AWLGrainsSize(awl, grains) ((grains) << (awl)->alignShift)
|
||||
|
||||
|
||||
static Bool AWLCheck(AWL awl);
|
||||
|
||||
|
|
@ -103,13 +111,6 @@ typedef AWL AWLPool;
|
|||
DECLARE_CLASS(Pool, AWLPool, AbstractCollectPool);
|
||||
|
||||
|
||||
/* Conversion between indexes and Addrs */
|
||||
#define awlIndexOfAddr(base, awl, p) \
|
||||
(AddrOffset((base), (p)) >> (awl)->alignShift)
|
||||
#define awlAddrOfIndex(base, awl, i) \
|
||||
AddrAdd(base, AWLGrainsSize(awl, i))
|
||||
|
||||
|
||||
/* AWLSegStruct -- AWL segment subclass
|
||||
*
|
||||
* Subclass of GCSeg
|
||||
|
|
@ -130,7 +131,7 @@ typedef struct AWLSegStruct {
|
|||
Count oldGrains; /* grains allocated prior to last collection */
|
||||
Count singleAccesses; /* number of accesses processed singly */
|
||||
awlStatSegStruct stats;
|
||||
Sig sig;
|
||||
Sig sig; /* <code/misc.h#sig> */
|
||||
} AWLSegStruct, *AWLSeg;
|
||||
|
||||
DECLARE_CLASS(Seg, AWLSeg, GCSeg);
|
||||
|
|
@ -176,7 +177,6 @@ ARG_DEFINE_KEY(awl_seg_rank_set, RankSet);
|
|||
static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
||||
{
|
||||
AWLSeg awlseg;
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
Arena arena;
|
||||
RankSet rankSet;
|
||||
Count bits; /* number of grains */
|
||||
|
|
@ -203,7 +203,7 @@ static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
|||
arena = PoolArena(pool);
|
||||
/* no useful checks for base and size */
|
||||
|
||||
bits = size >> awl->alignShift;
|
||||
bits = PoolSizeGrains(pool, size);
|
||||
tableSize = BTSize(bits);
|
||||
res = ControlAlloc(&v, arena, tableSize);
|
||||
if (res != ResOK)
|
||||
|
|
@ -254,14 +254,13 @@ static void AWLSegFinish(Inst inst)
|
|||
Seg seg = MustBeA(Seg, inst);
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
Arena arena = PoolArena(pool);
|
||||
Size tableSize;
|
||||
Count segGrains;
|
||||
|
||||
/* This is one of the few places where it is easy to check */
|
||||
/* awlseg->grains, so we do */
|
||||
segGrains = SegSize(seg) >> awl->alignShift;
|
||||
segGrains = PoolSizeGrains(pool, SegSize(seg));
|
||||
AVER(segGrains == awlseg->grains);
|
||||
tableSize = BTSize(segGrains);
|
||||
ControlFree(arena, awlseg->alloc, tableSize);
|
||||
|
|
@ -283,6 +282,16 @@ DEFINE_CLASS(Seg, AWLSeg, klass)
|
|||
klass->instClassStruct.finish = AWLSegFinish;
|
||||
klass->size = sizeof(AWLSegStruct);
|
||||
klass->init = AWLSegInit;
|
||||
klass->access = awlSegAccess;
|
||||
klass->whiten = awlSegWhiten;
|
||||
klass->greyen = awlSegGreyen;
|
||||
klass->blacken = awlSegBlacken;
|
||||
klass->scan = awlSegScan;
|
||||
klass->fix = awlSegFix;
|
||||
klass->fixEmergency = awlSegFix;
|
||||
klass->reclaim = awlSegReclaim;
|
||||
klass->walk = awlSegWalk;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -295,7 +304,7 @@ DEFINE_CLASS(Seg, AWLSeg, klass)
|
|||
* AWLSegSALimit is the number of accesses for a single segment in a GC cycle.
|
||||
* AWLTotalSALimit is the total number of accesses during a GC cycle.
|
||||
*
|
||||
* These should be set in config.h, but are here in static variables so that
|
||||
* These should be set in config.h, but are here in global variables so that
|
||||
* it's possible to tweak them in a debugger.
|
||||
*/
|
||||
|
||||
|
|
@ -312,11 +321,12 @@ Bool AWLHaveTotalSALimit = AWL_HAVE_TOTAL_SA_LIMIT;
|
|||
|
||||
/* Determine whether to permit scanning a single ref. */
|
||||
|
||||
static Bool AWLCanTrySingleAccess(Arena arena, AWL awl, Seg seg, Addr addr)
|
||||
static Bool awlSegCanTrySingleAccess(Arena arena, Seg seg, Addr addr)
|
||||
{
|
||||
AWLSeg awlseg;
|
||||
AWL awl;
|
||||
|
||||
AVERT(AWL, awl);
|
||||
AVERT(Arena, arena);
|
||||
AVERT(Seg, seg);
|
||||
AVER(addr != NULL);
|
||||
|
||||
|
|
@ -339,6 +349,7 @@ static Bool AWLCanTrySingleAccess(Arena arena, AWL awl, Seg seg, Addr addr)
|
|||
return FALSE;
|
||||
|
||||
awlseg = MustBeA(AWLSeg, seg);
|
||||
awl = MustBeA(AWLPool, SegPool(seg));
|
||||
|
||||
/* If there have been too many single accesses in a row then don't
|
||||
keep trying them, even if it means retaining objects. */
|
||||
|
|
@ -375,11 +386,13 @@ static void AWLNoteRefAccess(AWL awl, Seg seg, Addr addr)
|
|||
AVER(addr != NULL);
|
||||
|
||||
awlseg->singleAccesses++; /* increment seg count of ref accesses */
|
||||
if (addr == awlseg->stats.lastAccess) {
|
||||
/* If this is a repeated access, increment count */
|
||||
STATISTIC(awlseg->stats.sameAccesses++);
|
||||
}
|
||||
STATISTIC(awlseg->stats.lastAccess = addr);
|
||||
STATISTIC({
|
||||
if (addr == awlseg->stats.lastAccess) {
|
||||
/* If this is a repeated access, increment count */
|
||||
++ awlseg->stats.sameAccesses;
|
||||
}
|
||||
awlseg->stats.lastAccess = addr;
|
||||
});
|
||||
awl->succAccesses++; /* Note a new successive access */
|
||||
}
|
||||
|
||||
|
|
@ -398,71 +411,43 @@ static void AWLNoteSegAccess(AWL awl, Seg seg, Addr addr)
|
|||
|
||||
/* Record a scan of a segment which wasn't provoked by an access */
|
||||
|
||||
static void AWLNoteScan(AWL awl, Seg seg, ScanState ss)
|
||||
static void AWLNoteScan(Seg seg, ScanState ss)
|
||||
{
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
|
||||
AVERT(AWL, awl);
|
||||
UNUSED(ss);
|
||||
|
||||
/* .assume.mixedrank */
|
||||
/* .assume.samerank */
|
||||
/* If this segment has any RankWEAK references, then */
|
||||
/* record statistics about whether weak splatting is being lost. */
|
||||
if (RankSetIsMember(SegRankSet(seg), RankWEAK)) {
|
||||
if (RankWEAK == ss->rank) {
|
||||
/* This is "successful" scan at proper rank. */
|
||||
STATISTIC(awl->stats.goodScans++);
|
||||
if (0 < awlseg->singleAccesses) {
|
||||
/* Accesses have been proceesed singly */
|
||||
/* Record that we genuinely did save a protection-provoked scan */
|
||||
STATISTIC(awl->stats.savedScans++);
|
||||
STATISTIC(awl->stats.savedAccesses += awlseg->singleAccesses);
|
||||
STATISTIC({
|
||||
/* If this segment has any RankWEAK references, then record
|
||||
* statistics about whether weak splatting is being lost. */
|
||||
AWL awl = MustBeA(AWLPool, SegPool(seg));
|
||||
if (RankWEAK == ss->rank) {
|
||||
/* This is "successful" scan at proper rank. */
|
||||
++ awl->stats.goodScans;
|
||||
if (0 < awlseg->singleAccesses) {
|
||||
/* Accesses have been proceesed singly. Record that we
|
||||
* genuinely did save a protection-provoked scan */
|
||||
++ awl->stats.savedScans;
|
||||
awl->stats.savedAccesses += awlseg->singleAccesses;
|
||||
}
|
||||
} else {
|
||||
/* This is "failed" scan at improper rank. */
|
||||
++ awl->stats.badScans;
|
||||
}
|
||||
} else {
|
||||
/* This is "failed" scan at improper rank. */
|
||||
STATISTIC(awl->stats.badScans++);
|
||||
}
|
||||
awlStatSegInit(awlseg);
|
||||
});
|
||||
/* Reinitialize the segment statistics */
|
||||
awlseg->singleAccesses = 0;
|
||||
STATISTIC(awlStatSegInit(awlseg));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* AWLSegCreate -- Create a new segment of at least given size */
|
||||
|
||||
static Res AWLSegCreate(AWLSeg *awlsegReturn,
|
||||
RankSet rankSet, Pool pool, Size size)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
Arena arena = PoolArena(pool);
|
||||
Seg seg;
|
||||
Res res;
|
||||
|
||||
AVER(awlsegReturn != NULL);
|
||||
AVERT(RankSet, rankSet);
|
||||
AVER(size > 0);
|
||||
|
||||
size = SizeArenaGrains(size, arena);
|
||||
/* beware of large sizes overflowing upon rounding */
|
||||
if (size == 0)
|
||||
return ResMEMORY;
|
||||
MPS_ARGS_BEGIN(args) {
|
||||
MPS_ARGS_ADD_FIELD(args, awlKeySegRankSet, u, rankSet);
|
||||
res = PoolGenAlloc(&seg, awl->pgen, CLASS(AWLSeg), size, args);
|
||||
} MPS_ARGS_END(args);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
|
||||
*awlsegReturn = MustBeA(AWLSeg, seg);
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* AWLSegAlloc -- allocate an object in a given segment */
|
||||
|
||||
static Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn,
|
||||
AWLSeg awlseg, AWL awl, Size size)
|
||||
AWLSeg awlseg, Pool pool, Size size)
|
||||
{
|
||||
Count n; /* number of grains equivalent to alloc size */
|
||||
Index i, j;
|
||||
|
|
@ -470,17 +455,17 @@ static Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn,
|
|||
|
||||
AVER(baseReturn != NULL);
|
||||
AVER(limitReturn != NULL);
|
||||
AVERT(AWL, awl);
|
||||
AVERT(Pool, pool);
|
||||
AVER(size > 0);
|
||||
AVER(AWLGrainsSize(awl, size) >= size);
|
||||
AVER(PoolGrainsSize(pool, size) >= size);
|
||||
|
||||
if (size > SegSize(seg))
|
||||
return FALSE;
|
||||
n = size >> awl->alignShift;
|
||||
n = PoolSizeGrains(pool, size);
|
||||
if (!BTFindLongResRange(&i, &j, awlseg->alloc, 0, awlseg->grains, n))
|
||||
return FALSE;
|
||||
*baseReturn = awlAddrOfIndex(SegBase(seg), awl, i);
|
||||
*limitReturn = awlAddrOfIndex(SegBase(seg),awl, j);
|
||||
*baseReturn = PoolAddrOfIndex(SegBase(seg), pool, i);
|
||||
*limitReturn = PoolAddrOfIndex(SegBase(seg), pool, j);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
|
@ -544,6 +529,7 @@ static Res AWLInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
/* Ensure a format was supplied in the argument list. */
|
||||
AVER(pool->format != NULL);
|
||||
pool->alignment = pool->format->alignment;
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
|
||||
AVER(FUNCHECK(findDependent));
|
||||
awl->findDependent = findDependent;
|
||||
|
|
@ -554,7 +540,6 @@ static Res AWLInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
|
||||
awl->pgen = NULL;
|
||||
|
||||
awl->alignShift = SizeLog2(PoolAlignment(pool));
|
||||
awl->succAccesses = 0;
|
||||
awlStatTotalInit(awl);
|
||||
|
||||
|
|
@ -595,9 +580,9 @@ static void AWLFinish(Inst inst)
|
|||
AVERT(AWLSeg, awlseg);
|
||||
AVER(awlseg->bufferedGrains == 0);
|
||||
PoolGenFree(awl->pgen, seg,
|
||||
AWLGrainsSize(awl, awlseg->freeGrains),
|
||||
AWLGrainsSize(awl, awlseg->oldGrains),
|
||||
AWLGrainsSize(awl, awlseg->newGrains),
|
||||
PoolGrainsSize(pool, awlseg->freeGrains),
|
||||
PoolGrainsSize(pool, awlseg->oldGrains),
|
||||
PoolGrainsSize(pool, awlseg->newGrains),
|
||||
FALSE);
|
||||
}
|
||||
awl->sig = SigInvalid;
|
||||
|
|
@ -612,19 +597,21 @@ static void AWLFinish(Inst inst)
|
|||
static Res AWLBufferFill(Addr *baseReturn, Addr *limitReturn,
|
||||
Pool pool, Buffer buffer, Size size)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
Addr base, limit;
|
||||
Res res;
|
||||
Ring node, nextNode;
|
||||
Seg seg;
|
||||
AWLSeg awlseg;
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
|
||||
AVER(baseReturn != NULL);
|
||||
AVER(limitReturn != NULL);
|
||||
AVERC(Pool, pool);
|
||||
AVERC(Buffer, buffer);
|
||||
AVER(size > 0);
|
||||
|
||||
RING_FOR(node, &pool->segRing, nextNode) {
|
||||
Seg seg = SegOfPoolRing(node);
|
||||
seg = SegOfPoolRing(node);
|
||||
|
||||
awlseg = MustBeA(AWLSeg, seg);
|
||||
|
||||
|
|
@ -632,25 +619,28 @@ static Res AWLBufferFill(Addr *baseReturn, Addr *limitReturn,
|
|||
/* buffered, and has the same ranks as the buffer. */
|
||||
if (!SegHasBuffer(seg)
|
||||
&& SegRankSet(seg) == BufferRankSet(buffer)
|
||||
&& AWLGrainsSize(awl, awlseg->freeGrains) >= size
|
||||
&& AWLSegAlloc(&base, &limit, awlseg, awl, size))
|
||||
&& PoolGrainsSize(pool, awlseg->freeGrains) >= size
|
||||
&& AWLSegAlloc(&base, &limit, awlseg, pool, size))
|
||||
goto found;
|
||||
}
|
||||
|
||||
/* No free space in existing awlsegs, so create new awlseg */
|
||||
|
||||
res = AWLSegCreate(&awlseg, BufferRankSet(buffer), pool, size);
|
||||
MPS_ARGS_BEGIN(args) {
|
||||
MPS_ARGS_ADD_FIELD(args, awlKeySegRankSet, u, BufferRankSet(buffer));
|
||||
res = PoolGenAlloc(&seg, awl->pgen, CLASS(AWLSeg),
|
||||
SizeArenaGrains(size, PoolArena(pool)), args);
|
||||
} MPS_ARGS_END(args);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
awlseg = MustBeA(AWLSeg, seg);
|
||||
base = SegBase(MustBeA(Seg, awlseg));
|
||||
limit = SegLimit(MustBeA(Seg, awlseg));
|
||||
|
||||
found:
|
||||
{
|
||||
Index i, j;
|
||||
Seg seg = MustBeA(Seg, awlseg);
|
||||
i = awlIndexOfAddr(SegBase(seg), awl, base);
|
||||
j = awlIndexOfAddr(SegBase(seg), awl, limit);
|
||||
i = PoolIndexOfAddr(SegBase(seg), pool, base);
|
||||
j = PoolIndexOfAddr(SegBase(seg), pool, limit);
|
||||
AVER(i < j);
|
||||
BTSetRange(awlseg->alloc, i, j);
|
||||
/* Objects are allocated black. */
|
||||
|
|
@ -681,8 +671,8 @@ static void AWLBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
|
|||
|
||||
AVER(init <= limit);
|
||||
|
||||
i = awlIndexOfAddr(segBase, awl, init);
|
||||
j = awlIndexOfAddr(segBase, awl, limit);
|
||||
i = PoolIndexOfAddr(segBase, pool, init);
|
||||
j = PoolIndexOfAddr(segBase, pool, limit);
|
||||
AVER(i <= j);
|
||||
if (i < j)
|
||||
BTResRange(awlseg->alloc, i, j);
|
||||
|
|
@ -693,18 +683,28 @@ static void AWLBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
|
|||
awlseg->freeGrains += unusedGrains;
|
||||
awlseg->bufferedGrains = 0;
|
||||
awlseg->newGrains += usedGrains;
|
||||
PoolGenAccountForEmpty(awl->pgen, AWLGrainsSize(awl, usedGrains),
|
||||
AWLGrainsSize(awl, unusedGrains), FALSE);
|
||||
PoolGenAccountForEmpty(awl->pgen, PoolGrainsSize(pool, usedGrains),
|
||||
PoolGrainsSize(pool, unusedGrains), FALSE);
|
||||
}
|
||||
|
||||
|
||||
/* AWLWhiten -- segment condemning method */
|
||||
/* awlSegPoolGen -- get pool generation for an AWL segment */
|
||||
|
||||
/* awlRangeWhiten -- helper function that works on a range.
|
||||
static PoolGen awlSegPoolGen(Pool pool, Seg seg)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AVERT(Seg, seg);
|
||||
return awl->pgen;
|
||||
}
|
||||
|
||||
|
||||
/* awlSegWhiten -- segment condemning method */
|
||||
|
||||
/* awlSegRangeWhiten -- helper function that works on a range.
|
||||
*
|
||||
* This function abstracts common code from AWLWhiten.
|
||||
* This function abstracts common code from awlSegWhiten.
|
||||
*/
|
||||
static void awlRangeWhiten(AWLSeg awlseg, Index base, Index limit)
|
||||
static void awlSegRangeWhiten(AWLSeg awlseg, Index base, Index limit)
|
||||
{
|
||||
if(base != limit) {
|
||||
AVER(base < limit);
|
||||
|
|
@ -714,30 +714,31 @@ static void awlRangeWhiten(AWLSeg awlseg, Index base, Index limit)
|
|||
}
|
||||
}
|
||||
|
||||
static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
|
||||
static Res awlSegWhiten(Seg seg, Trace trace)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
PoolGen pgen = PoolSegPoolGen(pool, seg);
|
||||
Buffer buffer;
|
||||
Count agedGrains, uncondemnedGrains;
|
||||
|
||||
/* All parameters checked by generic PoolWhiten. */
|
||||
/* All parameters checked by generic SegWhiten. */
|
||||
|
||||
/* Can only whiten for a single trace, */
|
||||
/* see <design/poolawl/#fun.condemn> */
|
||||
AVER(SegWhite(seg) == TraceSetEMPTY);
|
||||
|
||||
if (!SegBuffer(&buffer, seg)) {
|
||||
awlRangeWhiten(awlseg, 0, awlseg->grains);
|
||||
awlSegRangeWhiten(awlseg, 0, awlseg->grains);
|
||||
uncondemnedGrains = (Count)0;
|
||||
} else {
|
||||
/* Whiten everything except the buffer. */
|
||||
Addr base = SegBase(seg);
|
||||
Index scanLimitIndex = awlIndexOfAddr(base, awl, BufferScanLimit(buffer));
|
||||
Index limitIndex = awlIndexOfAddr(base, awl, BufferLimit(buffer));
|
||||
Index scanLimitIndex = PoolIndexOfAddr(base, pool, BufferScanLimit(buffer));
|
||||
Index limitIndex = PoolIndexOfAddr(base, pool, BufferLimit(buffer));
|
||||
uncondemnedGrains = limitIndex - scanLimitIndex;
|
||||
awlRangeWhiten(awlseg, 0, scanLimitIndex);
|
||||
awlRangeWhiten(awlseg, limitIndex, awlseg->grains);
|
||||
awlSegRangeWhiten(awlseg, 0, scanLimitIndex);
|
||||
awlSegRangeWhiten(awlseg, limitIndex, awlseg->grains);
|
||||
|
||||
/* Check the buffer is black. */
|
||||
/* This really ought to change when we have a non-trivial */
|
||||
|
|
@ -751,15 +752,15 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
|
|||
/* The unused part of the buffer remains buffered: the rest becomes old. */
|
||||
AVER(awlseg->bufferedGrains >= uncondemnedGrains);
|
||||
agedGrains = awlseg->bufferedGrains - uncondemnedGrains;
|
||||
PoolGenAccountForAge(awl->pgen, AWLGrainsSize(awl, agedGrains),
|
||||
AWLGrainsSize(awl, awlseg->newGrains), FALSE);
|
||||
PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains),
|
||||
PoolGrainsSize(pool, awlseg->newGrains), FALSE);
|
||||
awlseg->oldGrains += agedGrains + awlseg->newGrains;
|
||||
awlseg->bufferedGrains = uncondemnedGrains;
|
||||
awlseg->newGrains = 0;
|
||||
|
||||
if (awlseg->oldGrains > 0) {
|
||||
GenDescCondemned(awl->pgen->gen, trace,
|
||||
AWLGrainsSize(awl, awlseg->oldGrains));
|
||||
GenDescCondemned(pgen->gen, trace,
|
||||
PoolGrainsSize(pool, awlseg->oldGrains));
|
||||
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
|
||||
}
|
||||
|
||||
|
|
@ -767,10 +768,10 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
|
|||
}
|
||||
|
||||
|
||||
/* AWLGrey -- Grey method for AWL pools */
|
||||
/* awlSegGreyen -- Greyen method for AWL segments */
|
||||
|
||||
/* AWLRangeGrey -- subroutine for AWLGrey */
|
||||
static void AWLRangeGrey(AWLSeg awlseg, Index base, Index limit)
|
||||
/* awlSegRangeGreyen -- subroutine for awlSegGreyen */
|
||||
static void awlSegRangeGreyen(AWLSeg awlseg, Index base, Index limit)
|
||||
{
|
||||
/* AWLSeg not checked as that's already been done */
|
||||
AVER(limit <= awlseg->grains);
|
||||
|
|
@ -783,43 +784,42 @@ static void AWLRangeGrey(AWLSeg awlseg, Index base, Index limit)
|
|||
}
|
||||
}
|
||||
|
||||
static void AWLGrey(Pool pool, Trace trace, Seg seg)
|
||||
static void awlSegGreyen(Seg seg, Trace trace)
|
||||
{
|
||||
Buffer buffer;
|
||||
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Trace, trace);
|
||||
Pool pool;
|
||||
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
pool = SegPool(seg);
|
||||
AVER(PoolArena(pool) == trace->arena);
|
||||
|
||||
if (!TraceSetIsMember(SegWhite(seg), trace)) {
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
|
||||
SegSetGrey(seg, TraceSetAdd(SegGrey(seg), trace));
|
||||
if (SegBuffer(&buffer, seg)) {
|
||||
Addr base = SegBase(seg);
|
||||
|
||||
AWLRangeGrey(awlseg,
|
||||
0,
|
||||
awlIndexOfAddr(base, awl, BufferScanLimit(buffer)));
|
||||
AWLRangeGrey(awlseg,
|
||||
awlIndexOfAddr(base, awl, BufferLimit(buffer)),
|
||||
awlseg->grains);
|
||||
awlSegRangeGreyen(awlseg,
|
||||
0,
|
||||
PoolIndexOfAddr(base, pool, BufferScanLimit(buffer)));
|
||||
awlSegRangeGreyen(awlseg,
|
||||
PoolIndexOfAddr(base, pool, BufferLimit(buffer)),
|
||||
awlseg->grains);
|
||||
} else {
|
||||
AWLRangeGrey(awlseg, 0, awlseg->grains);
|
||||
awlSegRangeGreyen(awlseg, 0, awlseg->grains);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* AWLBlacken -- Blacken method for AWL pools */
|
||||
/* awlSegBlacken -- Blacken method for AWL segments */
|
||||
|
||||
static void AWLBlacken(Pool pool, TraceSet traceSet, Seg seg)
|
||||
static void awlSegBlacken(Seg seg, TraceSet traceSet)
|
||||
{
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
|
||||
UNUSED(pool);
|
||||
|
||||
AVERT(TraceSet, traceSet);
|
||||
|
||||
BTSetRange(awlseg->scanned, 0, awlseg->grains);
|
||||
|
|
@ -862,14 +862,14 @@ static Res awlScanObject(Arena arena, AWL awl, ScanState ss,
|
|||
}
|
||||
|
||||
|
||||
/* awlScanSinglePass -- a single scan pass over a segment */
|
||||
/* awlSegScanSinglePass -- a single scan pass over a segment */
|
||||
|
||||
static Res awlScanSinglePass(Bool *anyScannedReturn,
|
||||
ScanState ss, Pool pool,
|
||||
Seg seg, Bool scanAllObjects)
|
||||
static Res awlSegScanSinglePass(Bool *anyScannedReturn, ScanState ss,
|
||||
Seg seg, Bool scanAllObjects)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
Arena arena = PoolArena(pool);
|
||||
Buffer buffer;
|
||||
Format format = pool->format;
|
||||
|
|
@ -899,7 +899,7 @@ static Res awlScanSinglePass(Bool *anyScannedReturn,
|
|||
continue;
|
||||
}
|
||||
|
||||
i = awlIndexOfAddr(base, awl, p);
|
||||
i = PoolIndexOfAddr(base, pool, p);
|
||||
if (!BTGet(awlseg->alloc, i)) {
|
||||
p = AddrAdd(p, PoolAlignment(pool));
|
||||
continue;
|
||||
|
|
@ -927,17 +927,17 @@ static Res awlScanSinglePass(Bool *anyScannedReturn,
|
|||
}
|
||||
|
||||
|
||||
/* AWLScan -- segment scan method for AWL */
|
||||
/* awlSegScan -- segment scan method for AWL */
|
||||
|
||||
static Res AWLScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
static Res awlSegScan(Bool *totalReturn, Seg seg, ScanState ss)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
Bool anyScanned;
|
||||
Bool scanAllObjects;
|
||||
Res res;
|
||||
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Seg, seg);
|
||||
|
||||
/* If the scanner isn't going to scan all the objects then the */
|
||||
/* summary of the unscanned objects must be added into the scan */
|
||||
|
|
@ -954,7 +954,7 @@ static Res AWLScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
(TraceSetDiff(ss->traces, SegWhite(seg)) != TraceSetEMPTY);
|
||||
|
||||
do {
|
||||
res = awlScanSinglePass(&anyScanned, ss, pool, seg, scanAllObjects);
|
||||
res = awlSegScanSinglePass(&anyScanned, ss, seg, scanAllObjects);
|
||||
if (res != ResOK) {
|
||||
*totalReturn = FALSE;
|
||||
return res;
|
||||
|
|
@ -965,27 +965,26 @@ static Res AWLScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
|||
} while(!scanAllObjects && anyScanned);
|
||||
|
||||
*totalReturn = scanAllObjects;
|
||||
AWLNoteScan(awl, seg, ss);
|
||||
AWLNoteScan(seg, ss);
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* AWLFix -- Fix method for AWL */
|
||||
/* awlSegFix -- Fix method for AWL segments */
|
||||
|
||||
static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
static Res awlSegFix(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
AWLSeg awlseg = MustBeA_CRITICAL(AWLSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
Ref clientRef;
|
||||
Addr base;
|
||||
Index i;
|
||||
|
||||
AVERT(ScanState, ss);
|
||||
AVER(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
|
||||
AVER(refIO != NULL);
|
||||
AVERT_CRITICAL(ScanState, ss);
|
||||
AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
|
||||
clientRef = *refIO;
|
||||
ss->wasMarked = TRUE;
|
||||
|
||||
base = AddrSub((Addr)clientRef, pool->format->headerSize);
|
||||
|
||||
|
|
@ -1003,7 +1002,7 @@ static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
return ResOK;
|
||||
}
|
||||
|
||||
i = awlIndexOfAddr(SegBase(seg), awl, base);
|
||||
i = PoolIndexOfAddr(SegBase(seg), pool, base);
|
||||
|
||||
/* Not a real reference if unallocated. */
|
||||
if (!BTGet(awlseg->alloc, i)) {
|
||||
|
|
@ -1012,7 +1011,7 @@ static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
}
|
||||
|
||||
if (!BTGet(awlseg->mark, i)) {
|
||||
ss->wasMarked = FALSE;
|
||||
ss->wasMarked = FALSE; /* <design/fix/#was-marked.not> */
|
||||
if (ss->rank == RankWEAK) {
|
||||
*refIO = (Ref)0;
|
||||
} else {
|
||||
|
|
@ -1025,12 +1024,13 @@ static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
}
|
||||
|
||||
|
||||
/* AWLReclaim -- reclaim dead objects in an AWL segment */
|
||||
/* awlSegReclaim -- reclaim dead objects in an AWL segment */
|
||||
|
||||
static void AWLReclaim(Pool pool, Trace trace, Seg seg)
|
||||
static void awlSegReclaim(Seg seg, Trace trace)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
PoolGen pgen = PoolSegPoolGen(pool, seg);
|
||||
Addr base = SegBase(seg);
|
||||
Buffer buffer;
|
||||
Bool hasBuffer = SegBuffer(&buffer, seg);
|
||||
|
|
@ -1051,18 +1051,18 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
|
|||
++i;
|
||||
continue;
|
||||
}
|
||||
p = awlAddrOfIndex(base, awl, i);
|
||||
p = PoolAddrOfIndex(base, pool, i);
|
||||
if (hasBuffer
|
||||
&& p == BufferScanLimit(buffer)
|
||||
&& BufferScanLimit(buffer) != BufferLimit(buffer))
|
||||
{
|
||||
i = awlIndexOfAddr(base, awl, BufferLimit(buffer));
|
||||
i = PoolIndexOfAddr(base, pool, BufferLimit(buffer));
|
||||
continue;
|
||||
}
|
||||
q = format->skip(AddrAdd(p, format->headerSize));
|
||||
q = AddrSub(q, format->headerSize);
|
||||
AVER(AddrIsAligned(q, PoolAlignment(pool)));
|
||||
j = awlIndexOfAddr(base, awl, q);
|
||||
j = PoolIndexOfAddr(base, pool, q);
|
||||
AVER(j <= awlseg->grains);
|
||||
if(BTGet(awlseg->mark, i)) {
|
||||
AVER(BTGet(awlseg->scanned, i));
|
||||
|
|
@ -1084,43 +1084,44 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
|
|||
AVER(awlseg->oldGrains >= reclaimedGrains);
|
||||
awlseg->oldGrains -= reclaimedGrains;
|
||||
awlseg->freeGrains += reclaimedGrains;
|
||||
PoolGenAccountForReclaim(awl->pgen, AWLGrainsSize(awl, reclaimedGrains), FALSE);
|
||||
PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
|
||||
|
||||
STATISTIC(trace->reclaimSize += AWLGrainsSize(awl, reclaimedGrains));
|
||||
STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains));
|
||||
STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount);
|
||||
GenDescSurvived(awl->pgen->gen, trace, 0, preservedInPlaceSize);
|
||||
GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize);
|
||||
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
|
||||
|
||||
if (awlseg->freeGrains == awlseg->grains && !hasBuffer) {
|
||||
/* No survivors */
|
||||
AVER(awlseg->bufferedGrains == 0);
|
||||
PoolGenFree(awl->pgen, seg,
|
||||
AWLGrainsSize(awl, awlseg->freeGrains),
|
||||
AWLGrainsSize(awl, awlseg->oldGrains),
|
||||
AWLGrainsSize(awl, awlseg->newGrains),
|
||||
PoolGenFree(pgen, seg,
|
||||
PoolGrainsSize(pool, awlseg->freeGrains),
|
||||
PoolGrainsSize(pool, awlseg->oldGrains),
|
||||
PoolGrainsSize(pool, awlseg->newGrains),
|
||||
FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* AWLAccess -- handle a barrier hit */
|
||||
/* awlSegAccess -- handle a barrier hit */
|
||||
|
||||
static Res AWLAccess(Pool pool, Seg seg, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
static Res awlSegAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AWL awl;
|
||||
Res res;
|
||||
|
||||
AVERT(Seg, seg);
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVER(SegPool(seg) == pool);
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
|
||||
awl = MustBeA(AWLPool, SegPool(seg));
|
||||
|
||||
/* Attempt scanning a single reference if permitted */
|
||||
if(AWLCanTrySingleAccess(PoolArena(pool), awl, seg, addr)) {
|
||||
res = PoolSingleAccess(pool, seg, addr, mode, context);
|
||||
if(awlSegCanTrySingleAccess(arena, seg, addr)) {
|
||||
res = SegSingleAccess(seg, arena, addr, mode, context);
|
||||
switch(res) {
|
||||
case ResOK:
|
||||
AWLNoteRefAccess(awl, seg, addr);
|
||||
|
|
@ -1134,7 +1135,7 @@ static Res AWLAccess(Pool pool, Seg seg, Addr addr,
|
|||
}
|
||||
|
||||
/* Have to scan the entire seg anyway. */
|
||||
res = PoolSegAccess(pool, seg, addr, mode, context);
|
||||
res = SegWholeAccess(seg, arena, addr, mode, context);
|
||||
if(ResOK == res) {
|
||||
AWLNoteSegAccess(awl, seg, addr);
|
||||
}
|
||||
|
|
@ -1143,16 +1144,16 @@ static Res AWLAccess(Pool pool, Seg seg, Addr addr,
|
|||
}
|
||||
|
||||
|
||||
/* AWLWalk -- walk all objects */
|
||||
/* awlSegWalk -- walk all objects */
|
||||
|
||||
static void AWLWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
static void awlSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
AWL awl = MustBeA(AWLPool, pool);
|
||||
AWLSeg awlseg = MustBeA(AWLSeg, seg);
|
||||
Format format = pool->format;
|
||||
Pool pool = SegPool(seg);
|
||||
Addr object, base, limit;
|
||||
|
||||
AVERT(Format, format);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary closures and can't be checked */
|
||||
|
||||
|
|
@ -1178,7 +1179,7 @@ static void AWLWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
|||
/* either before the buffer, or after it, never in it */
|
||||
AVER(object < BufferGetInit(buffer) || BufferLimit(buffer) <= object);
|
||||
}
|
||||
i = awlIndexOfAddr(base, awl, object);
|
||||
i = PoolIndexOfAddr(base, pool, object);
|
||||
if (!BTGet(awlseg->alloc, i)) {
|
||||
/* This grain is free */
|
||||
object = AddrAdd(object, PoolAlignment(pool));
|
||||
|
|
@ -1220,7 +1221,6 @@ static Size AWLFreeSize(Pool pool)
|
|||
DEFINE_CLASS(Pool, AWLPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, AWLPool, AbstractCollectPool);
|
||||
PoolClassMixInFormat(klass);
|
||||
klass->instClassStruct.finish = AWLFinish;
|
||||
klass->size = sizeof(AWLPoolStruct);
|
||||
klass->varargs = AWLVarargs;
|
||||
|
|
@ -1228,17 +1228,10 @@ DEFINE_CLASS(Pool, AWLPool, klass)
|
|||
klass->bufferClass = RankBufClassGet;
|
||||
klass->bufferFill = AWLBufferFill;
|
||||
klass->bufferEmpty = AWLBufferEmpty;
|
||||
klass->access = AWLAccess;
|
||||
klass->whiten = AWLWhiten;
|
||||
klass->grey = AWLGrey;
|
||||
klass->blacken = AWLBlacken;
|
||||
klass->scan = AWLScan;
|
||||
klass->fix = AWLFix;
|
||||
klass->fixEmergency = AWLFix;
|
||||
klass->reclaim = AWLReclaim;
|
||||
klass->walk = AWLWalk;
|
||||
klass->segPoolGen = awlSegPoolGen;
|
||||
klass->totalSize = AWLTotalSize;
|
||||
klass->freeSize = AWLFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -1256,7 +1249,8 @@ static Bool AWLCheck(AWL awl)
|
|||
CHECKS(AWL, awl);
|
||||
CHECKC(AWLPool, awl);
|
||||
CHECKD(Pool, CouldBeA(Pool, awl));
|
||||
CHECKL(AWLGrainsSize(awl, (Count)1) == PoolAlignment(CouldBeA(Pool, awl)));
|
||||
if (awl->pgen != NULL)
|
||||
CHECKD(PoolGen, awl->pgen);
|
||||
/* Nothing to check about succAccesses. */
|
||||
CHECKL(FUNCHECK(awl->findDependent));
|
||||
/* Don't bother to check stats. */
|
||||
|
|
@ -1266,7 +1260,7 @@ static Bool AWLCheck(AWL awl)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* poollo.c: LEAF POOL CLASS
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* DESIGN
|
||||
*
|
||||
|
|
@ -23,14 +23,11 @@ typedef struct LOStruct *LO;
|
|||
|
||||
typedef struct LOStruct {
|
||||
PoolStruct poolStruct; /* generic pool structure */
|
||||
Shift alignShift; /* log_2 of pool alignment */
|
||||
PoolGenStruct pgenStruct; /* generation representing the pool */
|
||||
PoolGen pgen; /* NULL or pointer to pgenStruct */
|
||||
Sig sig;
|
||||
Sig sig; /* <code/misc.h#sig> */
|
||||
} LOStruct;
|
||||
|
||||
#define LOGrainsSize(lo, grains) ((grains) << (lo)->alignShift)
|
||||
|
||||
typedef LO LOPool;
|
||||
#define LOPoolCheck LOCheck
|
||||
DECLARE_CLASS(Pool, LOPool, AbstractSegBufPool);
|
||||
|
|
@ -63,6 +60,11 @@ typedef struct LOSegStruct {
|
|||
static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args);
|
||||
static void loSegFinish(Inst inst);
|
||||
static Count loSegGrains(LOSeg loseg);
|
||||
static Res loSegWhiten(Seg seg, Trace trace);
|
||||
static Res loSegFix(Seg seg, ScanState ss, Ref *refIO);
|
||||
static void loSegReclaim(Seg seg, Trace trace);
|
||||
static void loSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s);
|
||||
|
||||
|
||||
/* LOSegClass -- Class definition for LO segments */
|
||||
|
|
@ -74,6 +76,12 @@ DEFINE_CLASS(Seg, LOSeg, klass)
|
|||
klass->instClassStruct.finish = loSegFinish;
|
||||
klass->size = sizeof(LOSegStruct);
|
||||
klass->init = loSegInit;
|
||||
klass->whiten = loSegWhiten;
|
||||
klass->fix = loSegFix;
|
||||
klass->fixEmergency = loSegFix;
|
||||
klass->reclaim = loSegReclaim;
|
||||
klass->walk = loSegWalk;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -83,7 +91,7 @@ ATTRIBUTE_UNUSED
|
|||
static Bool LOSegCheck(LOSeg loseg)
|
||||
{
|
||||
Seg seg = MustBeA(Seg, loseg);
|
||||
LO lo = MustBeA(LOPool, SegPool(seg));
|
||||
Pool pool = SegPool(seg);
|
||||
CHECKS(LOSeg, loseg);
|
||||
CHECKD(GCSeg, &loseg->gcSegStruct);
|
||||
CHECKL(loseg->mark != NULL);
|
||||
|
|
@ -91,7 +99,7 @@ static Bool LOSegCheck(LOSeg loseg)
|
|||
/* Could check exactly how many bits are set in the alloc table. */
|
||||
CHECKL(loseg->freeGrains + loseg->bufferedGrains + loseg->newGrains
|
||||
+ loseg->oldGrains
|
||||
== SegSize(seg) >> lo->alignShift);
|
||||
== PoolSizeGrains(pool, SegSize(seg)));
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
|
@ -101,7 +109,6 @@ static Bool LOSegCheck(LOSeg loseg)
|
|||
static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
||||
{
|
||||
LOSeg loseg;
|
||||
LO lo = MustBeA(LOPool, pool);
|
||||
Res res;
|
||||
Size tablebytes; /* # bytes in each control array */
|
||||
Arena arena = PoolArena(pool);
|
||||
|
|
@ -117,7 +124,7 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
|||
|
||||
AVER(SegWhite(seg) == TraceSetEMPTY);
|
||||
|
||||
grains = size >> lo->alignShift;
|
||||
grains = PoolSizeGrains(pool, size);
|
||||
tablebytes = BTSize(grains);
|
||||
res = ControlAlloc(&p, arena, tablebytes);
|
||||
if(res != ResOK)
|
||||
|
|
@ -176,20 +183,12 @@ ATTRIBUTE_UNUSED
|
|||
static Count loSegGrains(LOSeg loseg)
|
||||
{
|
||||
Seg seg = MustBeA(Seg, loseg);
|
||||
LO lo = MustBeA(LOPool, SegPool(seg));
|
||||
Pool pool = SegPool(seg);
|
||||
Size size = SegSize(seg);
|
||||
return size >> lo->alignShift;
|
||||
return PoolSizeGrains(pool, size);
|
||||
}
|
||||
|
||||
|
||||
/* Conversion between indexes and Addrs */
|
||||
#define loIndexOfAddr(base, lo, p) \
|
||||
(AddrOffset((base), (p)) >> (lo)->alignShift)
|
||||
|
||||
#define loAddrOfIndex(base, lo, i) \
|
||||
(AddrAdd((base), LOGrainsSize((lo), (i))))
|
||||
|
||||
|
||||
/* loSegFree -- mark block from baseIndex to limitIndex free */
|
||||
|
||||
static void loSegFree(LOSeg loseg, Index baseIndex, Index limitIndex)
|
||||
|
|
@ -214,7 +213,6 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn,
|
|||
Index baseIndex, limitIndex;
|
||||
Seg seg = MustBeA(Seg, loseg);
|
||||
Pool pool = SegPool(seg);
|
||||
LO lo = MustBeA(LOPool, pool);
|
||||
Count agrains;
|
||||
Count grains;
|
||||
Addr segBase;
|
||||
|
|
@ -227,7 +225,7 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn,
|
|||
|
||||
/* agrains is the number of grains corresponding to the size */
|
||||
/* of the allocation request */
|
||||
agrains = size >> lo->alignShift;
|
||||
agrains = PoolSizeGrains(pool, size);
|
||||
AVER(agrains >= 1);
|
||||
AVER(agrains <= loseg->freeGrains);
|
||||
AVER(size <= SegSize(seg));
|
||||
|
|
@ -244,53 +242,28 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn,
|
|||
|
||||
/* check that BTFindLongResRange really did find enough space */
|
||||
AVER(baseIndex < limitIndex);
|
||||
AVER(LOGrainsSize(lo, limitIndex - baseIndex) >= size);
|
||||
AVER(PoolGrainsSize(pool, limitIndex - baseIndex) >= size);
|
||||
segBase = SegBase(seg);
|
||||
*bReturn = loAddrOfIndex(segBase, lo, baseIndex);
|
||||
*lReturn = loAddrOfIndex(segBase, lo, limitIndex);
|
||||
*bReturn = PoolAddrOfIndex(segBase, pool, baseIndex);
|
||||
*lReturn = PoolAddrOfIndex(segBase, pool, limitIndex);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
|
||||
/* loSegCreate -- Creates a segment of size at least size.
|
||||
*
|
||||
* Segments will be multiples of ArenaGrainSize.
|
||||
*/
|
||||
|
||||
static Res loSegCreate(LOSeg *loSegReturn, Pool pool, Size size)
|
||||
{
|
||||
LO lo = MustBeA(LOPool, pool);
|
||||
Seg seg;
|
||||
Res res;
|
||||
|
||||
AVER(loSegReturn != NULL);
|
||||
AVER(size > 0);
|
||||
|
||||
res = PoolGenAlloc(&seg, lo->pgen, CLASS(LOSeg),
|
||||
SizeArenaGrains(size, PoolArena(pool)),
|
||||
argsNone);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
|
||||
*loSegReturn = MustBeA(LOSeg, seg);
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* loSegReclaim -- reclaim white objects in an LO segment
|
||||
*
|
||||
* Could consider implementing this using Walk.
|
||||
*/
|
||||
|
||||
static void loSegReclaim(LOSeg loseg, Trace trace)
|
||||
static void loSegReclaim(Seg seg, Trace trace)
|
||||
{
|
||||
Addr p, base, limit;
|
||||
Bool marked;
|
||||
Count reclaimedGrains = (Count)0;
|
||||
Seg seg = MustBeA(Seg, loseg);
|
||||
LOSeg loseg = MustBeA(LOSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
LO lo = MustBeA(LOPool, pool);
|
||||
PoolGen pgen = PoolSegPoolGen(pool, seg);
|
||||
Format format = NULL; /* supress "may be used uninitialized" warning */
|
||||
Count preservedInPlaceCount = (Count)0;
|
||||
Size preservedInPlaceSize = (Size)0;
|
||||
|
|
@ -330,7 +303,7 @@ static void loSegReclaim(LOSeg loseg, Trace trace)
|
|||
/* either before the buffer, or after it, never in it */
|
||||
AVER(p < BufferGetInit(buffer) || BufferLimit(buffer) <= p);
|
||||
}
|
||||
i = loIndexOfAddr(base, lo, p);
|
||||
i = PoolIndexOfAddr(base, pool, p);
|
||||
if(!BTGet(loseg->alloc, i)) {
|
||||
/* This grain is free */
|
||||
p = AddrAdd(p, pool->alignment);
|
||||
|
|
@ -343,7 +316,7 @@ static void loSegReclaim(LOSeg loseg, Trace trace)
|
|||
++preservedInPlaceCount;
|
||||
preservedInPlaceSize += AddrOffset(p, q);
|
||||
} else {
|
||||
Index j = loIndexOfAddr(base, lo, q);
|
||||
Index j = PoolIndexOfAddr(base, pool, q);
|
||||
/* This object is not marked, so free it */
|
||||
loSegFree(loseg, i, j);
|
||||
reclaimedGrains += j - i;
|
||||
|
|
@ -356,44 +329,39 @@ static void loSegReclaim(LOSeg loseg, Trace trace)
|
|||
AVER(loseg->oldGrains >= reclaimedGrains);
|
||||
loseg->oldGrains -= reclaimedGrains;
|
||||
loseg->freeGrains += reclaimedGrains;
|
||||
PoolGenAccountForReclaim(lo->pgen, LOGrainsSize(lo, reclaimedGrains), FALSE);
|
||||
PoolGenAccountForReclaim(pgen, PoolGrainsSize(pool, reclaimedGrains), FALSE);
|
||||
|
||||
STATISTIC(trace->reclaimSize += LOGrainsSize(lo, reclaimedGrains));
|
||||
STATISTIC(trace->reclaimSize += PoolGrainsSize(pool, reclaimedGrains));
|
||||
STATISTIC(trace->preservedInPlaceCount += preservedInPlaceCount);
|
||||
GenDescSurvived(lo->pgen->gen, trace, 0, preservedInPlaceSize);
|
||||
GenDescSurvived(pgen->gen, trace, 0, preservedInPlaceSize);
|
||||
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
|
||||
|
||||
if (!marked) {
|
||||
AVER(loseg->bufferedGrains == 0);
|
||||
PoolGenFree(lo->pgen, seg,
|
||||
LOGrainsSize(lo, loseg->freeGrains),
|
||||
LOGrainsSize(lo, loseg->oldGrains),
|
||||
LOGrainsSize(lo, loseg->newGrains),
|
||||
PoolGenFree(pgen, seg,
|
||||
PoolGrainsSize(pool, loseg->freeGrains),
|
||||
PoolGrainsSize(pool, loseg->oldGrains),
|
||||
PoolGrainsSize(pool, loseg->newGrains),
|
||||
FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
/* This walks over _all_ objects in the heap, whether they are */
|
||||
/* black or white, they are still validly formatted as this is */
|
||||
/* a leaf pool, so there can't be any dangling references */
|
||||
static void LOWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
/* Walks over _all_ objects in the segnent: whether they are black or
|
||||
* white, they are still validly formatted as this is a leaf pool, so
|
||||
* there can't be any dangling references.
|
||||
*/
|
||||
static void loSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
Addr base;
|
||||
LO lo = MustBeA(LOPool, pool);
|
||||
LOSeg loseg = MustBeA(LOSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
Index i, grains;
|
||||
Format format = NULL; /* suppress "may be used uninitialized" warning */
|
||||
Bool b;
|
||||
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Format, format);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary closures and can't be checked */
|
||||
|
||||
b = PoolFormat(&format, pool);
|
||||
AVER(b);
|
||||
|
||||
base = SegBase(seg);
|
||||
grains = loSegGrains(loseg);
|
||||
i = 0;
|
||||
|
|
@ -401,7 +369,7 @@ static void LOWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
|||
while(i < grains) {
|
||||
/* object is a slight misnomer because it might point to a */
|
||||
/* free grain */
|
||||
Addr object = loAddrOfIndex(base, lo, i);
|
||||
Addr object = PoolAddrOfIndex(base, pool, i);
|
||||
Addr next;
|
||||
Index j;
|
||||
Buffer buffer;
|
||||
|
|
@ -411,7 +379,7 @@ static void LOWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
|||
BufferScanLimit(buffer) != BufferLimit(buffer)) {
|
||||
/* skip over buffered area */
|
||||
object = BufferLimit(buffer);
|
||||
i = loIndexOfAddr(base, lo, object);
|
||||
i = PoolIndexOfAddr(base, pool, object);
|
||||
continue;
|
||||
}
|
||||
/* since we skip over the buffered area we are always */
|
||||
|
|
@ -426,7 +394,7 @@ static void LOWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
|||
object = AddrAdd(object, format->headerSize);
|
||||
next = (*format->skip)(object);
|
||||
next = AddrSub(next, format->headerSize);
|
||||
j = loIndexOfAddr(base, lo, next);
|
||||
j = PoolIndexOfAddr(base, pool, next);
|
||||
AVER(i < j);
|
||||
(*f)(object, pool->format, pool, p, s);
|
||||
i = j;
|
||||
|
|
@ -484,7 +452,7 @@ static Res LOInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
AVER(chain->arena == arena);
|
||||
|
||||
pool->alignment = pool->format->alignment;
|
||||
lo->alignShift = SizeLog2((Size)PoolAlignment(pool));
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
|
||||
lo->pgen = NULL;
|
||||
|
||||
|
|
@ -524,9 +492,9 @@ static void LOFinish(Inst inst)
|
|||
AVERT(LOSeg, loseg);
|
||||
AVER(loseg->bufferedGrains == 0);
|
||||
PoolGenFree(lo->pgen, seg,
|
||||
LOGrainsSize(lo, loseg->freeGrains),
|
||||
LOGrainsSize(lo, loseg->oldGrains),
|
||||
LOGrainsSize(lo, loseg->newGrains),
|
||||
PoolGrainsSize(pool, loseg->freeGrains),
|
||||
PoolGrainsSize(pool, loseg->oldGrains),
|
||||
PoolGrainsSize(pool, loseg->newGrains),
|
||||
FALSE);
|
||||
}
|
||||
PoolGenFinish(lo->pgen);
|
||||
|
|
@ -561,16 +529,18 @@ static Res LOBufferFill(Addr *baseReturn, Addr *limitReturn,
|
|||
seg = SegOfPoolRing(node);
|
||||
loseg = MustBeA(LOSeg, seg);
|
||||
AVERT(LOSeg, loseg);
|
||||
if(LOGrainsSize(lo, loseg->freeGrains) >= size
|
||||
if(PoolGrainsSize(pool, loseg->freeGrains) >= size
|
||||
&& loSegFindFree(&base, &limit, loseg, size))
|
||||
goto found;
|
||||
}
|
||||
|
||||
/* No segment had enough space, so make a new one. */
|
||||
res = loSegCreate(&loseg, pool, size);
|
||||
if(res != ResOK)
|
||||
res = PoolGenAlloc(&seg, lo->pgen, CLASS(LOSeg),
|
||||
SizeArenaGrains(size, PoolArena(pool)),
|
||||
argsNone);
|
||||
if (res != ResOK)
|
||||
return res;
|
||||
seg = MustBeA(Seg, loseg);
|
||||
loseg = MustBeA(LOSeg, seg);
|
||||
base = SegBase(seg);
|
||||
limit = SegLimit(seg);
|
||||
|
||||
|
|
@ -581,8 +551,8 @@ found:
|
|||
|
||||
segBase = SegBase(seg);
|
||||
/* mark the newly buffered region as allocated */
|
||||
baseIndex = loIndexOfAddr(segBase, lo, base);
|
||||
limitIndex = loIndexOfAddr(segBase, lo, limit);
|
||||
baseIndex = PoolIndexOfAddr(segBase, pool, base);
|
||||
limitIndex = PoolIndexOfAddr(segBase, pool, limit);
|
||||
AVER(BTIsResRange(loseg->alloc, baseIndex, limitIndex));
|
||||
AVER(BTIsSetRange(loseg->mark, baseIndex, limitIndex));
|
||||
BTSetRange(loseg->alloc, baseIndex, limitIndex);
|
||||
|
|
@ -628,8 +598,8 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
|
|||
AVER(init <= SegLimit(seg));
|
||||
|
||||
/* convert base, init, and limit, to quantum positions */
|
||||
initIndex = loIndexOfAddr(segBase, lo, init);
|
||||
limitIndex = loIndexOfAddr(segBase, lo, limit);
|
||||
initIndex = PoolIndexOfAddr(segBase, pool, init);
|
||||
limitIndex = PoolIndexOfAddr(segBase, pool, limit);
|
||||
|
||||
AVER(initIndex <= limitIndex);
|
||||
if (initIndex < limitIndex)
|
||||
|
|
@ -641,17 +611,28 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
|
|||
loseg->freeGrains += unusedGrains;
|
||||
loseg->bufferedGrains = 0;
|
||||
loseg->newGrains += usedGrains;
|
||||
PoolGenAccountForEmpty(lo->pgen, LOGrainsSize(lo, usedGrains),
|
||||
LOGrainsSize(lo, unusedGrains), FALSE);
|
||||
PoolGenAccountForEmpty(lo->pgen, PoolGrainsSize(pool, usedGrains),
|
||||
PoolGrainsSize(pool, unusedGrains), FALSE);
|
||||
}
|
||||
|
||||
|
||||
/* LOWhiten -- whiten a segment */
|
||||
/* loSegPoolGen -- get pool generation for an LO segment */
|
||||
|
||||
static Res LOWhiten(Pool pool, Trace trace, Seg seg)
|
||||
static PoolGen loSegPoolGen(Pool pool, Seg seg)
|
||||
{
|
||||
LO lo = MustBeA(LOPool, pool);
|
||||
AVERT(Seg, seg);
|
||||
return lo->pgen;
|
||||
}
|
||||
|
||||
|
||||
/* loSegWhiten -- whiten a segment */
|
||||
|
||||
static Res loSegWhiten(Seg seg, Trace trace)
|
||||
{
|
||||
LOSeg loseg = MustBeA(LOSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
PoolGen pgen = PoolSegPoolGen(pool, seg);
|
||||
Buffer buffer;
|
||||
Count grains, agedGrains, uncondemnedGrains;
|
||||
|
||||
|
|
@ -663,8 +644,8 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
|
|||
/* Whiten allocated objects; leave free areas black. */
|
||||
if (SegBuffer(&buffer, seg)) {
|
||||
Addr base = SegBase(seg);
|
||||
Index scanLimitIndex = loIndexOfAddr(base, lo, BufferScanLimit(buffer));
|
||||
Index limitIndex = loIndexOfAddr(base, lo, BufferLimit(buffer));
|
||||
Index scanLimitIndex = PoolIndexOfAddr(base, pool, BufferScanLimit(buffer));
|
||||
Index limitIndex = PoolIndexOfAddr(base, pool, BufferLimit(buffer));
|
||||
uncondemnedGrains = limitIndex - scanLimitIndex;
|
||||
if (0 < scanLimitIndex)
|
||||
BTCopyInvertRange(loseg->alloc, loseg->mark, 0, scanLimitIndex);
|
||||
|
|
@ -678,14 +659,15 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
|
|||
/* The unused part of the buffer remains buffered: the rest becomes old. */
|
||||
AVER(loseg->bufferedGrains >= uncondemnedGrains);
|
||||
agedGrains = loseg->bufferedGrains - uncondemnedGrains;
|
||||
PoolGenAccountForAge(lo->pgen, LOGrainsSize(lo, agedGrains),
|
||||
LOGrainsSize(lo, loseg->newGrains), FALSE);
|
||||
PoolGenAccountForAge(pgen, PoolGrainsSize(pool, agedGrains),
|
||||
PoolGrainsSize(pool, loseg->newGrains), FALSE);
|
||||
loseg->oldGrains += agedGrains + loseg->newGrains;
|
||||
loseg->bufferedGrains = uncondemnedGrains;
|
||||
loseg->newGrains = 0;
|
||||
|
||||
if (loseg->oldGrains > 0) {
|
||||
GenDescCondemned(lo->pgen->gen, trace, LOGrainsSize(lo, loseg->oldGrains));
|
||||
GenDescCondemned(pgen->gen, trace,
|
||||
PoolGrainsSize(pool, loseg->oldGrains));
|
||||
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
|
||||
}
|
||||
|
||||
|
|
@ -693,10 +675,10 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
|
|||
}
|
||||
|
||||
|
||||
static Res LOFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
static Res loSegFix(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
LO lo = MustBeA_CRITICAL(LOPool, pool);
|
||||
LOSeg loseg = MustBeA_CRITICAL(LOSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
Ref clientRef;
|
||||
Addr base;
|
||||
Index i;
|
||||
|
|
@ -705,8 +687,6 @@ static Res LOFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
|
||||
ss->wasMarked = TRUE; /* <design/fix/#protocol.was-marked> */
|
||||
|
||||
clientRef = *refIO;
|
||||
base = AddrSub((Addr)clientRef, pool->format->headerSize);
|
||||
|
||||
|
|
@ -724,7 +704,7 @@ static Res LOFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
return ResOK;
|
||||
}
|
||||
|
||||
i = AddrOffset(SegBase(seg), base) >> lo->alignShift;
|
||||
i = PoolIndexOfAddr(SegBase(seg), pool, base);
|
||||
|
||||
/* Not a real reference if unallocated. */
|
||||
if (!BTGet(loseg->alloc, i)) {
|
||||
|
|
@ -733,7 +713,7 @@ static Res LOFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
}
|
||||
|
||||
if(!BTGet(loseg->mark, i)) {
|
||||
ss->wasMarked = FALSE; /* <design/fix/#protocol.was-marked> */
|
||||
ss->wasMarked = FALSE; /* <design/fix/#was-marked.not> */
|
||||
if(ss->rank == RankWEAK) {
|
||||
*refIO = (Addr)0;
|
||||
} else {
|
||||
|
|
@ -745,18 +725,6 @@ static Res LOFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
|||
}
|
||||
|
||||
|
||||
static void LOReclaim(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
LOSeg loseg = MustBeA(LOSeg, seg);
|
||||
|
||||
AVERT(Trace, trace);
|
||||
AVER(TraceSetIsMember(SegWhite(seg), trace));
|
||||
UNUSED(pool);
|
||||
|
||||
loSegReclaim(loseg, trace);
|
||||
}
|
||||
|
||||
|
||||
/* LOTotalSize -- total memory allocated from the arena */
|
||||
/* TODO: This code is repeated in AMS */
|
||||
|
||||
|
|
@ -782,7 +750,6 @@ static Size LOFreeSize(Pool pool)
|
|||
DEFINE_CLASS(Pool, LOPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, LOPool, AbstractSegBufPool);
|
||||
PoolClassMixInFormat(klass);
|
||||
PoolClassMixInCollect(klass);
|
||||
klass->instClassStruct.finish = LOFinish;
|
||||
klass->size = sizeof(LOStruct);
|
||||
|
|
@ -790,13 +757,10 @@ DEFINE_CLASS(Pool, LOPool, klass)
|
|||
klass->init = LOInit;
|
||||
klass->bufferFill = LOBufferFill;
|
||||
klass->bufferEmpty = LOBufferEmpty;
|
||||
klass->whiten = LOWhiten;
|
||||
klass->fix = LOFix;
|
||||
klass->fixEmergency = LOFix;
|
||||
klass->reclaim = LOReclaim;
|
||||
klass->walk = LOWalk;
|
||||
klass->segPoolGen = loSegPoolGen;
|
||||
klass->totalSize = LOTotalSize;
|
||||
klass->freeSize = LOFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -817,8 +781,6 @@ static Bool LOCheck(LO lo)
|
|||
CHECKC(LOPool, lo);
|
||||
CHECKD(Pool, &lo->poolStruct);
|
||||
CHECKC(LOPool, lo);
|
||||
CHECKL(ShiftCheck(lo->alignShift));
|
||||
CHECKL(LOGrainsSize(lo, (Count)1) == PoolAlignment(MustBeA(AbstractPool, lo)));
|
||||
if (lo->pgen != NULL) {
|
||||
CHECKL(lo->pgen == &lo->pgenStruct);
|
||||
CHECKD(PoolGen, lo->pgen);
|
||||
|
|
@ -829,7 +791,7 @@ static Bool LOCheck(LO lo)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -349,6 +349,7 @@ DEFINE_CLASS(Pool, MFSPool, klass)
|
|||
klass->free = MFSFree;
|
||||
klass->totalSize = MFSTotalSize;
|
||||
klass->freeSize = MFSFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -167,6 +167,7 @@ typedef struct MRGRefSegStruct {
|
|||
|
||||
DECLARE_CLASS(Seg, MRGLinkSeg, Seg);
|
||||
DECLARE_CLASS(Seg, MRGRefSeg, GCSeg);
|
||||
static Res mrgRefSegScan(Bool *totalReturn, Seg seg, ScanState ss);
|
||||
|
||||
|
||||
/* MRGLinkSegCheck -- check a link segment
|
||||
|
|
@ -233,6 +234,20 @@ static Res MRGLinkSegInit(Seg seg, Pool pool, Addr base, Size size,
|
|||
}
|
||||
|
||||
|
||||
/* MRGLinkSegFinish -- finish a link segment */
|
||||
|
||||
static void mrgLinkSegFinish(Inst inst)
|
||||
{
|
||||
Seg seg = MustBeA(Seg, inst);
|
||||
MRGLinkSeg linkseg = MustBeA(MRGLinkSeg, seg);
|
||||
|
||||
linkseg->sig = SigInvalid;
|
||||
|
||||
/* finish the superclass fields last */
|
||||
NextMethod(Inst, MRGLinkSeg, finish)(inst);
|
||||
}
|
||||
|
||||
|
||||
/* MRGRefSegInit -- initialise a ref segment */
|
||||
|
||||
ARG_DEFINE_KEY(mrg_seg_link_seg, Pointer);
|
||||
|
|
@ -281,14 +296,30 @@ static Res MRGRefSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
|||
}
|
||||
|
||||
|
||||
/* MRGRefSegFinish -- finish a ref segment */
|
||||
|
||||
static void mrgRefSegFinish(Inst inst)
|
||||
{
|
||||
Seg seg = MustBeA(Seg, inst);
|
||||
MRGRefSeg refseg = MustBeA(MRGRefSeg, seg);
|
||||
|
||||
refseg->sig = SigInvalid;
|
||||
|
||||
/* finish the superclass fields last */
|
||||
NextMethod(Inst, MRGRefSeg, finish)(inst);
|
||||
}
|
||||
|
||||
|
||||
/* MRGLinkSegClass -- Class definition */
|
||||
|
||||
DEFINE_CLASS(Seg, MRGLinkSeg, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, MRGLinkSeg, Seg);
|
||||
SegClassMixInNoSplitMerge(klass); /* no support for this */
|
||||
klass->instClassStruct.finish = mrgLinkSegFinish;
|
||||
klass->size = sizeof(MRGLinkSegStruct);
|
||||
klass->init = MRGLinkSegInit;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -298,8 +329,11 @@ DEFINE_CLASS(Seg, MRGRefSeg, klass)
|
|||
{
|
||||
INHERIT_CLASS(klass, MRGRefSeg, GCSeg);
|
||||
SegClassMixInNoSplitMerge(klass); /* no support for this */
|
||||
klass->instClassStruct.finish = mrgRefSegFinish;
|
||||
klass->size = sizeof(MRGRefSegStruct);
|
||||
klass->init = MRGRefSegInit;
|
||||
klass->scan = mrgRefSegScan;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -470,7 +504,6 @@ static void MRGSegPairDestroy(MRGRefSeg refseg)
|
|||
{
|
||||
RingRemove(&refseg->mrgRing);
|
||||
RingFinish(&refseg->mrgRing);
|
||||
refseg->sig = SigInvalid;
|
||||
SegFree(MustBeA(Seg, refseg->linkSeg));
|
||||
SegFree(MustBeA(Seg, refseg));
|
||||
}
|
||||
|
|
@ -558,21 +591,22 @@ static void MRGFinalize(Arena arena, MRGLinkSeg linkseg, Index indx)
|
|||
}
|
||||
|
||||
|
||||
static Res MRGRefSegScan(ScanState ss, MRGRefSeg refseg, MRG mrg)
|
||||
static Res mrgRefSegScan(Bool *totalReturn, Seg seg, ScanState ss)
|
||||
{
|
||||
MRGRefSeg refseg = MustBeA(MRGRefSeg, seg);
|
||||
Pool pool = SegPool(seg);
|
||||
MRG mrg = MustBeA(MRGPool, pool);
|
||||
|
||||
Res res;
|
||||
Arena arena;
|
||||
MRGLinkSeg linkseg;
|
||||
|
||||
RefPart refPart;
|
||||
Index i;
|
||||
Count nGuardians;
|
||||
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(MRGRefSeg, refseg);
|
||||
AVERT(MRG, mrg);
|
||||
|
||||
arena = PoolArena(MustBeA(AbstractPool, mrg));
|
||||
arena = PoolArena(pool);
|
||||
linkseg = refseg->linkSeg;
|
||||
|
||||
nGuardians = MRGGuardiansPerSeg(mrg);
|
||||
|
|
@ -588,8 +622,10 @@ static Res MRGRefSegScan(ScanState ss, MRGRefSeg refseg, MRG mrg)
|
|||
/* because we are in a scan and the shield is exposed. */
|
||||
if (TRACE_FIX1(ss, refPart->ref)) {
|
||||
res = TRACE_FIX2(ss, &(refPart->ref));
|
||||
if (res != ResOK)
|
||||
if (res != ResOK) {
|
||||
*totalReturn = FALSE;
|
||||
return res;
|
||||
}
|
||||
|
||||
if (ss->rank == RankFINAL && !ss->wasMarked) { /* .improve.rank */
|
||||
MRGFinalize(arena, linkseg, i);
|
||||
|
|
@ -600,6 +636,7 @@ static Res MRGRefSegScan(ScanState ss, MRGRefSeg refseg, MRG mrg)
|
|||
}
|
||||
} TRACE_SCAN_END(ss);
|
||||
|
||||
*totalReturn = TRUE;
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
|
@ -821,27 +858,6 @@ static Res MRGDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
}
|
||||
|
||||
|
||||
static Res MRGScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
{
|
||||
MRG mrg = MustBeA(MRGPool, pool);
|
||||
MRGRefSeg refseg = MustBeA(MRGRefSeg, seg);
|
||||
Res res;
|
||||
|
||||
AVERT(ScanState, ss);
|
||||
AVER(SegRankSet(seg) == RankSetSingle(RankFINAL)); /* .improve.rank */
|
||||
AVER(TraceSetInter(SegGrey(seg), ss->traces) != TraceSetEMPTY);
|
||||
|
||||
res = MRGRefSegScan(ss, refseg, mrg);
|
||||
if (res != ResOK) {
|
||||
*totalReturn = FALSE;
|
||||
return res;
|
||||
}
|
||||
|
||||
*totalReturn = TRUE;
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
DEFINE_CLASS(Pool, MRGPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, MRGPool, AbstractPool);
|
||||
|
|
@ -849,9 +865,7 @@ DEFINE_CLASS(Pool, MRGPool, klass)
|
|||
klass->instClassStruct.finish = MRGFinish;
|
||||
klass->size = sizeof(MRGStruct);
|
||||
klass->init = MRGInit;
|
||||
klass->grey = PoolTrivGrey;
|
||||
klass->blacken = PoolTrivBlacken;
|
||||
klass->scan = MRGScan;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -260,6 +260,7 @@ static Res MVInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
mv = CouldBeA(MVPool, pool);
|
||||
|
||||
pool->alignment = align;
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
|
||||
/* At 100% fragmentation we will need one block descriptor for every other */
|
||||
/* allocated block, or (extendBy/avgSize)/2 descriptors. See note 1. */
|
||||
|
|
@ -873,6 +874,7 @@ DEFINE_CLASS(Pool, MVPool, klass)
|
|||
klass->free = MVFree;
|
||||
klass->totalSize = MVTotalSize;
|
||||
klass->freeSize = MVFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -891,6 +893,7 @@ DEFINE_CLASS(Pool, MVDebugPool, klass)
|
|||
klass->size = sizeof(MVDebugStruct);
|
||||
klass->varargs = MVDebugVarargs;
|
||||
klass->debugMixin = MVDebugMixin;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* poolmv2.c: MANUAL VARIABLE-SIZED TEMPORAL POOL
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* .purpose: A manual-variable pool designed to take advantage of
|
||||
* placement according to predicted deathtime.
|
||||
|
|
@ -149,6 +149,7 @@ DEFINE_CLASS(Pool, MVTPool, klass)
|
|||
klass->bufferEmpty = MVTBufferEmpty;
|
||||
klass->totalSize = MVTTotalSize;
|
||||
klass->freeSize = MVTFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
/* Macros */
|
||||
|
|
@ -306,6 +307,7 @@ static Res MVTInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
goto failABQInit;
|
||||
|
||||
pool->alignment = align;
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
mvt->reuseSize = reuseSize;
|
||||
mvt->fillSize = fillSize;
|
||||
mvt->abqOverflow = FALSE;
|
||||
|
|
@ -1030,7 +1032,7 @@ static Res MVTDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
if (!TESTC(MVTPool, mvt))
|
||||
return ResPARAM;
|
||||
if (stream == NULL)
|
||||
return ResFAIL;
|
||||
return ResPARAM;
|
||||
|
||||
res = NextMethod(Inst, MVTPool, describe)(inst, stream, depth);
|
||||
if (res != ResOK)
|
||||
|
|
@ -1353,7 +1355,7 @@ static Bool MVTCheckFit(Addr base, Addr limit, Size min, Arena arena)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -519,6 +519,7 @@ static Res MVFFInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
mvff->extendBy = ArenaGrainSize(arena);
|
||||
mvff->avgSize = avgSize;
|
||||
pool->alignment = align;
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
mvff->slotHigh = slotHigh;
|
||||
mvff->firstFit = firstFit;
|
||||
mvff->spare = spare;
|
||||
|
|
@ -741,6 +742,7 @@ DEFINE_CLASS(Pool, MVFFPool, klass)
|
|||
klass->bufferEmpty = MVFFBufferEmpty;
|
||||
klass->totalSize = MVFFTotalSize;
|
||||
klass->freeSize = MVFFFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -759,6 +761,7 @@ DEFINE_CLASS(Pool, MVFFDebugPool, klass)
|
|||
klass->size = sizeof(MVFFDebugStruct);
|
||||
klass->varargs = MVFFDebugVarargs;
|
||||
klass->debugMixin = MVFFDebugMixin;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -156,90 +156,6 @@ static Res NDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
|||
}
|
||||
|
||||
|
||||
/* NWhiten -- condemn method for class N */
|
||||
|
||||
static Res NWhiten(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
PoolN poolN = MustBeA(NPool, pool);
|
||||
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
UNUSED(poolN);
|
||||
|
||||
NOTREACHED; /* pool doesn't have any actions */
|
||||
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
|
||||
/* NGrey -- greyen method for class N */
|
||||
|
||||
static void NGrey(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
PoolN poolN = MustBeA(NPool, pool);
|
||||
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
UNUSED(poolN);
|
||||
}
|
||||
|
||||
|
||||
/* NBlacken -- blacken method for class N */
|
||||
|
||||
static void NBlacken(Pool pool, TraceSet traceSet, Seg seg)
|
||||
{
|
||||
PoolN poolN = MustBeA(NPool, pool);
|
||||
|
||||
AVERT(TraceSet, traceSet);
|
||||
AVERT(Seg, seg);
|
||||
UNUSED(poolN);
|
||||
}
|
||||
|
||||
|
||||
/* NScan -- scan method for class N */
|
||||
|
||||
static Res NScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
{
|
||||
PoolN poolN = MustBeA(NPool, pool);
|
||||
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Seg, seg);
|
||||
UNUSED(poolN);
|
||||
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* NFix -- fix method for class N */
|
||||
|
||||
static Res NFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
{
|
||||
PoolN poolN = MustBeA(NPool, pool);
|
||||
|
||||
AVERT(ScanState, ss);
|
||||
UNUSED(refIO);
|
||||
AVERT(Seg, seg);
|
||||
UNUSED(poolN);
|
||||
NOTREACHED; /* Since we don't allocate any objects, should never */
|
||||
/* be called upon to fix a reference. */
|
||||
return ResFAIL;
|
||||
}
|
||||
|
||||
|
||||
/* NReclaim -- reclaim method for class N */
|
||||
|
||||
static void NReclaim(Pool pool, Trace trace, Seg seg)
|
||||
{
|
||||
PoolN poolN = MustBeA(NPool, pool);
|
||||
|
||||
AVERT(Trace, trace);
|
||||
AVERT(Seg, seg);
|
||||
UNUSED(poolN);
|
||||
/* all unmarked and white objects reclaimed */
|
||||
}
|
||||
|
||||
|
||||
/* NPoolClass -- pool class definition for N */
|
||||
|
||||
DEFINE_CLASS(Pool, NPool, klass)
|
||||
|
|
@ -254,13 +170,6 @@ DEFINE_CLASS(Pool, NPool, klass)
|
|||
klass->free = NFree;
|
||||
klass->bufferFill = NBufferFill;
|
||||
klass->bufferEmpty = NBufferEmpty;
|
||||
klass->whiten = NWhiten;
|
||||
klass->grey = NGrey;
|
||||
klass->blacken = NBlacken;
|
||||
klass->scan = NScan;
|
||||
klass->fix = NFix;
|
||||
klass->fixEmergency = NFix;
|
||||
klass->reclaim = NReclaim;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -45,12 +45,15 @@ typedef struct SNCStruct {
|
|||
|
||||
typedef SNC SNCPool;
|
||||
#define SNCPoolCheck SNCCheck
|
||||
DECLARE_CLASS(Pool, SNCPool, AbstractScanPool);
|
||||
DECLARE_CLASS(Pool, SNCPool, AbstractSegBufPool);
|
||||
|
||||
DECLARE_CLASS(Seg, SNCSeg, GCSeg);
|
||||
DECLARE_CLASS(Buffer, SNCBuf, RankBuf);
|
||||
static Bool SNCCheck(SNC snc);
|
||||
static void sncPopPartialSegChain(SNC snc, Buffer buf, Seg upTo);
|
||||
static Res sncSegScan(Bool *totalReturn, Seg seg, ScanState ss);
|
||||
static void sncSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s);
|
||||
|
||||
|
||||
/* Management of segment chains
|
||||
|
|
@ -161,6 +164,7 @@ DEFINE_CLASS(Buffer, SNCBuf, klass)
|
|||
klass->instClassStruct.finish = SNCBufFinish;
|
||||
klass->size = sizeof(SNCBufStruct);
|
||||
klass->init = SNCBufInit;
|
||||
AVERT(BufferClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -225,14 +229,32 @@ static Res sncSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
|
|||
}
|
||||
|
||||
|
||||
/* sncSegFinish -- finish an SNC segment */
|
||||
|
||||
static void sncSegFinish(Inst inst)
|
||||
{
|
||||
Seg seg = MustBeA(Seg, inst);
|
||||
SNCSeg sncseg = MustBeA(SNCSeg, seg);
|
||||
|
||||
sncseg->sig = SigInvalid;
|
||||
|
||||
/* finish the superclass fields last */
|
||||
NextMethod(Inst, SNCSeg, finish)(inst);
|
||||
}
|
||||
|
||||
|
||||
/* SNCSegClass -- Class definition for SNC segments */
|
||||
|
||||
DEFINE_CLASS(Seg, SNCSeg, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, SNCSeg, GCSeg);
|
||||
SegClassMixInNoSplitMerge(klass); /* no support for this (yet) */
|
||||
klass->instClassStruct.finish = sncSegFinish;
|
||||
klass->size = sizeof(SNCSegStruct);
|
||||
klass->init = sncSegInit;
|
||||
klass->scan = sncSegScan;
|
||||
klass->walk = sncSegWalk;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -367,6 +389,7 @@ static Res SNCInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
|
|||
AVER(pool->format != NULL);
|
||||
|
||||
pool->alignment = pool->format->alignment;
|
||||
pool->alignShift = SizeLog2(pool->alignment);
|
||||
snc->freeSegs = NULL;
|
||||
|
||||
SetClassOfPoly(pool, CLASS(SNCPool));
|
||||
|
|
@ -480,24 +503,20 @@ static void SNCBufferEmpty(Pool pool, Buffer buffer,
|
|||
}
|
||||
|
||||
|
||||
static Res SNCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
|
||||
static Res sncSegScan(Bool *totalReturn, Seg seg, ScanState ss)
|
||||
{
|
||||
Addr base, limit;
|
||||
Format format;
|
||||
SNC snc;
|
||||
Res res;
|
||||
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(ScanState, ss);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Pool, pool);
|
||||
snc = PoolSNC(pool);
|
||||
AVERT(SNC, snc);
|
||||
|
||||
format = pool->format;
|
||||
format = SegPool(seg)->format;
|
||||
base = SegBase(seg);
|
||||
limit = SegBufferScanLimit(seg);
|
||||
|
||||
|
||||
if (base < limit) {
|
||||
res = FormatScan(format, ss, base, limit);
|
||||
if (res != ResOK) {
|
||||
|
|
@ -588,11 +607,11 @@ static Res SNCFramePop(Pool pool, Buffer buf, AllocFrame frame)
|
|||
}
|
||||
|
||||
|
||||
static void SNCWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
static void sncSegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
AVERT(Pool, pool);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Format, format);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary closures and can't be checked */
|
||||
|
||||
|
|
@ -602,12 +621,8 @@ static void SNCWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
|
|||
Addr object = SegBase(seg);
|
||||
Addr nextObject;
|
||||
Addr limit;
|
||||
SNC snc;
|
||||
Format format;
|
||||
Pool pool = SegPool(seg);
|
||||
|
||||
snc = PoolSNC(pool);
|
||||
AVERT(SNC, snc);
|
||||
format = pool->format;
|
||||
limit = SegBufferScanLimit(seg);
|
||||
|
||||
while(object < limit) {
|
||||
|
|
@ -671,21 +686,19 @@ static Size SNCFreeSize(Pool pool)
|
|||
|
||||
DEFINE_CLASS(Pool, SNCPool, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, SNCPool, AbstractScanPool);
|
||||
PoolClassMixInFormat(klass);
|
||||
INHERIT_CLASS(klass, SNCPool, AbstractSegBufPool);
|
||||
klass->instClassStruct.finish = SNCFinish;
|
||||
klass->size = sizeof(SNCStruct);
|
||||
klass->varargs = SNCVarargs;
|
||||
klass->init = SNCInit;
|
||||
klass->bufferFill = SNCBufferFill;
|
||||
klass->bufferEmpty = SNCBufferEmpty;
|
||||
klass->scan = SNCScan;
|
||||
klass->framePush = SNCFramePush;
|
||||
klass->framePop = SNCFramePop;
|
||||
klass->walk = SNCWalk;
|
||||
klass->bufferClass = SNCBufClassGet;
|
||||
klass->totalSize = SNCTotalSize;
|
||||
klass->freeSize = SNCFreeSize;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* prmcix.c: MUTATOR CONTEXT (POSIX)
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2016-2017 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2016-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* .purpose: Implement the mutator context module. See <design/prmc/>.
|
||||
*
|
||||
|
|
@ -12,14 +12,16 @@
|
|||
* the context at pointer-aligned boundaries.
|
||||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI)
|
||||
#error "prmcix.c is specific to MPS_OS_FR or MPS_OS_LI"
|
||||
#endif
|
||||
|
||||
#include "prmcix.h"
|
||||
|
||||
SRCID(prmcix, "$Id$");
|
||||
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI)
|
||||
#error "prmcix.c is specific to MPS_OS_FR and MPS_OS_LI"
|
||||
#endif
|
||||
|
||||
|
||||
Bool MutatorContextCheck(MutatorContext context)
|
||||
{
|
||||
|
|
@ -82,7 +84,7 @@ Res MutatorContextScan(ScanState ss, MutatorContext context,
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2016-2017 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2016-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -35,15 +35,15 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
#include "vm.h"
|
||||
|
||||
#if !defined(MPS_OS_LI) && !defined(MPS_OS_FR) && !defined(MPS_OS_XC)
|
||||
#error "protix.c is Unix-specific, currently for MPS_OS_LI FR XC"
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC)
|
||||
#error "protix.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC"
|
||||
#endif
|
||||
|
||||
#include "vm.h"
|
||||
|
||||
#include <limits.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ DEFINE_CLASS(Inst, Inst, klass)
|
|||
{
|
||||
InstClassInitInternal(klass);
|
||||
klass->instStruct.klass = CLASS(InstClass);
|
||||
AVERT(InstClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Inst, InstClass, klass)
|
||||
|
|
@ -34,6 +35,7 @@ DEFINE_CLASS(Inst, InstClass, klass)
|
|||
klass->name = "InstClass";
|
||||
klass->level = ClassLevelInstClass;
|
||||
klass->display[ClassLevelInstClass] = CLASS_ID(InstClass);
|
||||
AVERT(InstClass, klass);
|
||||
}
|
||||
|
||||
static void InstClassInitInternal(InstClass klass)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* protocol.h: PROTOCOL INHERITANCE DEFINITIONS
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* See design.mps.protocol.
|
||||
*/
|
||||
|
|
@ -292,7 +292,7 @@ extern void ClassRegister(InstClass klass);
|
|||
*
|
||||
* This should only be used when specialising an instance to be a
|
||||
* member of a subclass, once the instance has been initialized. See
|
||||
* design.mps.protocol.if.set-class.
|
||||
* design.mps.protocol.if.set-class-of-poly.
|
||||
*/
|
||||
|
||||
#define SetClassOfPoly(inst, _class) \
|
||||
|
|
@ -332,7 +332,7 @@ extern void ClassRegister(InstClass klass);
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* protsgix.c: PROTECTION (SIGNAL HANDLER) FOR POSIX
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* This implements protection exception handling using POSIX signals.
|
||||
* It is designed to run on any POSIX-compliant Unix.
|
||||
|
|
@ -23,7 +23,7 @@
|
|||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI)
|
||||
#error "protsgix.c is Unix-specific, currently for MPS_OS_FR and MPS_OS_LI"
|
||||
#error "protsgix.c is specific to MPS_OS_FR or MPS_OS_LI"
|
||||
#endif
|
||||
|
||||
#include "prmcix.h"
|
||||
|
|
@ -144,7 +144,7 @@ void ProtSetup(void)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,17 +1,17 @@
|
|||
/* protw3.c: PROTECTION FOR WIN32
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*/
|
||||
|
||||
#include "prmcw3.h"
|
||||
#include "vm.h" /* PageSize */
|
||||
|
||||
#ifndef MPS_OS_W3
|
||||
#error "protw3.c is Win32-specific, but MPS_OS_W3 is not set"
|
||||
#if !defined(MPS_OS_W3)
|
||||
#error "protw3.c is specific to MPS_OS_W3"
|
||||
#endif
|
||||
|
||||
#include "mpswin.h"
|
||||
#include "vm.h" /* PageSize */
|
||||
|
||||
SRCID(protw3, "$Id$");
|
||||
|
||||
|
|
@ -138,7 +138,7 @@ void ProtSync(Arena arena)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -56,14 +56,14 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_XC)
|
||||
#error "protxc.c is specific to MPS_OS_XC"
|
||||
#endif
|
||||
|
||||
#include "prmcxc.h"
|
||||
#include "protxc.h"
|
||||
|
||||
#include <stdlib.h> /* see .trans.stdlib */
|
||||
#include <stdio.h> /* see .trans.stdlib */
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
#include <mach/mach_port.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/task.h>
|
||||
|
|
@ -72,10 +72,9 @@
|
|||
#include <mach/mach_error.h>
|
||||
#include <mach/i386/thread_status.h>
|
||||
#include <mach/exc.h>
|
||||
|
||||
#if !defined(MPS_OS_XC)
|
||||
#error "protxc.c is macOS specific"
|
||||
#endif
|
||||
#include <pthread.h>
|
||||
#include <stdlib.h> /* see .trans.stdlib */
|
||||
#include <stdio.h> /* see .trans.stdlib */
|
||||
|
||||
SRCID(protxc, "$Id$");
|
||||
|
||||
|
|
@ -377,15 +376,9 @@ static void protExcThreadStart(void)
|
|||
}
|
||||
|
||||
|
||||
/* atfork handlers -- support for fork(). See <design/thread-safety/> */
|
||||
|
||||
static void protAtForkPrepare(void)
|
||||
{
|
||||
}
|
||||
|
||||
static void protAtForkParent(void)
|
||||
{
|
||||
}
|
||||
/* protAtForkChild -- support for fork()
|
||||
* <design/thread-safety/#sol.fork.exc-thread>
|
||||
*/
|
||||
|
||||
static void protAtForkChild(void)
|
||||
{
|
||||
|
|
@ -402,7 +395,7 @@ static void protSetupInner(void)
|
|||
protExcThreadStart();
|
||||
|
||||
/* Install fork handlers <design/thread-safety/#sol.fork.atfork>. */
|
||||
pthread_atfork(protAtForkPrepare, protAtForkParent, protAtForkChild);
|
||||
pthread_atfork(NULL, NULL, protAtForkChild);
|
||||
}
|
||||
|
||||
void ProtSetup(void)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* pthreadext.c: POSIX THREAD EXTENSIONS
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* .purpose: Provides extension to Pthreads.
|
||||
*
|
||||
|
|
@ -12,19 +12,22 @@
|
|||
* (<David.Butenhof@compaq.com>, <rlau@csc.com>).
|
||||
*/
|
||||
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#include <pthread.h>
|
||||
#include <sched.h>
|
||||
#include <signal.h> /* see .feature.li in config.h */
|
||||
#include <semaphore.h>
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI)
|
||||
#error "protsgix.c is specific to MPS_OS_FR or MPS_OS_LI"
|
||||
#endif
|
||||
|
||||
#include "pthrdext.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <sched.h>
|
||||
#include <semaphore.h>
|
||||
#include <signal.h> /* see .feature.li in config.h */
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
SRCID(pthreadext, "$Id$");
|
||||
|
||||
|
||||
|
|
@ -353,7 +356,7 @@ unlock:
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* qs.c: QUICKSORT
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* The purpose of this program is to act as a "real" client of the MM.
|
||||
* It is a test, but (hopefully) less contrived than some of the other
|
||||
|
|
@ -415,7 +415,7 @@ static mps_res_t scan1(mps_ss_t ss, mps_addr_t *objectIO)
|
|||
if(res != MPS_RES_OK)
|
||||
return res;
|
||||
cell->value = addr;
|
||||
/* fall */
|
||||
/* fall through */
|
||||
|
||||
case QSInt:
|
||||
fixTail:
|
||||
|
|
@ -538,7 +538,7 @@ int main(int argc, char *argv[])
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
425
mps/code/seg.c
425
mps/code/seg.c
|
|
@ -686,6 +686,142 @@ failControl:
|
|||
}
|
||||
|
||||
|
||||
/* SegAccess -- mutator read/write access to a segment */
|
||||
|
||||
Res SegAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Arena, arena);
|
||||
AVER(arena == PoolArena(SegPool(seg)));
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
|
||||
return Method(Seg, seg, access)(seg, arena, addr, mode, context);
|
||||
}
|
||||
|
||||
|
||||
/* SegWhiten -- whiten objects */
|
||||
|
||||
Res SegWhiten(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
AVER(PoolArena(SegPool(seg)) == trace->arena);
|
||||
return Method(Seg, seg, whiten)(seg, trace);
|
||||
}
|
||||
|
||||
|
||||
/* SegGreyen -- greyen non-white objects */
|
||||
|
||||
void SegGreyen(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
AVER(PoolArena(SegPool(seg)) == trace->arena);
|
||||
Method(Seg, seg, greyen)(seg, trace);
|
||||
}
|
||||
|
||||
|
||||
/* SegBlacken -- blacken grey objects without scanning */
|
||||
|
||||
void SegBlacken(Seg seg, TraceSet traceSet)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(TraceSet, traceSet);
|
||||
Method(Seg, seg, blacken)(seg, traceSet);
|
||||
}
|
||||
|
||||
|
||||
/* SegScan -- scan a segment */
|
||||
|
||||
Res SegScan(Bool *totalReturn, Seg seg, ScanState ss)
|
||||
{
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(ScanState, ss);
|
||||
AVER(PoolArena(SegPool(seg)) == ss->arena);
|
||||
|
||||
/* We check that either ss->rank is in the segment's
|
||||
* ranks, or that ss->rank is exact. The check is more complicated if
|
||||
* we actually have multiple ranks in a seg.
|
||||
* See <code/trace.c#scan.conservative> */
|
||||
AVER(ss->rank == RankEXACT || RankSetIsMember(SegRankSet(seg), ss->rank));
|
||||
|
||||
/* Should only scan segments which contain grey objects. */
|
||||
AVER(TraceSetInter(SegGrey(seg), ss->traces) != TraceSetEMPTY);
|
||||
|
||||
return Method(Seg, seg, scan)(totalReturn, seg, ss);
|
||||
}
|
||||
|
||||
|
||||
/* SegFix* -- fix a reference to an object in this segment
|
||||
*
|
||||
* See <design/pool/#req.fix>.
|
||||
*/
|
||||
|
||||
Res SegFix(Seg seg, ScanState ss, Addr *refIO)
|
||||
{
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVERT_CRITICAL(ScanState, ss);
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
|
||||
/* Should only be fixing references to white segments. */
|
||||
AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
|
||||
|
||||
return Method(Seg, seg, fix)(seg, ss, refIO);
|
||||
}
|
||||
|
||||
Res SegFixEmergency(Seg seg, ScanState ss, Addr *refIO)
|
||||
{
|
||||
Res res;
|
||||
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVERT_CRITICAL(ScanState, ss);
|
||||
AVER_CRITICAL(refIO != NULL);
|
||||
|
||||
/* Should only be fixing references to white segments. */
|
||||
AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);
|
||||
|
||||
res = Method(Seg, seg, fixEmergency)(seg, ss, refIO);
|
||||
AVER_CRITICAL(res == ResOK);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
/* SegReclaim -- reclaim a segment */
|
||||
|
||||
void SegReclaim(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT_CRITICAL(Seg, seg);
|
||||
AVERT_CRITICAL(Trace, trace);
|
||||
AVER_CRITICAL(PoolArena(SegPool(seg)) == trace->arena);
|
||||
|
||||
/* There shouldn't be any grey things left for this trace. */
|
||||
AVER_CRITICAL(!TraceSetIsMember(SegGrey(seg), trace));
|
||||
/* Should only be reclaiming segments which are still white. */
|
||||
AVER_CRITICAL(TraceSetIsMember(SegWhite(seg), trace));
|
||||
|
||||
Method(Seg, seg, reclaim)(seg, trace);
|
||||
}
|
||||
|
||||
|
||||
/* SegWalk -- walk objects in this segment */
|
||||
|
||||
void SegWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Format, format);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary values, hence can't be checked. */
|
||||
|
||||
Method(Seg, seg, walk)(seg, format, f, p, s);
|
||||
}
|
||||
|
||||
|
||||
/* Class Seg -- The most basic segment class
|
||||
*
|
||||
* .seg.method.check: Many seg methods are lightweight and used
|
||||
|
|
@ -1028,6 +1164,211 @@ static Res segTrivSplit(Seg seg, Seg segHi,
|
|||
}
|
||||
|
||||
|
||||
/* segNoAccess -- access method for non-GC segs
|
||||
*
|
||||
* Should be used (for the access method) by segment classes which do
|
||||
* not expect to ever have pages which the mutator will fault on. That
|
||||
* is, no protected pages, or only pages which are inaccessible by the
|
||||
* mutator are protected.
|
||||
*/
|
||||
static Res segNoAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Arena, arena);
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
UNUSED(mode);
|
||||
UNUSED(context);
|
||||
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
|
||||
/* SegWholeAccess
|
||||
*
|
||||
* See also SegSingleAccess
|
||||
*
|
||||
* Should be used (for the access method) by segment classes which
|
||||
* intend to handle page faults by scanning the entire segment and
|
||||
* lowering the barrier.
|
||||
*/
|
||||
Res SegWholeAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Arena, arena);
|
||||
AVER(arena == PoolArena(SegPool(seg)));
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
|
||||
UNUSED(addr);
|
||||
UNUSED(context);
|
||||
TraceSegAccess(arena, seg, mode);
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* SegSingleAccess
|
||||
*
|
||||
* See also ArenaRead, and SegWhileAccess.
|
||||
*
|
||||
* Handles page faults by attempting emulation. If the faulting
|
||||
* instruction cannot be emulated then this function returns ResFAIL.
|
||||
*
|
||||
* Due to the assumptions made below, segment classes should only use
|
||||
* this function if all words in an object are tagged or traceable.
|
||||
*
|
||||
* .single-access.assume.ref: It currently assumes that the address
|
||||
* being faulted on contains a plain reference or a tagged
|
||||
* non-reference.
|
||||
*
|
||||
* .single-access.improve.format: Later this will be abstracted
|
||||
* through the client object format interface, so that no such
|
||||
* assumption is necessary.
|
||||
*/
|
||||
Res SegSingleAccess(Seg seg, Arena arena, Addr addr,
|
||||
AccessSet mode, MutatorContext context)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Arena, arena);
|
||||
AVER(arena == PoolArena(SegPool(seg)));
|
||||
AVER(SegBase(seg) <= addr);
|
||||
AVER(addr < SegLimit(seg));
|
||||
AVERT(AccessSet, mode);
|
||||
AVERT(MutatorContext, context);
|
||||
|
||||
if (MutatorContextCanStepInstruction(context)) {
|
||||
Ref ref;
|
||||
Res res;
|
||||
|
||||
ShieldExpose(arena, seg);
|
||||
|
||||
if(mode & SegSM(seg) & AccessREAD) {
|
||||
/* Read access. */
|
||||
/* .single-access.assume.ref */
|
||||
/* .single-access.improve.format */
|
||||
ref = *(Ref *)addr;
|
||||
/* .tagging: Check that the reference is aligned to a word boundary */
|
||||
/* (we assume it is not a reference otherwise). */
|
||||
if(WordIsAligned((Word)ref, sizeof(Word))) {
|
||||
Rank rank;
|
||||
/* See the note in TraceRankForAccess */
|
||||
/* (<code/trace.c#scan.conservative>). */
|
||||
|
||||
rank = TraceRankForAccess(arena, seg);
|
||||
TraceScanSingleRef(arena->flippedTraces, rank, arena,
|
||||
seg, (Ref *)addr);
|
||||
}
|
||||
}
|
||||
res = MutatorContextStepInstruction(context);
|
||||
AVER(res == ResOK);
|
||||
|
||||
/* Update SegSummary according to the possibly changed reference. */
|
||||
ref = *(Ref *)addr;
|
||||
/* .tagging: ought to check the reference for a tag. But
|
||||
* this is conservative. */
|
||||
SegSetSummary(seg, RefSetAdd(arena, SegSummary(seg), ref));
|
||||
|
||||
ShieldCover(arena, seg);
|
||||
|
||||
return ResOK;
|
||||
} else {
|
||||
/* couldn't single-step instruction */
|
||||
return ResFAIL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* segNoWhiten -- whiten method for non-GC segs */
|
||||
|
||||
static Res segNoWhiten(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
AVER(PoolArena(SegPool(seg)) == trace->arena);
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
|
||||
/* segNoGreyen -- greyen method for non-GC segs */
|
||||
|
||||
static void segNoGreyen(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
AVER(PoolArena(SegPool(seg)) == trace->arena);
|
||||
NOTREACHED;
|
||||
}
|
||||
|
||||
|
||||
/* segNoGreyen -- blacken method for non-GC segs */
|
||||
|
||||
static void segNoBlacken(Seg seg, TraceSet traceSet)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(TraceSet, traceSet);
|
||||
NOTREACHED;
|
||||
}
|
||||
|
||||
|
||||
/* segNoScan -- scan method for non-GC segs */
|
||||
|
||||
static Res segNoScan(Bool *totalReturn, Seg seg, ScanState ss)
|
||||
{
|
||||
AVER(totalReturn != NULL);
|
||||
AVERT(Seg, seg);
|
||||
AVERT(ScanState, ss);
|
||||
AVER(PoolArena(SegPool(seg)) == ss->arena);
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
|
||||
/* segNoFix -- fix method for non-GC segs */
|
||||
|
||||
static Res segNoFix(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(ScanState, ss);
|
||||
AVER(refIO != NULL);
|
||||
NOTREACHED;
|
||||
return ResUNIMPL;
|
||||
}
|
||||
|
||||
|
||||
/* segNoReclaim -- reclaim method for non-GC segs */
|
||||
|
||||
static void segNoReclaim(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
AVER(PoolArena(SegPool(seg)) == trace->arena);
|
||||
NOTREACHED;
|
||||
}
|
||||
|
||||
|
||||
/* segTrivWalk -- walk method for non-formatted segs */
|
||||
|
||||
static void segTrivWalk(Seg seg, Format format, FormattedObjectsVisitor f,
|
||||
void *p, size_t s)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Format, format);
|
||||
AVER(FUNCHECK(f));
|
||||
/* p and s are arbitrary, hence can't be checked */
|
||||
UNUSED(p);
|
||||
UNUSED(s);
|
||||
NOOP;
|
||||
}
|
||||
|
||||
|
||||
/* Class GCSeg -- Segment class with GC support
|
||||
*/
|
||||
|
||||
|
|
@ -1551,6 +1892,52 @@ failSuper:
|
|||
}
|
||||
|
||||
|
||||
/* gcSegWhiten -- GCSeg white method */
|
||||
|
||||
static Res gcSegWhiten(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
AVER(PoolArena(SegPool(seg)) == trace->arena);
|
||||
|
||||
SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace));
|
||||
|
||||
return ResOK;
|
||||
}
|
||||
|
||||
|
||||
/* gcSegGreyen -- GCSeg greyen method
|
||||
*
|
||||
* If we had a (partially) white segment, then other parts of the same
|
||||
* segment might need to get greyed. In fact, all current pools only
|
||||
* ever whiten a whole segment, so we never need to greyen any part of
|
||||
* an already whitened segment. So we exclude white segments.
|
||||
*/
|
||||
|
||||
static void gcSegGreyen(Seg seg, Trace trace)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(Trace, trace);
|
||||
AVER(PoolArena(SegPool(seg)) == trace->arena);
|
||||
|
||||
if (!TraceSetIsMember(SegWhite(seg), trace))
|
||||
SegSetGrey(seg, TraceSetSingle(trace));
|
||||
}
|
||||
|
||||
|
||||
/* gcSegTrivBlacken -- GCSeg trivial blacken method
|
||||
*
|
||||
* For segments which do not keep additional colour information.
|
||||
*/
|
||||
|
||||
static void gcSegTrivBlacken(Seg seg, TraceSet traceSet)
|
||||
{
|
||||
AVERT(Seg, seg);
|
||||
AVERT(TraceSet, traceSet);
|
||||
NOOP;
|
||||
}
|
||||
|
||||
|
||||
/* gcSegDescribe -- GCSeg description method */
|
||||
|
||||
static Res gcSegDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
|
||||
|
|
@ -1599,6 +1986,25 @@ Bool SegClassCheck(SegClass klass)
|
|||
CHECKL(FUNCHECK(klass->setRankSummary));
|
||||
CHECKL(FUNCHECK(klass->merge));
|
||||
CHECKL(FUNCHECK(klass->split));
|
||||
CHECKL(FUNCHECK(klass->access));
|
||||
CHECKL(FUNCHECK(klass->whiten));
|
||||
CHECKL(FUNCHECK(klass->greyen));
|
||||
CHECKL(FUNCHECK(klass->blacken));
|
||||
CHECKL(FUNCHECK(klass->scan));
|
||||
CHECKL(FUNCHECK(klass->fix));
|
||||
CHECKL(FUNCHECK(klass->fixEmergency));
|
||||
CHECKL(FUNCHECK(klass->reclaim));
|
||||
CHECKL(FUNCHECK(klass->walk));
|
||||
|
||||
/* Check that segment classes override sets of related methods. */
|
||||
CHECKL((klass->init == SegAbsInit)
|
||||
== (klass->instClassStruct.finish == SegAbsFinish));
|
||||
CHECKL((klass->init == gcSegInit)
|
||||
== (klass->instClassStruct.finish == gcSegFinish));
|
||||
CHECKL((klass->merge == segTrivMerge) == (klass->split == segTrivSplit));
|
||||
CHECKL((klass->fix == segNoFix) == (klass->fixEmergency == segNoFix));
|
||||
CHECKL((klass->fix == segNoFix) == (klass->reclaim == segNoReclaim));
|
||||
|
||||
CHECKS(SegClass, klass);
|
||||
return TRUE;
|
||||
}
|
||||
|
|
@ -1609,6 +2015,7 @@ Bool SegClassCheck(SegClass klass)
|
|||
DEFINE_CLASS(Inst, SegClass, klass)
|
||||
{
|
||||
INHERIT_CLASS(klass, SegClass, InstClass);
|
||||
AVERT(InstClass, klass);
|
||||
}
|
||||
|
||||
DEFINE_CLASS(Seg, Seg, klass)
|
||||
|
|
@ -1628,6 +2035,15 @@ DEFINE_CLASS(Seg, Seg, klass)
|
|||
klass->setRankSummary = segNoSetRankSummary;
|
||||
klass->merge = segTrivMerge;
|
||||
klass->split = segTrivSplit;
|
||||
klass->access = segNoAccess;
|
||||
klass->whiten = segNoWhiten;
|
||||
klass->greyen = segNoGreyen;
|
||||
klass->blacken = segNoBlacken;
|
||||
klass->scan = segNoScan;
|
||||
klass->fix = segNoFix;
|
||||
klass->fixEmergency = segNoFix;
|
||||
klass->reclaim = segNoReclaim;
|
||||
klass->walk = segTrivWalk;
|
||||
klass->sig = SegClassSig;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
|
@ -1654,6 +2070,15 @@ DEFINE_CLASS(Seg, GCSeg, klass)
|
|||
klass->setRankSummary = gcSegSetRankSummary;
|
||||
klass->merge = gcSegMerge;
|
||||
klass->split = gcSegSplit;
|
||||
klass->access = SegWholeAccess;
|
||||
klass->whiten = gcSegWhiten;
|
||||
klass->greyen = gcSegGreyen;
|
||||
klass->blacken = gcSegTrivBlacken;
|
||||
klass->scan = segNoScan; /* no useful default method */
|
||||
klass->fix = segNoFix; /* no useful default method */
|
||||
klass->fixEmergency = segNoFix; /* no useful default method */
|
||||
klass->reclaim = segNoReclaim; /* no useful default method */
|
||||
klass->walk = segTrivWalk;
|
||||
AVERT(SegClass, klass);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -381,23 +381,14 @@ static Bool AMSSegIsFree(Seg seg)
|
|||
|
||||
static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit)
|
||||
{
|
||||
AMSSeg amsseg;
|
||||
AMS ams;
|
||||
Count bgrain, lgrain;
|
||||
Addr sbase;
|
||||
|
||||
AVERT(Seg, seg);
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
sbase = SegBase(seg);
|
||||
ams = PoolAMS(SegPool(seg));
|
||||
|
||||
bgrain = AMSGrains(ams, AddrOffset(sbase, base));
|
||||
lgrain = AMSGrains(ams, AddrOffset(sbase, limit));
|
||||
AMSSeg amsseg = MustBeA(AMSSeg, seg);
|
||||
Index baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
|
||||
|
||||
if (amsseg->allocTableInUse) {
|
||||
return BTIsResRange(amsseg->allocTable, bgrain, lgrain);
|
||||
Index limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
|
||||
return BTIsResRange(amsseg->allocTable, baseIndex, limitIndex);
|
||||
} else {
|
||||
return amsseg->firstFree <= bgrain;
|
||||
return amsseg->firstFree <= baseIndex;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -416,8 +407,8 @@ static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
|
|||
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
|
||||
baseIndex = AMS_ADDR_INDEX(seg, base);
|
||||
limitIndex = AMS_ADDR_INDEX(seg, limit);
|
||||
baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
|
||||
limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
|
||||
|
||||
if (amsseg->allocTableInUse) {
|
||||
/* check that it's allocated */
|
||||
|
|
@ -441,7 +432,8 @@ static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
|
|||
AVER(amsseg->bufferedGrains >= unallocatedGrains);
|
||||
amsseg->freeGrains += unallocatedGrains;
|
||||
amsseg->bufferedGrains -= unallocatedGrains;
|
||||
PoolGenAccountForEmpty(ams->pgen, 0, AMSGrainsSize(ams, unallocatedGrains),
|
||||
PoolGenAccountForEmpty(ams->pgen, 0,
|
||||
PoolGrainsSize(AMSPool(ams), unallocatedGrains),
|
||||
FALSE);
|
||||
}
|
||||
|
||||
|
|
@ -460,8 +452,8 @@ static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
|
|||
|
||||
amsseg = Seg2AMSSeg(seg);
|
||||
|
||||
baseIndex = AMS_ADDR_INDEX(seg, base);
|
||||
limitIndex = AMS_ADDR_INDEX(seg, limit);
|
||||
baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
|
||||
limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
|
||||
|
||||
if (amsseg->allocTableInUse) {
|
||||
/* check that it's not allocated */
|
||||
|
|
@ -655,6 +647,7 @@ DEFINE_CLASS(Pool, AMSTPool, klass)
|
|||
klass->size = sizeof(AMSTStruct);
|
||||
klass->init = AMSTInit;
|
||||
klass->bufferFill = AMSTBufferFill;
|
||||
AVERT(PoolClass, klass);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* splay.c: SPLAY TREE IMPLEMENTATION
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* .purpose: Splay trees are used to manage potentially unbounded
|
||||
* collections of ordered things. In the MPS these are usually
|
||||
|
|
@ -753,7 +753,7 @@ Bool SplayTreeInsert(SplayTree splay, Tree node) {
|
|||
switch (SplaySplay(splay, splay->nodeKey(node), splay->compare)) {
|
||||
default:
|
||||
NOTREACHED;
|
||||
/* defensive fall-through */
|
||||
/* fall through */
|
||||
case CompareEQUAL: /* duplicate node */
|
||||
return FALSE;
|
||||
|
||||
|
|
@ -930,7 +930,7 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn,
|
|||
switch (cmp) {
|
||||
default:
|
||||
NOTREACHED;
|
||||
/* defensive fall-through */
|
||||
/* fall through */
|
||||
case CompareEQUAL:
|
||||
found = FALSE;
|
||||
break;
|
||||
|
|
@ -1005,7 +1005,7 @@ Tree SplayTreeNext(SplayTree splay, TreeKey oldKey) {
|
|||
switch (SplaySplay(splay, oldKey, splay->compare)) {
|
||||
default:
|
||||
NOTREACHED;
|
||||
/* defensive fall-through */
|
||||
/* fall through */
|
||||
case CompareLESS:
|
||||
return SplayTreeRoot(splay);
|
||||
|
||||
|
|
@ -1394,7 +1394,7 @@ Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, Count depth,
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* spw3i3.c: STACK PROBE FOR 32-BIT WINDOWS
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
* Portions copyright (C) 2001 Global Graphics Software.
|
||||
*
|
||||
* This function reads a location that is depth words beyond the
|
||||
|
|
@ -13,6 +13,10 @@
|
|||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_W3) && !defined(MPS_ARCH_I3)
|
||||
#error "spw3i3.c is specific to MPS_OS_W3 and MPS_ARCH_I3"
|
||||
#endif
|
||||
|
||||
#ifdef MPS_BUILD_PC
|
||||
|
||||
/* "[ISO] Inline assembly code is not portable." */
|
||||
|
|
@ -33,7 +37,7 @@ void StackProbe(Size depth)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* spw3i6.c: STACK PROBE FOR 64-BIT WINDOWS
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2013-2014 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2013-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* The function StackProbe ensures that the stack has at least depth
|
||||
* words available. It achieves this by exploiting an obscure but
|
||||
|
|
@ -10,9 +10,14 @@
|
|||
* _alloca: http://msdn.microsoft.com/en-us/library/wb1s57t5.aspx
|
||||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_W3)
|
||||
#error "spw3i3.c is specific to MPS_OS_W3"
|
||||
#endif
|
||||
|
||||
#include <stdlib.h> /* _alloca */
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
void StackProbe(Size depth)
|
||||
{
|
||||
|
|
@ -22,7 +27,7 @@ void StackProbe(Size depth)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2013-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -39,9 +39,12 @@
|
|||
*
|
||||
*/
|
||||
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if (!defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC)) || !defined(MPS_ARCH_I3)
|
||||
#error "ssixi3.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC, and MPS_ARCH_I3"
|
||||
#endif
|
||||
|
||||
SRCID(ssixi3, "$Id$");
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -37,9 +37,12 @@
|
|||
*
|
||||
*/
|
||||
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if (!defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC)) || !defined(MPS_ARCH_I6)
|
||||
#error "ssixi3.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC, and MPS_ARCH_I6"
|
||||
#endif
|
||||
|
||||
SRCID(ssixi6, "$Id$");
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* ssw3i3mv.c: STACK SCANNING FOR WIN32 WITH MICROSOFT C
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* This scans the stack and fixes the registers which may contain roots.
|
||||
* See <design/thread-manager/>.
|
||||
|
|
@ -17,6 +17,11 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_W3) || !defined(MPS_ARCH_I3) || !defined(MPS_BUILD_MV)
|
||||
#error "ssw3i3mv.c is specific to MPS_OS_W3, MPS_ARCH_I3 and MPS_BUILD_MV"
|
||||
#endif
|
||||
|
||||
#include <setjmp.h>
|
||||
|
||||
SRCID(ssw3i3mv, "$Id$");
|
||||
|
|
@ -50,7 +55,7 @@ Res StackScan(ScanState ss, Word *stackCold,
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2002 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* ssw3i3pc.c: STACK SCANNING FOR WIN32 WITH PELLES C
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* This scans the stack and fixes the registers which may contain roots.
|
||||
* See <design/thread-manager/>.
|
||||
|
|
@ -23,6 +23,11 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_W3) || !defined(MPS_ARCH_I3) || !defined(MPS_BUILD_PC)
|
||||
#error "ssw3i3pc.c is specific to MPS_OS_W3, MPS_ARCH_I3 and MPS_BUILD_PC"
|
||||
#endif
|
||||
|
||||
#include <setjmp.h>
|
||||
|
||||
SRCID(ssw3i3pc, "$Id$");
|
||||
|
|
@ -74,7 +79,7 @@ Res StackScan(ScanState ss, Word *stackCold,
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* ssw3i6mv.c: STACK SCANNING FOR WIN64 WITH MICROSOFT C
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* This scans the stack and fixes the registers which may contain roots.
|
||||
* See <design/thread-manager/>. It's unlikely that the callee-save
|
||||
|
|
@ -25,6 +25,11 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_W3) || !defined(MPS_ARCH_I6) || !defined(MPS_BUILD_MV)
|
||||
#error "ssw3i3mv.c is specific to MPS_OS_W3, MPS_ARCH_I6 and MPS_BUILD_MV"
|
||||
#endif
|
||||
|
||||
#include <setjmp.h>
|
||||
|
||||
SRCID(ssw3i6mv, "$Id$");
|
||||
|
|
@ -68,7 +73,7 @@ Res StackScan(ScanState ss, Word *stackCold,
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* ssw3i6pc.c: STACK SCANNING FOR WIN64 WITH PELLES C
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* This scans the stack and fixes the registers which may contain roots.
|
||||
* See <design/thread-manager/>.
|
||||
|
|
@ -29,6 +29,11 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_W3) || !defined(MPS_ARCH_I6) || !defined(MPS_BUILD_PC)
|
||||
#error "ssw3i3mv.c is specific to MPS_OS_W3, MPS_ARCH_I6 and MPS_BUILD_PC"
|
||||
#endif
|
||||
|
||||
#include <setjmp.h>
|
||||
|
||||
SRCID(ssw3i6pc, "$Id$");
|
||||
|
|
@ -106,7 +111,7 @@ Res StackScan(ScanState ss, Word *stackCold, Word mask, Word pattern)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* thix.c: Threads Manager for Posix threads
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* .purpose: This is a pthreads implementation of the threads manager.
|
||||
* This implements <code/th.h>.
|
||||
|
|
@ -32,12 +32,17 @@
|
|||
* word-aligned at the time of reading the context of another thread.
|
||||
*/
|
||||
|
||||
#include "prmcix.h"
|
||||
#include "mpm.h"
|
||||
|
||||
#include <pthread.h>
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI)
|
||||
#error "thix.c is specific to MPS_OS_FR or MPS_OS_LI"
|
||||
#endif
|
||||
|
||||
#include "prmcix.h"
|
||||
#include "pthrdext.h"
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
SRCID(thix, "$Id$");
|
||||
|
||||
|
||||
|
|
@ -342,7 +347,7 @@ void ThreadSetup(void)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* thw3.c: WIN32 THREAD MANAGER
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* Implements thread registration, suspension, and stack and register
|
||||
* scanning. See <design/thread-manager/>.
|
||||
|
|
@ -55,7 +55,7 @@
|
|||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_W3) /* .nt */
|
||||
#error "Compiling thw3 when MPS_OS_W3 not defined."
|
||||
#error "thw3.c is specific to MPS_OS_W3"
|
||||
#endif
|
||||
|
||||
#include "prmcw3.h"
|
||||
|
|
@ -320,7 +320,7 @@ void ThreadSetup(void)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -20,6 +20,11 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_XC)
|
||||
#error "protw3.c is specific to MPS_OS_XC"
|
||||
#endif
|
||||
|
||||
#include "protxc.h"
|
||||
|
||||
#include <mach/mach_init.h>
|
||||
|
|
|
|||
|
|
@ -38,7 +38,6 @@ Bool ScanStateCheck(ScanState ss)
|
|||
|
||||
CHECKS(ScanState, ss);
|
||||
CHECKL(FUNCHECK(ss->fix));
|
||||
/* Can't check ss->fixClosure. */
|
||||
CHECKL(ScanStateZoneShift(ss) == ss->arena->zoneShift);
|
||||
white = ZoneSetEMPTY;
|
||||
TRACE_SET_ITER(ti, trace, ss->traces, ss->arena)
|
||||
|
|
@ -69,27 +68,24 @@ void ScanStateInit(ScanState ss, TraceSet ts, Arena arena,
|
|||
AVERT(Rank, rank);
|
||||
/* white is arbitrary and can't be checked */
|
||||
|
||||
/* NOTE: We can only currently support scanning for a set of traces with
|
||||
the same fix method and closure. To remove this restriction,
|
||||
it would be necessary to dispatch to the fix methods of sets of traces
|
||||
in TraceFix. */
|
||||
/* NOTE: We can only currently support scanning for a set of traces
|
||||
with the same fix method. To remove this restriction, it would be
|
||||
necessary to dispatch to the fix methods of sets of traces in
|
||||
TraceFix. */
|
||||
ss->fix = NULL;
|
||||
ss->fixClosure = NULL;
|
||||
TRACE_SET_ITER(ti, trace, ts, arena) {
|
||||
if (ss->fix == NULL) {
|
||||
ss->fix = trace->fix;
|
||||
ss->fixClosure = trace->fixClosure;
|
||||
} else {
|
||||
AVER(ss->fix == trace->fix);
|
||||
AVER(ss->fixClosure == trace->fixClosure);
|
||||
}
|
||||
} TRACE_SET_ITER_END(ti, trace, ts, arena);
|
||||
AVER(ss->fix != NULL);
|
||||
|
||||
/* If the fix method is the normal GC fix, then we optimise the test for
|
||||
whether it's an emergency or not by updating the dispatch here, once. */
|
||||
if (ss->fix == PoolFix && ArenaEmergency(arena))
|
||||
ss->fix = PoolFixEmergency;
|
||||
if (ss->fix == SegFix && ArenaEmergency(arena))
|
||||
ss->fix = SegFixEmergency;
|
||||
|
||||
ss->rank = rank;
|
||||
ss->traces = ts;
|
||||
|
|
@ -192,7 +188,6 @@ Bool TraceCheck(Trace trace)
|
|||
CHECKU(Chain, trace->chain);
|
||||
}
|
||||
CHECKL(FUNCHECK(trace->fix));
|
||||
/* Can't check trace->fixClosure. */
|
||||
|
||||
/* @@@@ checks for counts missing */
|
||||
|
||||
|
|
@ -365,7 +360,7 @@ Res TraceAddWhite(Trace trace, Seg seg)
|
|||
|
||||
/* Give the pool the opportunity to turn the segment white. */
|
||||
/* If it fails, unwind. */
|
||||
res = PoolWhiten(pool, trace, seg);
|
||||
res = SegWhiten(seg, trace);
|
||||
if(res != ResOK)
|
||||
return res;
|
||||
|
||||
|
|
@ -673,8 +668,7 @@ found:
|
|||
trace->ti = ti;
|
||||
trace->state = TraceINIT;
|
||||
trace->band = RankMIN;
|
||||
trace->fix = PoolFix;
|
||||
trace->fixClosure = NULL;
|
||||
trace->fix = SegFix;
|
||||
trace->chain = NULL;
|
||||
STATISTIC(trace->preTraceArenaReserved = ArenaReserved(arena));
|
||||
trace->condemned = (Size)0; /* nothing condemned yet */
|
||||
|
|
@ -844,7 +838,7 @@ static void traceReclaim(Trace trace)
|
|||
if(TraceSetIsMember(SegWhite(seg), trace)) {
|
||||
AVER_CRITICAL(PoolHasAttr(pool, AttrGC));
|
||||
STATISTIC(++trace->reclaimCount);
|
||||
PoolReclaim(pool, trace, seg);
|
||||
SegReclaim(seg, trace);
|
||||
|
||||
/* If the segment still exists, it should no longer be white. */
|
||||
/* Note that the seg returned by this SegOfAddr may not be */
|
||||
|
|
@ -1099,7 +1093,7 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
|
|||
|
||||
/* Only scan a segment if it refers to the white set. */
|
||||
if(ZoneSetInter(white, SegSummary(seg)) == ZoneSetEMPTY) {
|
||||
PoolBlacken(SegPool(seg), ts, seg);
|
||||
SegBlacken(seg, ts);
|
||||
/* Setup result code to return later. */
|
||||
res = ResOK;
|
||||
} else { /* scan it */
|
||||
|
|
@ -1109,7 +1103,7 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
|
|||
|
||||
/* Expose the segment to make sure we can scan it. */
|
||||
ShieldExpose(arena, seg);
|
||||
res = PoolScan(&wasTotal, ss, SegPool(seg), seg);
|
||||
res = SegScan(&wasTotal, seg, ss);
|
||||
/* Cover, regardless of result */
|
||||
ShieldCover(arena, seg);
|
||||
|
||||
|
|
@ -1292,7 +1286,6 @@ mps_res_t _mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io)
|
|||
Tract tract;
|
||||
Seg seg;
|
||||
Res res;
|
||||
Pool pool;
|
||||
|
||||
/* Special AVER macros are used on the critical path. */
|
||||
/* See <design/trace/#fix.noaver> */
|
||||
|
|
@ -1355,11 +1348,10 @@ mps_res_t _mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io)
|
|||
STATISTIC(++ss->whiteSegRefCount);
|
||||
EVENT1(TraceFixSeg, seg);
|
||||
EVENT0(TraceFixWhite);
|
||||
pool = TractPool(tract);
|
||||
res = (*ss->fix)(pool, ss, seg, &ref);
|
||||
res = (*ss->fix)(seg, ss, &ref);
|
||||
if (res != ResOK) {
|
||||
/* PoolFixEmergency must not fail. */
|
||||
AVER_CRITICAL(ss->fix != PoolFixEmergency);
|
||||
/* SegFixEmergency must not fail. */
|
||||
AVER_CRITICAL(ss->fix != SegFixEmergency);
|
||||
/* Fix protocol (de facto): if Fix fails, ref must be unchanged
|
||||
* Justification for this restriction:
|
||||
* A: it simplifies;
|
||||
|
|
@ -1610,7 +1602,7 @@ Res TraceStart(Trace trace, double mortality, double finishingTime)
|
|||
/* Note: can a white seg get greyed as well? At this point */
|
||||
/* we still assume it may. (This assumption runs out in */
|
||||
/* PoolTrivGrey). */
|
||||
PoolGrey(SegPool(seg), trace, seg);
|
||||
SegGreyen(seg, trace);
|
||||
if(TraceSetIsMember(SegGrey(seg), trace)) {
|
||||
trace->foundation += size;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* traceanc.c: ANCILLARY SUPPORT FOR TRACER
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2016 Ravenbrook Limited.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited.
|
||||
* See end of file for license.
|
||||
* Portions copyright (C) 2002 Global Graphics Software.
|
||||
*
|
||||
|
|
@ -437,11 +437,9 @@ void TracePostMessage(Trace trace)
|
|||
|
||||
Bool TraceIdMessagesCheck(Arena arena, TraceId ti)
|
||||
{
|
||||
CHECKL(!arena->tsMessage[ti]
|
||||
|| TraceStartMessageCheck(arena->tsMessage[ti]));
|
||||
CHECKL(!arena->tMessage[ti]
|
||||
|| TraceMessageCheck(arena->tMessage[ti]));
|
||||
CHECKL(! (arena->tsMessage[ti] && !arena->tMessage[ti]) );
|
||||
CHECKL(!arena->tsMessage[ti] || TraceStartMessageCheck(arena->tsMessage[ti]));
|
||||
CHECKL(!arena->tsMessage[ti] || arena->tMessage[ti]);
|
||||
CHECKL(!arena->tMessage[ti] || TraceMessageCheck(arena->tMessage[ti]));
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
|
@ -854,7 +852,7 @@ static void arenaForgetProtection(Globals globals)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2016 Ravenbrook Limited
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited
|
||||
* <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
|
|
|
|||
|
|
@ -39,22 +39,17 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_LI) && !defined(MPS_OS_XC)
|
||||
#error "vmix.c is specific to MPS_OS_FR, MPS_OS_LI or MPS_OS_XC"
|
||||
#endif
|
||||
|
||||
#include "vm.h"
|
||||
|
||||
/* for mmap(2), munmap(2) */
|
||||
#include <sys/types.h>
|
||||
#include <errno.h> /* errno */
|
||||
#include <sys/mman.h> /* see .feature.li in config.h */
|
||||
|
||||
/* for errno(2) */
|
||||
#include <errno.h>
|
||||
|
||||
/* for getpagesize(3) */
|
||||
#include <unistd.h>
|
||||
|
||||
|
||||
#if !defined(MPS_OS_FR) && !defined(MPS_OS_XC) && !defined(MPS_OS_LI)
|
||||
#error "vmix.c is Unix-like specific, currently MPS_OS_FR XC LI"
|
||||
#endif
|
||||
#include <sys/types.h> /* mmap, munmap */
|
||||
#include <unistd.h> /* getpagesize */
|
||||
|
||||
SRCID(vmix, "$Id$");
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
/* vmw3.c: VIRTUAL MEMORY MAPPING FOR WIN32
|
||||
*
|
||||
* $Id$
|
||||
* Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license.
|
||||
* Copyright (c) 2001-2018 Ravenbrook Limited. See end of file for license.
|
||||
*
|
||||
* .design: See <design/vm/>.
|
||||
*
|
||||
|
|
@ -39,13 +39,13 @@
|
|||
*/
|
||||
|
||||
#include "mpm.h"
|
||||
#include "vm.h"
|
||||
|
||||
#ifndef MPS_OS_W3
|
||||
#error "vmw3.c is Win32 specific, but MPS_OS_W3 is not set"
|
||||
#if !defined(MPS_OS_W3)
|
||||
#error "vmw3.c is specific to MPS_OS_W3"
|
||||
#endif
|
||||
|
||||
#include "mpswin.h"
|
||||
#include "vm.h"
|
||||
|
||||
SRCID(vmw3, "$Id$");
|
||||
|
||||
|
|
@ -229,7 +229,7 @@ void VMUnmap(VM vm, Addr base, Addr limit)
|
|||
|
||||
/* C. COPYRIGHT AND LICENSE
|
||||
*
|
||||
* Copyright (C) 2001-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* Copyright (C) 2001-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
* All rights reserved. This is an open source license. Contact
|
||||
* Ravenbrook for commercial licensing options.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -45,7 +45,7 @@ static void ArenaFormattedObjectsStep(Addr object, Format format, Pool pool,
|
|||
AVERT(Pool, pool);
|
||||
c = p;
|
||||
AVERT(FormattedObjectsStepClosure, c);
|
||||
AVER(s == 0);
|
||||
AVER(s == UNUSED_SIZE);
|
||||
|
||||
(*c->f)((mps_addr_t)object, (mps_fmt_t)format, (mps_pool_t)pool,
|
||||
c->p, c->s);
|
||||
|
|
@ -61,26 +61,22 @@ static void ArenaFormattedObjectsWalk(Arena arena, FormattedObjectsVisitor f,
|
|||
{
|
||||
Seg seg;
|
||||
FormattedObjectsStepClosure c;
|
||||
Format format;
|
||||
|
||||
AVERT(Arena, arena);
|
||||
AVER(FUNCHECK(f));
|
||||
AVER(f == ArenaFormattedObjectsStep);
|
||||
/* p and s are arbitrary closures. */
|
||||
/* Know that p is a FormattedObjectsStepClosure */
|
||||
/* Know that s is 0 */
|
||||
AVER(p != NULL);
|
||||
AVER(s == 0);
|
||||
|
||||
c = p;
|
||||
AVERT(FormattedObjectsStepClosure, c);
|
||||
/* Know that s is UNUSED_SIZE */
|
||||
AVER(s == UNUSED_SIZE);
|
||||
|
||||
if (SegFirst(&seg, arena)) {
|
||||
do {
|
||||
Pool pool;
|
||||
pool = SegPool(seg);
|
||||
if (PoolHasAttr(pool, AttrFMT)) {
|
||||
if (PoolFormat(&format, SegPool(seg))) {
|
||||
ShieldExpose(arena, seg);
|
||||
PoolWalk(pool, seg, f, p, s);
|
||||
SegWalk(seg, format, f, p, s);
|
||||
ShieldCover(arena, seg);
|
||||
}
|
||||
} while(SegNext(&seg, arena, seg));
|
||||
|
|
@ -107,7 +103,7 @@ void mps_arena_formatted_objects_walk(mps_arena_t mps_arena,
|
|||
c.f = f;
|
||||
c.p = p;
|
||||
c.s = s;
|
||||
ArenaFormattedObjectsWalk(arena, ArenaFormattedObjectsStep, &c, 0);
|
||||
ArenaFormattedObjectsWalk(arena, ArenaFormattedObjectsStep, &c, UNUSED_SIZE);
|
||||
ArenaLeave(arena);
|
||||
}
|
||||
|
||||
|
|
@ -184,7 +180,7 @@ static Bool rootsStepClosureCheck(rootsStepClosure rsc)
|
|||
|
||||
static void rootsStepClosureInit(rootsStepClosure rsc,
|
||||
Globals arena, Trace trace,
|
||||
PoolFixMethod rootFix,
|
||||
SegFixMethod rootFix,
|
||||
mps_roots_stepper_t f, void *p, size_t s)
|
||||
{
|
||||
ScanState ss;
|
||||
|
|
@ -228,12 +224,10 @@ static void rootsStepClosureFinish(rootsStepClosure rsc)
|
|||
* This doesn't cause further scanning of transitive references, it just
|
||||
* calls the client closure. */
|
||||
|
||||
static Res RootsWalkFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
|
||||
static Res RootsWalkFix(Seg seg, ScanState ss, Ref *refIO)
|
||||
{
|
||||
rootsStepClosure rsc;
|
||||
Ref ref;
|
||||
|
||||
UNUSED(pool);
|
||||
|
||||
AVERT(ScanState, ss);
|
||||
AVER(refIO != NULL);
|
||||
|
|
|
|||
|
|
@ -88,11 +88,11 @@ operation is to look up the segment pointed to by the pointer and see if
|
|||
it was condemned. This is a fast lookup.
|
||||
|
||||
After that, each pool class must decide whether the pointer is to a
|
||||
condemned object and do something to preserve it. This code is still
|
||||
critical. The MPS will have tried to condemn objects that are dead, but
|
||||
those objects are still likely to be in segments with other objects that
|
||||
must be preserved. The pool class fix method must quickly distinguish
|
||||
between them.
|
||||
condemned object and do something to preserve it. This code is still
|
||||
critical. The MPS will have tried to condemn objects that are dead,
|
||||
but those objects are still likely to be in segments with other
|
||||
objects that must be preserved. The segment class fix method must
|
||||
quickly distinguish between them.
|
||||
|
||||
Furthermore, many objects will be preserved at least once in their
|
||||
lifetime, so even the code that preserves an object needs to be highly
|
||||
|
|
@ -126,10 +126,10 @@ Very briefly, the critical path consists of five stages:
|
|||
|
||||
.. _trace.c: ../code/trace.c
|
||||
|
||||
#. The third-stage fix, which filters out pointers using pool-specific
|
||||
information. Implemented in pool class functions called
|
||||
``AMCFix()``, ``LOFix()``, etc. in pool*.c.
|
||||
|
||||
#. The third-stage fix, which filters out pointers using
|
||||
segment-specific information. Implemented in segment class
|
||||
functions called ``amcSegFix()``, ``loSegFix()``, etc. in pool*.c.
|
||||
|
||||
#. Preserving the object, which might entail:
|
||||
|
||||
- marking_ it to prevent it being recycled; and/or
|
||||
|
|
@ -266,36 +266,30 @@ is with luck still in the processor cache. The reason there is a
|
|||
dispatch at all is to allow for a fast changeover to emergency garbage
|
||||
collection, or overriding of garbage collection with extra operations.
|
||||
Those are beyond the scope of this document. Normally, ``ss->fix``
|
||||
points at ``PoolFix()``, and we rely somewhat on modern processor
|
||||
`branch target prediction
|
||||
<https://en.wikipedia.org/wiki/Branch_target_predictor>`_).
|
||||
``PoolFix()`` is passed the pool, which is fetched from the tract
|
||||
table entry, and that should be in the cache.
|
||||
points at ``SegFix()``.
|
||||
|
||||
``PoolFix()`` itself dispatches to the pool class. Normally, a
|
||||
dispatch to a pool class would indirect through the pool class object.
|
||||
That would be a double indirection from the tract, so instead we have
|
||||
a cache of the pool's fix method in the pool object. This also allows
|
||||
a pool class to vary its fix method per pool instance if that would
|
||||
improve performance.
|
||||
``SegFix()`` is passed the segment, which is fetched from the tract
|
||||
table entry, and that should be in the cache. ``SegFix()`` itself
|
||||
dispatches to the segment class.
|
||||
|
||||
|
||||
The third stage fix in the pool class
|
||||
-------------------------------------
|
||||
The final stage of fixing is entirely dependent on the pool class. The
|
||||
MPM can't, in general, know how the objects within a pool are arranged,
|
||||
so this is pool class specific code.
|
||||
The third stage fix in the segment class
|
||||
----------------------------------------
|
||||
The final stage of fixing is entirely dependent on the segment class.
|
||||
The MPM can't, in general, know how the objects within a segment are
|
||||
arranged, so this is segment class specific code.
|
||||
|
||||
Furthermore, the pool class must make decisions based on the "reference
|
||||
rank" of the pointer. If a pointer is ambiguous (``RankAMBIG``) then it
|
||||
can't be changed, so even a copying pool class can't move an object.
|
||||
On the other hand, if the pointer is weak (``RankWEAK``) then the pool fix
|
||||
method shouldn't preserve the object at all, even if it's condemned.
|
||||
Furthermore, the segment class must make decisions based on the
|
||||
"reference rank" of the pointer. If a pointer is ambiguous
|
||||
(``RankAMBIG``) then it can't be changed, so even a copying segment
|
||||
class can't move an object. On the other hand, if the pointer is weak
|
||||
(``RankWEAK``) then the segment fix method shouldn't preserve the
|
||||
object at all, even if it's condemned.
|
||||
|
||||
The exact details of the logic that the pool fix must implement in
|
||||
The exact details of the logic that the segment fix must implement in
|
||||
order to co-operate with the MPM and other pools are beyond the scope
|
||||
of this document, which is about the critical path. Since it is on
|
||||
the critical path, it's important that whatever the pool fix does is
|
||||
the critical path, it's important that whatever the segment fix does is
|
||||
simple and fast and returns to scanning as soon as possible.
|
||||
|
||||
The first step, though, is to further filter out pointers which aren't
|
||||
|
|
@ -308,23 +302,23 @@ implements a copying collector), or was already moved when fixing a
|
|||
previous reference to it, the reference being fixed must be updated
|
||||
(this is the origin of the term "fix").
|
||||
|
||||
As a simple example, ``LOFix()`` is the pool fix method for the LO
|
||||
(Leaf Object) pool class. It implements a marking garbage collector,
|
||||
and does not have to worry about scanning preserved objects because it
|
||||
is used to store objects that don't contain pointers. (It is used in
|
||||
compiler run-time systems to store binary data such as character
|
||||
strings, thus avoiding any scanning, decoding, or remembered set
|
||||
overhead for them.)
|
||||
As a simple example, ``loSegFix()`` is the segment fix method for
|
||||
segments belonging to the LO (Leaf Object) pool class. It implements a
|
||||
marking garbage collector, and does not have to worry about scanning
|
||||
preserved objects because it is used to store objects that don't
|
||||
contain pointers. (It is used in compiler run-time systems to store
|
||||
binary data such as character strings, thus avoiding any scanning,
|
||||
decoding, or remembered set overhead for them.)
|
||||
|
||||
``LOFix()`` filters any ambiguous pointers that aren't aligned, since
|
||||
they can't point to objects it allocated. Otherwise it subtracts the
|
||||
segment base address and shifts the result to get an index into a mark
|
||||
bit table. If the object wasn't marked and the pointer is weak, then
|
||||
it sets the pointer to zero, since the object is about to be recycled.
|
||||
Otherwise, the mark bit is set, which preserves the object from
|
||||
recycling when ``LOReclaim()`` is called later on. ``LOFix()``
|
||||
illustrates about the minimum and most efficient thing a pool fix
|
||||
method can do.
|
||||
``loSegFix()`` filters any ambiguous pointers that aren't aligned,
|
||||
since they can't point to objects it allocated. Otherwise it subtracts
|
||||
the segment base address and shifts the result to get an index into a
|
||||
mark bit table. If the object wasn't marked and the pointer is weak,
|
||||
then it sets the pointer to zero, since the object is about to be
|
||||
recycled. Otherwise, the mark bit is set, which preserves the object
|
||||
from recycling when ``loSegReclaim()`` is called later on.
|
||||
``loSegFix()`` illustrates about the minimum and most efficient thing
|
||||
a segment fix method can do.
|
||||
|
||||
|
||||
Other considerations
|
||||
|
|
|
|||
|
|
@ -21,26 +21,23 @@ interface also allows the value of such references to be changed (this
|
|||
is necessary in order to implement a moving memory manager).
|
||||
|
||||
|
||||
Architecture
|
||||
-------------
|
||||
Was-marked protocol
|
||||
-------------------
|
||||
|
||||
_`.protocol.was-marked`: The ``ScanState`` has a ``Bool``
|
||||
``wasMarked`` field. This is used for finalization.
|
||||
_`.was-marked`: The ``ScanState`` has a ``Bool wasMarked``
|
||||
field. This is used for finalization.
|
||||
|
||||
_`.protocol.was-marked.set`: All pool-specific fix methods must set
|
||||
the ``wasMarked`` field in the ``ScanState`` that they are passed.
|
||||
_`.was-marked.not`: If a segment's fix method discovers that the
|
||||
object referred to by the ref (the one that it is supposed to be
|
||||
fixing) has not previously been marked (that is, this is the first
|
||||
reference to this object that has been fixed), and that the object was
|
||||
white (that is, in condemned space), it should (but need not) set the
|
||||
field to ``FALSE`` in the passed ``ScanState````wasMarked`` .
|
||||
|
||||
_`.protocol.was-marked.meaning`: If the pool-specific fix method sets
|
||||
the ``wasMarked`` field to ``FALSE`` it is indicating the object
|
||||
referred to by the ref (the one that it is supposed to be fixing) has
|
||||
not previously been marked (ie, this is the first reference to this
|
||||
object that has been fixed), and that the object was white (in
|
||||
condemned space).
|
||||
_`.was-marked.otherwise`: Otherwise, the fix method must
|
||||
leave the ``wasMarked`` field unchanged.
|
||||
|
||||
_`.protocol.was-marked.conservative`: It is always okay to set the
|
||||
``wasMarked`` field to ``TRUE``.
|
||||
|
||||
_`.protocol.was-marked.finalizable`: The MRG pool (design.mps.poolmrg_)
|
||||
_`.was-marked.finalizable`: The MRG pool (design.mps.poolmrg_)
|
||||
uses the value of the ``wasMarked`` field to determine whether an
|
||||
object is finalizable.
|
||||
|
||||
|
|
@ -66,6 +63,8 @@ Document History
|
|||
|
||||
- 2013-04-14 GDR_ Converted to reStructuredText.
|
||||
|
||||
- 2018-06-18 GDR_ Simplify the ``wasMarked`` protocol.
|
||||
|
||||
.. _RB: http://www.ravenbrook.com/consultants/rb/
|
||||
.. _GDR: http://www.ravenbrook.com/consultants/gdr/
|
||||
|
||||
|
|
@ -73,7 +72,7 @@ Document History
|
|||
Copyright and License
|
||||
---------------------
|
||||
|
||||
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
Copyright © 2013-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
All rights reserved. This is an open source license. Contact
|
||||
Ravenbrook for commercial licensing options.
|
||||
|
||||
|
|
|
|||
|
|
@ -74,6 +74,15 @@ attributes. See design.mps.type.attr_.
|
|||
|
||||
.. _design.mps.type.attr: type#attr
|
||||
|
||||
_`.field.alignShift`: The ``alignShift`` field is the ``SizeLog2`` of
|
||||
the pool's alignment. It is computed and initialised when a pool is
|
||||
created. Mark-and-sweep pool classes use it to compute the number of
|
||||
grains in a segment, which is the number of bits need in the segment's
|
||||
mark and alloc bit tables.
|
||||
|
||||
_`.field.format`: The ``format`` field is used to refer to the object
|
||||
format. The object format is passed to the pool during pool creation.
|
||||
|
||||
|
||||
Methods
|
||||
-------
|
||||
|
|
@ -182,119 +191,6 @@ part between init and limit). This method must be provided if and only
|
|||
if ``bufferFill`` is provided. This method is called by the generic
|
||||
function ``BufferDetach()``.
|
||||
|
||||
``typedef Res (*PoolAccessMethod)(Pool pool, Seg seg, Addr addr, AccessSet mode, MutatorContext context)``
|
||||
|
||||
_`.method.access`: The ``access`` method indicates that the client
|
||||
program attempted to access the address ``addr``, but has been denied
|
||||
due to a protection fault. The ``mode`` indicates whether the client
|
||||
program was trying to read (``AccessREAD``) or write (``AccessWRITE``)
|
||||
the address. If this can't be determined, ``mode`` is ``AccessREAD |
|
||||
AccessWRITE``. The pool should perform any work necessary to remove
|
||||
the protection whilst still preserving appropriate invariants (this
|
||||
might scanning the region containing ``addr``). Pool classes are not
|
||||
required to provide this method, and not doing so indicates they never
|
||||
protect any memory managed by the pool. This method is called via the
|
||||
generic function ``PoolAccess()``.
|
||||
|
||||
``typedef Res (*PoolWhitenMethod)(Pool pool, Trace trace, Seg seg)``
|
||||
|
||||
_`.method.whiten`: The ``whiten`` method requests that the pool to
|
||||
condemn (a subset of, but typically all) the objects in the segment
|
||||
``seg`` for the trace ``trace``. That is, prepare them for
|
||||
participation in the trace to determine their liveness. The pool
|
||||
should expect fix requests (`.method.fix`_) during the trace and a
|
||||
reclaim request (`.method.reclaim`_) at the end of the trace. Pool
|
||||
classes that automatically reclaim dead objects must provide this
|
||||
method, and must additionally set the ``AttrGC`` attribute. This
|
||||
method is called via the generic function ``PoolWhiten()``.
|
||||
|
||||
``typedef void (*PoolGreyMethod)(Pool pool, Trace trace, Seg seg)``
|
||||
|
||||
_`.method.grey`: The ``grey`` method requires the pool to colour the
|
||||
objects in the segment ``seg`` grey for the trace ``trace`` (excepting
|
||||
objects that were already condemned for this trace). That is, make
|
||||
them ready for scanning by the trace ``trace``. The pool must arrange
|
||||
that any appropriate invariants are preserved, possibly by using the
|
||||
protection interface (see design.mps.prot_). Pool classes are not
|
||||
required to provide this method, and not doing so indicates that all
|
||||
instances of this class will have no fixable or traceable references
|
||||
in them.
|
||||
|
||||
.. _design.mps.prot: prot
|
||||
|
||||
``typedef void (*PoolBlackenMethod)(Pool pool, TraceSet traceSet, Seg seg)``
|
||||
|
||||
_`.method.blacken`: The ``blacken`` method is called if it is known
|
||||
that the segment ``seg`` cannot refer to the white set for any of the
|
||||
traces in ``traceSet``. The pool must blacken all grey objects in the
|
||||
segment for those traces. Pool classes are not required to provide
|
||||
this method, and not doing so indicates that all instances of this
|
||||
class will have no fixable or traceable references in them. This
|
||||
method is called via the generic function ``PoolBlacken()``.
|
||||
|
||||
``typedef Res (*PoolScanMethod)(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)``
|
||||
|
||||
_`.method.scan`: The ``scan`` method requires that the pool scan all
|
||||
the grey objects on the segment ``seg``, passing the scan state ``ss``
|
||||
to ``FormatScan``. The pool may additionally accumulate a summary of
|
||||
*all* the objects on the segment. If it succeeds in accumulating such
|
||||
a summary it must indicate that it has done so by setting the
|
||||
``*totalReturn`` parameter to ``TRUE``. Otherwise it must set
|
||||
``*totalReturn`` to ``FALSE``. Pool classes are not required to
|
||||
provide this method, and not doing so indicates that all instances of
|
||||
this class will have no fixable or traceable references in them. This
|
||||
method is called via the generic function ``PoolScan()``.
|
||||
|
||||
``typedef Res (*PoolFixMethod)(Pool pool, ScanState ss, Seg seg, Ref *refIO)``
|
||||
|
||||
_`.method.fix`: The ``fix`` method indicates that the reference
|
||||
``*refIO`` has been discovered at rank ``ss->rank`` by the traces in
|
||||
``ss->traces``, and the pool must handle this discovery according to
|
||||
the fix protocol (design.mps.fix_). If the pool moves the object, it
|
||||
must update ``*refIO`` to refer to the new location of the object. If
|
||||
the pool determines that the referenced object died (for example,
|
||||
because the highest-ranking references to the object were weak), it
|
||||
must update ``*refIO`` to ``NULL``. Pool classes that automatically
|
||||
reclaim dead objects must provide this method, and must additionally
|
||||
set the ``AttrGC`` attribute. Pool classes that may move objects must
|
||||
also set the ``AttrMOVINGGC`` attribute. The ``fix`` method is on the
|
||||
critical path (see design.mps.critical-path_) and so must be fast.
|
||||
This method is called via the function ``TraceFix()``.
|
||||
|
||||
.. _design.mps.fix: fix
|
||||
.. _design.mps.critical-path: critical-path
|
||||
|
||||
_`.method.fixEmergency`: The ``fixEmergency`` method is used to
|
||||
perform fixing in "emergency" situations. Its specification is
|
||||
identical to the ``fix`` method, but it must complete its work without
|
||||
allocating memory (perhaps by using some approximation, or by running
|
||||
more slowly). Pool classes must provide this method if and only if
|
||||
they provide the ``fix`` method. If the ``fix`` method does not need
|
||||
to allocate memory, then it is acceptable for ``fix`` and
|
||||
``fixEmergency`` to be the same.
|
||||
|
||||
``typedef void (*PoolReclaimMethod)(Pool pool, Trace trace, Seg seg)``
|
||||
|
||||
_`.method.reclaim`: The ``reclaim`` method indicates that any
|
||||
remaining white objects in the segment ``seg`` have now been proved
|
||||
unreachable by the trace ``trace``, and so are dead. The pool should
|
||||
reclaim the resources associated with the dead objects. Pool classes
|
||||
are not required to provide this method. If they do, they must set the
|
||||
``AttrGC`` attribute. This method is called via the generic function
|
||||
``PoolReclaim()``.
|
||||
|
||||
``typedef void (*PoolWalkMethod)(Pool pool, Seg seg, FormattedObjectsVisitor f, void *v, size_t s)``
|
||||
|
||||
_`.method.walk`: The ``walk`` method must call the visitor function
|
||||
``f`` (along with its closure parameters ``v`` and ``s`` and the
|
||||
appropriate object format) once for each of the *black* objects in the
|
||||
segment ``seg``. Padding objects may or may not be included in the
|
||||
walk, at the pool's discretion: it is the responsibility of the client
|
||||
program to handle them. Forwarding objects must not be included in the
|
||||
walk. Pool classes need not provide this method. If they do, they must
|
||||
set the ``AttrFMT`` attribute. This method is called by the heap
|
||||
walker ``mps_arena_formatted_objects_walk()``.
|
||||
|
||||
``typedef Size (*PoolSizeMethod)(Pool pool)``
|
||||
|
||||
_`.method.totalSize`: The ``totalSize`` method must return the total
|
||||
|
|
|
|||
|
|
@ -92,7 +92,7 @@ size, ``AMCBufferFill()`` fills any remainder with an large segment
|
|||
pad.
|
||||
|
||||
_`.pad.reason.nmr`: Non-mobile reclaim (NMR) pads are made by
|
||||
``amcReclaimNailed()``, when performing reclaim on a non-mobile (that
|
||||
``amcSegReclaimNailed()``, when performing reclaim on a non-mobile (that
|
||||
is, either boarded or stuck) segment:
|
||||
|
||||
The more common NMR scenario is reclaim of a boarded segment after a
|
||||
|
|
@ -117,7 +117,7 @@ not distinguishable because there is no nailboard. On reclaim, all
|
|||
objects except forwarding pointers are preserved; each forwarding
|
||||
object is replaced by an NMR pad.
|
||||
|
||||
If ``amcReclaimNailed()`` finds no objects to be preserved then it
|
||||
If ``amcSegReclaimNailed()`` finds no objects to be preserved then it
|
||||
calls ``SegFree()`` (new with job001809_).
|
||||
|
||||
|
||||
|
|
@ -137,7 +137,7 @@ Retained pads could be a problem
|
|||
|
||||
Retained pads are the NMR pads stuck in "from-space": non-mobile
|
||||
segments that were condemned but have preserved-in-place objects
|
||||
cannot be freed by ``amcReclaimNailed()``. The space around the
|
||||
cannot be freed by ``amcSegReclaimNailed()``. The space around the
|
||||
preserved objects is filled with NMR pads.
|
||||
|
||||
In the worst case, retained pads could waste an enormous amount of
|
||||
|
|
@ -181,7 +181,7 @@ AMC might treat "Large" segments specially, in two ways:
|
|||
any) is immediately padded with an LSP pad.
|
||||
|
||||
- _`.large.lsp-no-retain`: Nails to such an LSP pad do not cause
|
||||
AMCReclaimNailed() to retain the segment.
|
||||
``amcSegReclaimNailed()`` to retain the segment.
|
||||
|
||||
`.large.single-reserve`_ is implemented. See job001811_.
|
||||
|
||||
|
|
@ -204,21 +204,21 @@ indistinguishable from a client object, so AMC has no direct way to
|
|||
detect, and safely ignore, the final LSP object in the seg. If AMC
|
||||
could *guarantee* that the single buffer reserve
|
||||
(`.large.single-reserve`_) is only used for a single *object*, then
|
||||
``AMCReclaimNailed()`` could honour a nail at the start of a large seg
|
||||
and ignore all others; this would be extremely simple to implement.
|
||||
But AMC cannot guarantee this, because in the MPS Allocation Point
|
||||
Protocol the client is permitted to make a large buffer reserve and
|
||||
then fill it with many small objects. In such a case, AMC must honour
|
||||
all nails (if the buffer reserve request was an exact multiple of the
|
||||
arena grain size), or all nails except to the last object (if there
|
||||
was a remainder filled with an LSP pad). Because an LSP pad cannot be
|
||||
distinguished from a client object, and the requested allocation size
|
||||
is not recorded, AMC cannot distinguish these two conditions at
|
||||
reclaim time. Therefore AMC must record whether or not the last object
|
||||
in the seg is a pad, in order to ignore nails to it. This could be
|
||||
done by adding a flag to ``AMCSegStruct``. (This can be done without
|
||||
increasing the structure size, by making the ``Bool new`` field
|
||||
smaller than its current 32 bits.)
|
||||
``amcSegReclaimNailed()`` could honour a nail at the start of a large
|
||||
seg and ignore all others; this would be extremely simple to
|
||||
implement. But AMC cannot guarantee this, because in the MPS
|
||||
Allocation Point Protocol the client is permitted to make a large
|
||||
buffer reserve and then fill it with many small objects. In such a
|
||||
case, AMC must honour all nails (if the buffer reserve request was an
|
||||
exact multiple of the arena grain size), or all nails except to the
|
||||
last object (if there was a remainder filled with an LSP pad). Because
|
||||
an LSP pad cannot be distinguished from a client object, and the
|
||||
requested allocation size is not recorded, AMC cannot distinguish
|
||||
these two conditions at reclaim time. Therefore AMC must record
|
||||
whether or not the last object in the seg is a pad, in order to ignore
|
||||
nails to it. This could be done by adding a flag to ``AMCSegStruct``.
|
||||
(This can be done without increasing the structure size, by making the
|
||||
``Bool new`` field smaller than its current 32 bits.)
|
||||
|
||||
|
||||
The LSP payoff calculation
|
||||
|
|
@ -439,8 +439,8 @@ segment to survive even though there are no surviving objects on it.
|
|||
Emergency tracing
|
||||
-----------------
|
||||
|
||||
_`.emergency.fix`: ``AMCFixEmergency()`` is at the core of AMC's
|
||||
emergency tracing policy (unsurprisingly). ``AMCFixEmergency()``
|
||||
_`.emergency.fix`: ``amcSegFixEmergency()`` is at the core of AMC's
|
||||
emergency tracing policy (unsurprisingly). ``amcSegFixEmergency()``
|
||||
chooses exactly one of three options:
|
||||
|
||||
#. use the existing nailboard structure to record the fix;
|
||||
|
|
@ -456,8 +456,8 @@ is used to snapout the pointer. Otherwise it is as for an
|
|||
|
||||
_`.emergency.scan`: This is basically as before, the only complication
|
||||
is that when scanning a nailed segment we may need to do multiple
|
||||
passes, as ``FixEmergency()`` may introduce new marks into the nail
|
||||
board.
|
||||
passes, as ``amcSegFixEmergency()`` may introduce new marks into the
|
||||
nail board.
|
||||
|
||||
|
||||
Buffers
|
||||
|
|
@ -653,8 +653,9 @@ for small enough ramps.
|
|||
_`.ramp.begin.leave.ramping`: We enter the RAMPING state if a
|
||||
collection starts that condemns the ramp generation (pedantically when
|
||||
a new GC begins, and a segment in the ramp generation is condemned, we
|
||||
leave the BEGIN state, see AMCWhiten). At this point we switch the
|
||||
ramp generation to forward to itself (`.gen.ramp.ramping`_).
|
||||
leave the BEGIN state, see ``amcSegWhiten()``). At this point we
|
||||
switch the ramp generation to forward to itself
|
||||
(`.gen.ramp.ramping`_).
|
||||
|
||||
_`.ramp.ramping.leave`: We leave the RAMPING state and go to the
|
||||
FINISH state when the ramp count goes back to zero. Thus, the FINISH
|
||||
|
|
@ -736,9 +737,9 @@ exposed, in which case the group attached to it should be exposed. See
|
|||
`.flush.cover`_.
|
||||
|
||||
|
||||
``Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)``
|
||||
``Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)``
|
||||
|
||||
_`.fix`: Fix a reference to the pool.
|
||||
_`.fix`: Fix a reference to an AMC segment.
|
||||
|
||||
Ambiguous references lock down an entire segment by removing it
|
||||
from old-space and also marking it grey for future scanning.
|
||||
|
|
@ -762,13 +763,13 @@ _`.fix.exact.grey`: The new copy must be at least as grey as the old
|
|||
as it may have been grey for some other collection.
|
||||
|
||||
|
||||
``Res AMCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)``
|
||||
``Res amcSegScan(Bool *totalReturn, Seg seg, ScanState ss1)``
|
||||
|
||||
_`.scan`: Searches for a group which is grey for the trace and scans
|
||||
it. If there aren't any, it sets the finished flag to true.
|
||||
|
||||
|
||||
``void AMCReclaim(Pool pool, Trace trace, Seg seg)``
|
||||
``void amcSegReclaim(Seg seg, Trace trace)``
|
||||
|
||||
_`.reclaim`: After a trace, destroy any groups which are still
|
||||
condemned for the trace, because they must be dead.
|
||||
|
|
|
|||
|
|
@ -255,9 +255,9 @@ there are grey objects in the segment, because the grey objects might
|
|||
have been subsequently scanned and blackened.
|
||||
|
||||
_`.marked.fix`: The ``marksChanged`` flag is set ``TRUE`` by
|
||||
``AMSFix()`` when an object is made grey.
|
||||
``amsSegFix()`` when an object is made grey.
|
||||
|
||||
_`.marked.scan`: ``AMSScan()`` must blacken all grey objects on the
|
||||
_`.marked.scan`: ``amsSegScan()`` must blacken all grey objects on the
|
||||
segment, so it must iterate over the segment until all grey objects
|
||||
have been seen. Scanning an object in the segment might grey another
|
||||
one (`.marked.fix`_), so the scanner iterates until this flag is
|
||||
|
|
@ -285,9 +285,9 @@ nothing marked as grey, so the ``marksChanged`` flag must already be
|
|||
``FALSE``.
|
||||
|
||||
_`.marked.blacken`: When the tracer decides not to scan, but to call
|
||||
``PoolBlacken()``, we know that any greyness can be removed.
|
||||
``AMSBlacken()`` does this and resets the ``marksChanged`` flag, if it
|
||||
finds that the segment has been condemned.
|
||||
``SegBlacken()``, we know that any greyness can be removed.
|
||||
``amsSegBlacken()`` does this and resets the ``marksChanged`` flag, if
|
||||
it finds that the segment has been condemned.
|
||||
|
||||
_`.marked.clever`: AMS could be clever about not setting the
|
||||
``marksChanged`` flag, if the fixed object is ahead of the current
|
||||
|
|
@ -325,7 +325,7 @@ current (2002-01) implementation of buffers assumes buffers are black,
|
|||
so they'd better.
|
||||
|
||||
_`.fill.colour.reclaim`: In fact, putting a buffer on a condemned
|
||||
segment will screw up the accounting in ``AMCReclaim()``, so it's
|
||||
segment will screw up the accounting in ``amsSegReclaim()``, so it's
|
||||
disallowed.
|
||||
|
||||
_`.fill.slow`: ``AMSBufferFill()`` gets progressively slower as more
|
||||
|
|
@ -462,10 +462,10 @@ index in a segment uses macros such as ``AMS_INDEX`` and
|
|||
every translation -- we could cache that.
|
||||
|
||||
_`.grey-mutator`: To enforce the restriction set in `.not-req.grey`_
|
||||
we check that all the traces are flipped in ``AMSScan()``. It would be
|
||||
good to check in ``AMSFix()`` as well, but we can't do that, because
|
||||
it's called during the flip, and we can't tell the difference between
|
||||
the flip and the grey mutator phases with the current tracer
|
||||
we check that all the traces are flipped in ``amsSegScan()``. It would
|
||||
be good to check in ``amsSegFix()`` as well, but we can't do that,
|
||||
because it's called during the flip, and we can't tell the difference
|
||||
between the flip and the grey mutator phases with the current tracer
|
||||
interface.
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -103,60 +103,14 @@ _`.poolstruct`: The class specific pool structure is::
|
|||
|
||||
struct AWLStruct {
|
||||
PoolStruct poolStruct;
|
||||
Format format;
|
||||
Shift alignShift;
|
||||
ActionStruct actionStruct;
|
||||
double lastCollected;
|
||||
Serial gen;
|
||||
Sig sig;
|
||||
PoolGenStruct pgenStruct; /* pool generation */
|
||||
PoolGen pgen; /* NULL or pointer to pgenStruct */
|
||||
Count succAccesses; /* number of successive single accesses */
|
||||
FindDependentFunction findDependent; /* to find a dependent object */
|
||||
awlStatTotalStruct stats;
|
||||
Sig sig; /* <code/misc.h#sig> */
|
||||
}
|
||||
|
||||
_`.poolstruct.format`: The format field is used to refer to the object
|
||||
format. The object format is passed to the pool during pool creation.
|
||||
|
||||
_`.poolstruct.alignshift`: The ``alignShift`` field is the
|
||||
``SizeLog2`` of the pool's alignment. It is computed and initialised
|
||||
when a pool is created. It is used to compute the number of alignment
|
||||
grains in a segment which is the number of bits need in the segment's
|
||||
mark and alloc bit table (see `.awlseg.bt`_, `.awlseg.mark`_, and
|
||||
`.awlseg.alloc`_ below).
|
||||
|
||||
.. note::
|
||||
|
||||
Clarify this.
|
||||
|
||||
_`.poolstruct.actionStruct`: Contains an Action which is used to
|
||||
participate in the collection benefit protocol. See ``AWLBenefit()``
|
||||
below for a description of the algorithm used for determining when to
|
||||
collect.
|
||||
|
||||
_`.poolstruct.lastCollected`: Records the time (using the mutator
|
||||
total allocation clock, ie that returned by
|
||||
``ArenaMutatorAllocSize()``) of the most recent call to either
|
||||
``AWLInit()`` or ``AWLTraceBegin()`` for this pool. So this is the
|
||||
time of the beginning of the last collection of this pool. Actually
|
||||
this isn't true because the pool can be collected without
|
||||
``AWLTraceBegin()`` being called (I think) as it will get collected by
|
||||
being in the same zone as another pool/generation that is being
|
||||
collected (which it does arrange to be, see the use of the gen field
|
||||
in `.poolstruct.gen`_ below and `.fun.awlsegcreate.where`_ below).
|
||||
|
||||
_`.poolstruct.gen`: This part of the mechanism by which the pool
|
||||
arranges to be in a particular zone and arranges to be collected
|
||||
simultaneously with other cohorts in the system. ``gen`` is the
|
||||
generation that is used in expressing a generation preference when
|
||||
allocating a segment. The intention is that this pool will get
|
||||
collected simultaneously with any other segments that are also
|
||||
allocated using this generation preference (when using the VM arena,
|
||||
generation preferences get mapped more or less to zones, each
|
||||
generation to a unique set of zones in the ideal case). Whilst AWL is
|
||||
not generational it is expected that this mechanism will arrange for
|
||||
it to be collected simultaneously with some particular generation of
|
||||
AMC.
|
||||
|
||||
_`.poolstruct.gen.1`: At the moment the ``gen`` field is set for all
|
||||
AWL pools to be 1.
|
||||
|
||||
_`.awlseg`: The pool defines a segment class ``AWLSegClass``, which is
|
||||
a subclass of ``GCSegClass`` (see
|
||||
design.mps.seg.over.hierarchy.gcseg_). All segments allocated by the
|
||||
|
|
@ -164,60 +118,63 @@ pool are instances of this class, and are of type ``AWLSeg``, for
|
|||
which the structure is::
|
||||
|
||||
struct AWLSegStruct {
|
||||
GCSegStruct gcSegStruct;
|
||||
GCSegStruct gcSegStruct; /* superclass fields must come first */
|
||||
BT mark;
|
||||
BT scanned;
|
||||
BT alloc;
|
||||
Count grains;
|
||||
Count free;
|
||||
Count singleAccesses;
|
||||
AWLStatSegStruct stats;
|
||||
Sig sig;
|
||||
Count freeGrains; /* free grains */
|
||||
Count bufferedGrains; /* grains in buffers */
|
||||
Count newGrains; /* grains allocated since last collection */
|
||||
Count oldGrains; /* grains allocated prior to last collection */
|
||||
Count singleAccesses; /* number of accesses processed singly */
|
||||
awlStatSegStruct stats;
|
||||
Sig sig; /* <code/misc.h#sig> */
|
||||
}
|
||||
|
||||
.. _design.mps.seg.over.hierarchy.gcseg: seg#over-hierarchy-gcseg
|
||||
|
||||
_`.awlseg.bt`: The mark, alloc, and scanned fields are bit-tables (see
|
||||
design.mps.bt_). Each bit in the table corresponds to a single
|
||||
alignment grain in the pool.
|
||||
_`.awlseg.bt`: The ``mark``, ``alloc``, and ``scanned`` fields are
|
||||
bit-tables (see design.mps.bt_). Each bit in the table corresponds to
|
||||
a single alignment grain in the pool.
|
||||
|
||||
.. _design.mps.bt: bt
|
||||
|
||||
_`.awlseg.mark`: The mark bit table is used to record mark bits during
|
||||
a trace. ``AWLCondemn()`` (see `.fun.condemn`_ below) sets all the
|
||||
bits of this table to zero. Fix will read and set bits in this table.
|
||||
Currently there is only one mark bit table. This means that the pool
|
||||
can only be condemned for one trace.
|
||||
_`.awlseg.mark`: The ``mark`` bit table is used to record mark bits
|
||||
during a trace. ``awlSegWhiten()`` (see `.fun.whiten`_ below) sets all
|
||||
the bits of this table to zero. Fix will read and set bits in this
|
||||
table. Currently there is only one mark bit table. This means that the
|
||||
pool can only be condemned for one trace.
|
||||
|
||||
_`.awlseg.mark.justify`: This is simple, and can be improved later
|
||||
when we want to run more than one trace.
|
||||
|
||||
_`.awlseg.scanned`: The scanned bit-table is used to note which
|
||||
_`.awlseg.scanned`: The ``scanned`` bit-table is used to note which
|
||||
objects have been scanned. Scanning (see `.fun.scan`_ below) a segment
|
||||
will find objects that are marked but not scanned, scan each object
|
||||
found and set the corresponding bits in the scanned table.
|
||||
|
||||
_`.awlseg.alloc`: The alloc bit table is used to record which portions
|
||||
of a segment have been allocated. Ranges of bits in this table are set
|
||||
when a buffer is attached to the segment. When a buffer is flushed (ie
|
||||
``AWLBufferEmpty()`` is called) from the segment, the bits
|
||||
corresponding to the unused portion at the end of the buffer are
|
||||
_`.awlseg.alloc`: The ``alloc`` bit table is used to record which
|
||||
portions of a segment have been allocated. Ranges of bits in this
|
||||
table are set when a buffer is attached to the segment. When a buffer
|
||||
is flushed (ie ``AWLBufferEmpty()`` is called) from the segment, the
|
||||
bits corresponding to the unused portion at the end of the buffer are
|
||||
reset.
|
||||
|
||||
_`.awlseg.alloc.invariant`: A bit is set in the alloc table if and
|
||||
only if the corresponding address is currently being buffered, or the
|
||||
corresponding address lies within the range of an allocated object.
|
||||
|
||||
_`.awlseg.grains`: The grains field is the number of grains that fit
|
||||
in the segment. Strictly speaking this is not necessary as it can be
|
||||
computed from ``SegSize`` and AWL's alignment, however, precalculating
|
||||
it and storing it in the segment makes the code simpler by avoiding
|
||||
lots of repeated calculations.
|
||||
_`.awlseg.grains`: The ``grains`` field is the number of grains that
|
||||
fit in the segment. Strictly speaking this is not necessary as it can
|
||||
be computed from ``SegSize`` and AWL's alignment, however,
|
||||
precalculating it and storing it in the segment makes the code simpler
|
||||
by avoiding lots of repeated calculations.
|
||||
|
||||
_`.awlseg.free`: A conservative estimate of the number of free grains
|
||||
in the segment. It is always guaranteed to be greater than or equal to
|
||||
the number of free grains in the segment, hence can be used during
|
||||
allocation to quickly pass over a segment.
|
||||
_`.awlseg.freeGrains`: A conservative estimate of the number of free
|
||||
grains in the segment. It is always guaranteed to be greater than or
|
||||
equal to the number of free grains in the segment, hence can be used
|
||||
during allocation to quickly pass over a segment.
|
||||
|
||||
.. note::
|
||||
|
||||
|
|
@ -241,12 +198,6 @@ _`.fun.init`: ``AWLStruct`` has four fields, each one needs initializing.
|
|||
_`.fun.init.poolstruct`: The ``poolStruct`` field has already been
|
||||
initialized by generic code (impl.c.pool).
|
||||
|
||||
_`.fun.init.format`: The format will be copied from the argument list,
|
||||
checked, and written into this field.
|
||||
|
||||
_`.fun.init.alignshift`: The ``alignShift`` will be computed from the
|
||||
pool alignment and written into this field.
|
||||
|
||||
_`.fun.init.sig`: The ``sig`` field will be initialized with the
|
||||
signature for this pool.
|
||||
|
||||
|
|
@ -282,32 +233,72 @@ locations as being free in the relevant alloc table. The segment that
|
|||
the buffer is pointing at (which contains the alloc table that needs
|
||||
to be dinked with) is available via ``BufferSeg()``.
|
||||
|
||||
_`.fun.benefit`: The benefit returned is the total amount of mutator
|
||||
allocation minus the ``lastRememberedSize`` minus 10 MiB, so the pool
|
||||
becomes an increasingly good candidate for collection at a constant
|
||||
(mutator allocation) rate, crossing the 0 line when there has been
|
||||
10 MiB of allocation since the (beginning of the) last collection. So
|
||||
it gets collected approximately every 10 MiB of allocation. Note that
|
||||
it will also get collected by virtue of being in the same zone as some
|
||||
AMC generation (assuming there are instantiated AMC pools), see
|
||||
`.poolstruct.gen`_ above.
|
||||
``Res AWLDescribe(Pool pool, mps_lib_FILE *stream, Count depth)``
|
||||
|
||||
``Res AWLCondemn(Pool pool, Trace trace, Seg seg)``
|
||||
_`.fun.describe`:
|
||||
|
||||
_`.fun.condemn`: The current design only permits each segment to be
|
||||
|
||||
Internal
|
||||
........
|
||||
|
||||
``Res AWLSegCreate(AWLSeg *awlsegReturn, Size size)``
|
||||
|
||||
_`.fun.awlsegcreate`: Creates a segment of class ``AWLSegClass`` of size at least ``size``.
|
||||
|
||||
_`.fun.awlsegcreate.size.round`: ``size`` is rounded up to the arena
|
||||
grain size before requesting the segment.
|
||||
|
||||
_`.fun.awlsegcreate.size.round.justify`: The arena requires that all
|
||||
segment sizes are rounded up to the arena grain size.
|
||||
|
||||
_`.fun.awlsegcreate.where`: The segment is allocated using a
|
||||
generation preference, using the generation number stored in the
|
||||
``AWLStruct`` (the ``gen`` field), see `.poolstruct.gen`_ above.
|
||||
|
||||
``Res awlSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)``
|
||||
|
||||
_`.fun.awlseginit`: Init method for ``AWLSegClass``, called for
|
||||
``SegAlloc()`` whenever an ``AWLSeg`` is created (see
|
||||
`.fun.awlsegcreate`_ above).
|
||||
|
||||
_`.fun.awlseginit.tables`: The segment's mark, scanned, and alloc tables
|
||||
(see `.awlseg.bt`_ above) are allocated and initialised. The segment's
|
||||
grains field is computed and stored.
|
||||
|
||||
``void awlSegFinish(Seg seg)``
|
||||
|
||||
_`.fun.awlsegfinish`: Finish method for ``AWLSegClass``, called from
|
||||
``SegFree()``. Will free the segment's tables (see `.awlseg.bt`_).
|
||||
|
||||
``Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn, AWLSeg awlseg, AWL awl, Size size)``
|
||||
|
||||
_`.fun.awlsegalloc`: Will search for a free block in the segment that
|
||||
is at least size bytes long. The base address of the block is returned
|
||||
in ``*baseReturn``, the limit of the entire free block (which must be
|
||||
at least as large as size and may be bigger) is returned in
|
||||
``*limitReturn``. The requested size is converted to a number of
|
||||
grains, ``BTFindResRange()`` is called to find a run of this length in
|
||||
the alloc bit-table (`.awlseg.alloc`_). The return results (if it is
|
||||
successful) from ``BTFindResRange()`` are in terms of grains, they are
|
||||
converted back to addresses before returning the relevant values from
|
||||
this function.
|
||||
|
||||
``Res awlSegWhiten(Seg seg, Trace trace)``
|
||||
|
||||
_`.fun.whiten`: The current design only permits each segment to be
|
||||
condemned for one trace (see `.awlseg.mark`_). This function checks
|
||||
that the segment is not condemned for any trace (``seg->white ==
|
||||
that the segment is not white for any trace (``seg->white ==
|
||||
TraceSetEMPTY``). The segment's mark bit-table is reset, and the
|
||||
whiteness of the seg (``seg->white``) has the current trace added to
|
||||
it.
|
||||
|
||||
``void AWLGrey(Pool pool, Trace trace, Seg seg)``
|
||||
``void awlSegGreyen(Seg seg, Trace trace)``
|
||||
|
||||
_`.fun.grey`: If the segment is not condemned for this trace the
|
||||
_`.fun.grey`: If the segment is not white for this trace, the
|
||||
segment's mark table is set to all 1s and the segment is recorded as
|
||||
being grey.
|
||||
|
||||
``Res AWLScan(ScanState ss, Pool pool, Seg seg)``
|
||||
``Res awlSegScan(Bool *totalReturn, Seg seg, ScanState ss)``
|
||||
|
||||
_`.fun.scan`:
|
||||
|
||||
|
|
@ -406,38 +397,31 @@ _`.fun.scan.pass.more`: At the end of a pass the finished flag is
|
|||
examined.
|
||||
|
||||
_`.fun.scan.pass.more.not`: If the finished flag is set then we are
|
||||
done (see `.fun.scan.overview.finished-flag`_ above), ``AWLScan()``
|
||||
done (see `.fun.scan.overview.finished-flag`_ above), ``awlSegScan()``
|
||||
returns.
|
||||
|
||||
_`.fun.scan.pass.more.so`: Otherwise (the finished flag is reset) we
|
||||
perform another pass (see `.fun.scan.pass`_ above).
|
||||
|
||||
``Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)``
|
||||
``Res awlSegFix(Seg seg, ScanState ss, Ref *refIO)``
|
||||
|
||||
_`.fun.fix`: ``ss->wasMarked`` is set to ``TRUE`` (clear compliance
|
||||
with design.mps.fix.protocol.was-marked.conservative_).
|
||||
|
||||
.. _design.mps.fix.protocol.was-marked.conservative: fix#protocol-was-marked-conservative
|
||||
|
||||
If the rank (``ss->rank``) is ``RankAMBIG`` then fix returns
|
||||
immediately unless the reference is aligned to the pool alignment.
|
||||
|
||||
If the rank (``ss->rank``) is ``RankAMBIG`` then fix returns
|
||||
immediately unless the referenced grain is allocated.
|
||||
_`.fun.fix`: If the rank (``ss->rank``) is ``RankAMBIG`` then fix
|
||||
returns immediately unless the reference is in the segment bounds,
|
||||
aligned to the pool alignment, and allocated.
|
||||
|
||||
The bit in the marked table corresponding to the referenced grain will
|
||||
be read. If it is already marked then fix returns. Otherwise (the
|
||||
grain is unmarked), ``ss->wasMarked`` is set to ``FALSE``, the
|
||||
remaining actions depend on whether the rank (``ss->rank``) is
|
||||
``RankWEAK`` or not. If the rank is weak then the reference is
|
||||
adjusted to 0 (see design.mps.weakness) and fix returns. If the rank
|
||||
is something else then the mark bit corresponding to the referenced
|
||||
grain is set, and the segment is greyed using ``TraceSegGreyen()``.
|
||||
grain is unmarked), ``ss->wasMarked`` is set to ``FALSE`` (see
|
||||
design.mps.fix.was-marked.not_), the remaining actions depend on
|
||||
whether the rank (``ss->rank``) is ``RankWEAK`` or not. If the rank is
|
||||
weak then the reference is adjusted to 0 (see design.mps.weakness) and
|
||||
fix returns. If the rank is something else then the mark bit
|
||||
corresponding to the referenced grain is set, and the segment is
|
||||
greyed using ``SegSetGrey()``.
|
||||
|
||||
Fix returns.
|
||||
.. _design.mps.fix.was-marked.not: fix#was-marked-not
|
||||
|
||||
|
||||
``void AWLReclaim(Pool pool, Trace trace, Seg seg)``
|
||||
``void awlSegReclaim(Seg seg, Trace trace)``
|
||||
|
||||
_`.fun.reclaim`: This iterates over all allocated objects in the
|
||||
segment and frees objects that are not marked. When this iteration is
|
||||
|
|
@ -459,56 +443,6 @@ objects. Now reclaim doesn't need to check that the objects are
|
|||
allocated before skipping them. There may be a corresponding change
|
||||
for scan as well.
|
||||
|
||||
``Res AWLDescribe(Pool pool, mps_lib_FILE *stream, Count depth)``
|
||||
|
||||
_`.fun.describe`:
|
||||
|
||||
|
||||
Internal
|
||||
........
|
||||
|
||||
``Res AWLSegCreate(AWLSeg *awlsegReturn, Size size)``
|
||||
|
||||
_`.fun.awlsegcreate`: Creates a segment of class ``AWLSegClass`` of size at least ``size``.
|
||||
|
||||
_`.fun.awlsegcreate.size.round`: ``size`` is rounded up to the arena
|
||||
grain size before requesting the segment.
|
||||
|
||||
_`.fun.awlsegcreate.size.round.justify`: The arena requires that all
|
||||
segment sizes are rounded up to the arena grain size.
|
||||
|
||||
_`.fun.awlsegcreate.where`: The segment is allocated using a
|
||||
generation preference, using the generation number stored in the
|
||||
``AWLStruct`` (the ``gen`` field), see `.poolstruct.gen`_ above.
|
||||
|
||||
``Res awlSegInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)``
|
||||
|
||||
_`.fun.awlseginit`: Init method for ``AWLSegClass``, called for
|
||||
``SegAlloc()`` whenever an ``AWLSeg`` is created (see
|
||||
`.fun.awlsegcreate`_ above).
|
||||
|
||||
_`.fun.awlseginit.tables`: The segment's mark, scanned, and alloc tables
|
||||
(see `.awlseg.bt`_ above) are allocated and initialised. The segment's
|
||||
grains field is computed and stored.
|
||||
|
||||
``void awlSegFinish(Seg seg)``
|
||||
|
||||
_`.fun.awlsegfinish`: Finish method for ``AWLSegClass``, called from
|
||||
``SegFree()``. Will free the segment's tables (see `.awlseg.bt`_).
|
||||
|
||||
``Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn, AWLSeg awlseg, AWL awl, Size size)``
|
||||
|
||||
_`.fun.awlsegalloc`: Will search for a free block in the segment that
|
||||
is at least size bytes long. The base address of the block is returned
|
||||
in ``*baseReturn``, the limit of the entire free block (which must be
|
||||
at least as large as size and may be bigger) is returned in
|
||||
``*limitReturn``. The requested size is converted to a number of
|
||||
grains, ``BTFindResRange()`` is called to find a run of this length in
|
||||
the alloc bit-table (`.awlseg.alloc`_). The return results (if it is
|
||||
successful) from ``BTFindResRange()`` are in terms of grains, they are
|
||||
converted back to addresses before returning the relevant values from
|
||||
this function.
|
||||
|
||||
``Bool AWLDependentObject(Addr *objReturn, Addr parent)``
|
||||
|
||||
_`.fun.dependent-object`: This function abstracts the association
|
||||
|
|
@ -565,7 +499,7 @@ Document History
|
|||
Copyright and License
|
||||
---------------------
|
||||
|
||||
Copyright © 2013-2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
Copyright © 2013-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
All rights reserved. This is an open source license. Contact
|
||||
Ravenbrook for commercial licensing options.
|
||||
|
||||
|
|
|
|||
|
|
@ -116,19 +116,11 @@ _`.poolstruct`: The class specific pool structure is::
|
|||
|
||||
typedef struct LOStruct {
|
||||
PoolStruct poolStruct; /* generic pool structure */
|
||||
Format format; /* format for allocated objects */
|
||||
Shift alignShift;
|
||||
Sig sig; /* impl.h.misc.sig */
|
||||
PoolGenStruct pgenStruct; /* pool generation */
|
||||
PoolGen pgen; /* NULL or pointer to pgenStruct */
|
||||
Sig sig; /* <code/misc.h#sig> */
|
||||
} LOStruct;
|
||||
|
||||
_`.poolstruct.format`: This is the format of the objects that are
|
||||
allocated in the pool.
|
||||
|
||||
_`.poolstruct.alignShift`: This is shift used in alignment
|
||||
computations. It is ``SizeLog2(pool->alignment).`` It can be used on
|
||||
the right of a shift operator (``<<`` or ``>>``) to convert between a
|
||||
number of bytes and a number of grains.
|
||||
|
||||
_`.loseg`: Every segment is an instance of segment class ``LOSegClass``, a
|
||||
subclass of ``GCSegClass``, and is an object of type ``LOSegStruct``.
|
||||
|
||||
|
|
@ -140,11 +132,13 @@ _`.loseg.decl`: The declaration of the structure is as follows::
|
|||
|
||||
typedef struct LOSegStruct {
|
||||
GCSegStruct gcSegStruct; /* superclass fields must come first */
|
||||
LO lo; /* owning LO */
|
||||
BT mark; /* mark bit table */
|
||||
BT alloc; /* alloc bit table */
|
||||
Count free; /* number of free grains */
|
||||
Sig sig; /* impl.h.misc.sig */
|
||||
Count freeGrains; /* free grains */
|
||||
Count bufferedGrains; /* grains in buffers */
|
||||
Count newGrains; /* grains allocated since last collection */
|
||||
Count oldGrains; /* grains allocated prior to last collection */
|
||||
Sig sig; /* <code/misc.h#sig> */
|
||||
} LOSegStruct;
|
||||
|
||||
_`.loseg.sig`: The signature for a loseg is 0x519705E9 (SIGLOSEG).
|
||||
|
|
@ -160,7 +154,7 @@ but might be inefficient in terms of space in some circumstances.
|
|||
|
||||
_`.loseg.mark`: This is a Bit Table that is used to mark objects
|
||||
during a trace. Each grain in the segment is associated with 1 bit in
|
||||
this table. When ``LOFix()`` (see `.fun.fix`_ below) is called the
|
||||
this table. When ``loSegFix()`` (see `.fun.fix`_ below) is called the
|
||||
address is converted to a grain within the segment and the
|
||||
corresponding bit in this table is set.
|
||||
|
||||
|
|
@ -198,7 +192,11 @@ _`.fun.buffer-empty`:
|
|||
|
||||
_`.fun.condemn`:
|
||||
|
||||
``Res LOFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)``
|
||||
|
||||
Internal
|
||||
........
|
||||
|
||||
``Res loSegFix(Seg seg, ScanState ss, Ref *refIO)``
|
||||
|
||||
_`.fun.fix`: Fix treats references of most ranks much the same. There
|
||||
is one mark table that records all marks. A reference of rank
|
||||
|
|
@ -213,16 +211,7 @@ been marked otherwise nothing happens. Note that there is no check
|
|||
that the reference refers to a valid object boundary (which wouldn't
|
||||
be a valid check in the case of ambiguous references anyway).
|
||||
|
||||
``void LOReclaim(Pool pool, Trace trace, Seg seg)``
|
||||
|
||||
_`.fun.reclaim`: Derives the loseg from the seg, and calls
|
||||
``loSegReclaim()`` (see `.fun.segreclaim`_ below).
|
||||
|
||||
|
||||
Internal
|
||||
........
|
||||
|
||||
``void loSegReclaim(LOSeg loseg, Trace trace)``
|
||||
``void loSegReclaim(Seg seg, Trace trace)``
|
||||
|
||||
_`.fun.segreclaim`: For all the contiguous allocated regions in the
|
||||
segment it locates the boundaries of all the objects in that region by
|
||||
|
|
@ -231,7 +220,7 @@ of the region (the beginning of the region is guaranteed to coincide
|
|||
with the beginning of an object). For each object it examines the bit
|
||||
in the mark bit table that corresponds to the beginning of the object.
|
||||
If that bit is set then the object has been marked as a result of a
|
||||
previous call to ``LOFix()``, the object is preserved by doing
|
||||
previous call to ``loSegFix()``, the object is preserved by doing
|
||||
nothing. If that bit is not set then the object has not been marked
|
||||
and should be reclaimed; the object is reclaimed by resetting the
|
||||
appropriate range of bits in the segment's free bit table.
|
||||
|
|
|
|||
|
|
@ -432,9 +432,9 @@ to grow very quickly.
|
|||
_`.finish`: Iterate over all the segments, returning all the segments
|
||||
to the arena.
|
||||
|
||||
``Res MRGScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)``
|
||||
``Res mrgRefSegScan(Bool *totalReturn, Pool pool, Seg seg, ScanState ss)``
|
||||
|
||||
_`.scan`: ``MRGScan()`` scans a segment.
|
||||
_`.scan`: ``mrgRefSegScan()`` scans a segment of guardians.
|
||||
|
||||
_`.scan.trivial`: Scan will do nothing (that is, return immediately)
|
||||
if the tracing rank is anything other than final.
|
||||
|
|
@ -451,10 +451,10 @@ scanning is detrimental, it will only delay finalization. If the rank
|
|||
is higher than final there is nothing to do, the pool only contains
|
||||
final references.
|
||||
|
||||
_`.scan.guardians`: ``MRGScan()`` will iterate over all guardians in
|
||||
the segment. Every guardian's reference will be fixed (_`.scan.free`:
|
||||
note that guardians that are on the free list have ``NULL`` in their
|
||||
reference part).
|
||||
_`.scan.guardians`: ``mrgRefSegScan()`` will iterate over all
|
||||
guardians in the segment. Every guardian's reference will be fixed
|
||||
(_`.scan.free`: note that guardians that are on the free list have
|
||||
``NULL`` in their reference part).
|
||||
|
||||
_`.scan.wasold`: If the object referred to had not been fixed
|
||||
previously (that is, was unmarked) then the object is not referenced
|
||||
|
|
@ -489,14 +489,6 @@ and exit lists and prints the guardians in each. The location of the
|
|||
guardian and the value of the reference in it will be printed out.
|
||||
Provided for debugging only.
|
||||
|
||||
_`.functions.unused`: All of these will be unused: ``BufferInit()``,
|
||||
``BufferFill()``, ``BufferEmpty()``, ``BufferFinish()``,
|
||||
``TraceBegin()``, ``TraceCondemn()``, ``PoolFix()``, ``PoolReclaim()``, ``TraceEnd()``.
|
||||
|
||||
_`.functions.trivial`: The Grey method of the pool class will be
|
||||
``PoolTrivGrey()``, this pool has no further bookkeeping to perform
|
||||
for grey segments.
|
||||
|
||||
|
||||
Transgressions
|
||||
--------------
|
||||
|
|
@ -512,7 +504,7 @@ arena. A suggested strategy for this is as follows:
|
|||
|
||||
- Add a free segment ring to the pool.
|
||||
|
||||
- In ``MRGRefSegScan()``, if the segment is entirely free, don't scan
|
||||
- In ``mrgRefSegScan()``, if the segment is entirely free, don't scan
|
||||
it, but instead detach its links from the free ring, and move the
|
||||
segment to the free segment ring.
|
||||
|
||||
|
|
@ -658,7 +650,7 @@ and haven't been finalized. This will test `.promise.unreachable`_ and
|
|||
Notes
|
||||
-----
|
||||
|
||||
_`.access.inadequate`: ``PoolAccess()`` will scan segments at
|
||||
_`.access.inadequate`: ``SegAccess()`` will scan segments at
|
||||
``RankEXACT``. Really it should be scanned at whatever the minimum rank
|
||||
of all grey segments is (the trace rank phase), however there is no
|
||||
way to find this out. As a consequence we will sometimes scan pages at
|
||||
|
|
|
|||
|
|
@ -395,34 +395,35 @@ functions.
|
|||
|
||||
``SuperclassPoly(kind, class)``
|
||||
|
||||
_`.if.superclass`: An introspection function which returns the direct
|
||||
superclass of class object ``class`` as a class of kind ``kind``.
|
||||
This may assert if the superclass is not (a subtype of) the kind
|
||||
requested.
|
||||
_`.if.superclass-poly`: An introspection function which returns the
|
||||
direct superclass of class object ``class`` as a class of kind
|
||||
``kind``. This may assert if the superclass is not (a subtype of) the
|
||||
kind requested.
|
||||
|
||||
|
||||
``ClassOfPoly(kind, inst)``
|
||||
|
||||
_`.if.class`: An introspection function which returns the class of
|
||||
which ``inst`` is a direct instance, as a class of kind ``kind``.
|
||||
This may assert if the class is not (a subtype of) the kind requested.
|
||||
_`.if.class-of-poly`: An introspection function which returns the
|
||||
class of which ``inst`` is a direct instance, as a class of kind
|
||||
``kind``. This may assert if the class is not (a subtype of) the kind
|
||||
requested.
|
||||
|
||||
|
||||
``SetClassOfPoly(inst, class)``
|
||||
|
||||
_`.if.set-class`: An initialization function that sets the class of
|
||||
``inst`` to be ``class``. This is intended only for use in
|
||||
_`.if.set-class-of-poly`: An initialization function that sets the
|
||||
class of ``inst`` to be ``class``. This is intended only for use in
|
||||
initialization functions, to specialize the instance once its fields
|
||||
have been initialized. Each Init function should call its superclass
|
||||
have been initialized. Each Init function should call its superclass
|
||||
init, finally reaching InstInit, and then, once it has set up its
|
||||
fields, use SetClassOfPoly to set the class and check the instance
|
||||
with its check method. Compare with `design.mps.sig`_.
|
||||
with its check method. Compare with `design.mps.sig`_.
|
||||
|
||||
.. _`design.mps.sig`: sig
|
||||
|
||||
``IsSubclass(sub, super)``
|
||||
|
||||
_`.if.subclass`: An introspection function which returns a ``Bool``
|
||||
_`.if.is-subclass`: An introspection function which returns a ``Bool``
|
||||
indicating whether ``sub`` is a subclass of ``super``. That is, it is
|
||||
a predicate for testing subclass relationships.
|
||||
|
||||
|
|
@ -644,7 +645,7 @@ B. Document History
|
|||
C. Copyright and License
|
||||
------------------------
|
||||
|
||||
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
Copyright © 2013-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
All rights reserved. This is an open source license. Contact
|
||||
Ravenbrook for commercial licensing options.
|
||||
|
||||
|
|
|
|||
|
|
@ -135,8 +135,8 @@ Interface
|
|||
|
||||
_`.if.pthreadext.abstract`: A thread is represented by the abstract
|
||||
type ``PThreadext``. A ``PThreadext`` object corresponds directly with
|
||||
a PThread (of type ``pthread_t``). There may be more than one
|
||||
``PThreadext`` object for the same PThread.
|
||||
a thread (of type ``pthread_t``). There may be more than one
|
||||
``PThreadext`` object for the same thread.
|
||||
|
||||
_`.if.pthreadext.structure`: The structure definition of
|
||||
``PThreadext`` (``PThreadextStruct``) is exposed by the interface so
|
||||
|
|
@ -163,16 +163,16 @@ _`.if.suspend`: Suspends a ``PThreadext`` object (puts it into a
|
|||
suspended state). Meets `.req.suspend`_. The object must not already
|
||||
be in a suspended state. If the function returns ``ResOK``, the
|
||||
context of the thread is returned in contextReturn, and the
|
||||
corresponding PThread will not make any progress until it is resumed:
|
||||
corresponding thread will not make any progress until it is resumed.
|
||||
|
||||
``Res PThreadextResume(PThreadext pthreadext)``
|
||||
|
||||
_`.if.resume`: Resumes a ``PThreadext`` object. Meets
|
||||
`.req.resume`_. The object must already be in a suspended state.
|
||||
Puts the object into a non-suspended state. Permits the corresponding
|
||||
PThread to make progress again, (although that might not happen
|
||||
immediately if there is another suspended ``PThreadext`` object
|
||||
corresponding to the same thread):
|
||||
_`.if.resume`: Resumes a ``PThreadext`` object. Meets `.req.resume`_.
|
||||
The object must already be in a suspended state. Puts the object into
|
||||
a non-suspended state. Permits the corresponding thread to make
|
||||
progress again, although that might not happen immediately if there is
|
||||
another suspended ``PThreadext`` object corresponding to the same
|
||||
thread.
|
||||
|
||||
``void PThreadextFinish(PThreadext pthreadext)``
|
||||
|
||||
|
|
@ -189,9 +189,9 @@ _`.impl.pthreadext`: The structure definition for a ``PThreadext``
|
|||
object is::
|
||||
|
||||
struct PThreadextStruct {
|
||||
Sig sig; /* design.mps.sig */
|
||||
Sig sig; /* <design/sig/> */
|
||||
pthread_t id; /* Thread ID */
|
||||
struct sigcontext *suspendedScp; /* sigcontext if suspended */
|
||||
MutatorContext context; /* context if suspended */
|
||||
RingStruct threadRing; /* ring of suspended threads */
|
||||
RingStruct idRing; /* duplicate suspensions for id */
|
||||
};
|
||||
|
|
@ -199,7 +199,7 @@ object is::
|
|||
_`.impl.field.id`: The ``id`` field shows which PThread the object
|
||||
corresponds to.
|
||||
|
||||
_`.impl.field.scp`: The ``suspendedScp`` field contains the context
|
||||
_`.impl.field.context`: The ``context`` field contains the context
|
||||
when in a suspended state. Otherwise it is ``NULL``.
|
||||
|
||||
_`.impl.field.threadring`: The ``threadRing`` field is used to chain
|
||||
|
|
@ -208,29 +208,30 @@ the object onto the suspend ring when it is in the suspended state
|
|||
this ring is single.
|
||||
|
||||
_`.impl.field.idring`: The ``idRing`` field is used to group the
|
||||
object with other objects corresponding to the same PThread (same
|
||||
object with other objects corresponding to the same thread (same
|
||||
``id`` field) when they are in the suspended state. When not in a
|
||||
suspended state, or when this is the only ``PThreadext`` object with
|
||||
this ``id`` in the suspended state, this ring is single.
|
||||
|
||||
_`.impl.global.suspend-ring`: The module maintains a global
|
||||
suspend-ring -- a ring of ``PThreadext`` objects which are in a
|
||||
_`.impl.global.suspend-ring`: The module maintains a global variable
|
||||
``suspendedRing``, a ring of ``PThreadext`` objects which are in a
|
||||
suspended state. This is primarily so that it's possible to determine
|
||||
whether a thread is currently suspended anyway because of another
|
||||
``PThreadext`` object, when a suspend attempt is made.
|
||||
|
||||
_`.impl.global.victim`: The module maintains a global variable which
|
||||
is used to indicate which ``PThreadext`` is the current victim during
|
||||
suspend operations. This is used to communicate information between
|
||||
the controlling thread and the thread being suspended (the victim).
|
||||
The variable has value ``NULL`` at other times.
|
||||
_`.impl.global.victim`: The module maintains a global variable
|
||||
``suspendingVictim`` which is used to indicate which ``PThreadext`` is
|
||||
the current victim during suspend operations. This is used to
|
||||
communicate information between the controlling thread and the thread
|
||||
being suspended (the victim). The variable has value ``NULL`` at other
|
||||
times.
|
||||
|
||||
_`.impl.static.mutex`: We use a lock (mutex) around the suspend and
|
||||
resume operations. This protects the state data (the suspend-ring the
|
||||
victim: see `.impl.global.suspend-ring`_ and `.impl.global.victim`_
|
||||
respectively). Since only one thread can be suspended at a time,
|
||||
there's no possibility of two arenas suspending each other by
|
||||
concurrently suspending each other's threads.
|
||||
resume operations. This protects the state data (the suspend-ring and
|
||||
the victim: see `.impl.global.suspend-ring`_ and
|
||||
`.impl.global.victim`_ respectively). Since only one thread can be
|
||||
suspended at a time, there's no possibility of two arenas suspending
|
||||
each other by concurrently suspending each other's threads.
|
||||
|
||||
_`.impl.static.semaphore`: We use a semaphore to synchronize between
|
||||
the controlling and victim threads during the suspend operation. See
|
||||
|
|
@ -375,7 +376,7 @@ Document History
|
|||
Copyright and License
|
||||
---------------------
|
||||
|
||||
Copyright © 2013-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
Copyright © 2013-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
All rights reserved. This is an open source license. Contact
|
||||
Ravenbrook for commercial licensing options.
|
||||
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ There are two reasons that it is not an equality relation:
|
|||
|
||||
The reason that ``ss.unfixedSummary`` is always a subset of the
|
||||
previous summary is due to an "optimization" which has not been made
|
||||
in ``TraceFix``. See design.mps.trace.fix.fixed.all_.
|
||||
in ``TraceFix()``. See design.mps.trace.fix.fixed.all_.
|
||||
|
||||
.. _design.mps.trace.fix.fixed.all: trace#fix-fixed-all
|
||||
|
||||
|
|
@ -77,12 +77,12 @@ of all scanned references in the segment. We don't know this
|
|||
accurately until we've scanned everything in the segment. So we add in
|
||||
the segment summary each time.
|
||||
|
||||
_`.clever-summary.scan.fix`: TraceScan also expects the scan state
|
||||
fixed summary to include the post-scan summary of all references which
|
||||
were white. Since we don't scan all white references, we need to add
|
||||
in an approximation to the summary of all white references which we
|
||||
didn't scan. This is the intersection of the segment summary and the
|
||||
white summary.
|
||||
_`.clever-summary.scan.fix`: ``traceScanSeg()`` also expects the scan
|
||||
state fixed summary to include the post-scan summary of all references
|
||||
which were white. Since we don't scan all white references, we need to
|
||||
add in an approximation to the summary of all white references which
|
||||
we didn't scan. This is the intersection of the segment summary and
|
||||
the white summary.
|
||||
|
||||
_`.clever-summary.wb`: If the cumulative summary is smaller than the
|
||||
mutator's summary, a write-barrier is needed to prevent the mutator
|
||||
|
|
|
|||
|
|
@ -227,6 +227,125 @@ of ``segLo`` and ``segHi``.
|
|||
Extensibility
|
||||
-------------
|
||||
|
||||
Garbage collection
|
||||
..................
|
||||
|
||||
``typedef Res (*SegAccessMethod)(Seg seg, Arena arena, Addr addr, AccessSet mode, MutatorContext context)``
|
||||
|
||||
_`.method.access`: The ``access`` method indicates that the client
|
||||
program attempted to access the address ``addr``, but has been denied
|
||||
due to a protection fault. The ``mode`` indicates whether the client
|
||||
program was trying to read (``AccessREAD``) or write (``AccessWRITE``)
|
||||
the address. If this can't be determined, ``mode`` is ``AccessREAD |
|
||||
AccessWRITE``. The segment should perform any work necessary to remove
|
||||
the protection whilst still preserving appropriate invariants (this
|
||||
might involve scanning the region containing ``addr``). Segment classes are
|
||||
not required to provide this method, and not doing so indicates they
|
||||
never protect any memory managed by the pool. This method is called
|
||||
via the generic function ``SegAccess()``.
|
||||
|
||||
``typedef Res (*SegWhitenMethod)(Seg seg, Trace trace)``
|
||||
|
||||
_`.method.whiten`: The ``whiten`` method requests that the segment
|
||||
``seg`` condemn (a subset of, but typically all) its objects for the
|
||||
trace ``trace``. That is, prepare them for participation in the trace
|
||||
to determine their liveness. The segment should expect fix requests
|
||||
(`.method.fix`_) during the trace and a reclaim request
|
||||
(`.method.reclaim`_) at the end of the trace. Segment
|
||||
classes that automatically reclaim dead objects must provide this
|
||||
method, and pools that use these segment classes must additionally set
|
||||
the ``AttrGC`` attribute. This method is called via the generic
|
||||
function ``SegWhiten()``.
|
||||
|
||||
``typedef void (*SegGreyenMethod)(Seg seg, Trace trace)``
|
||||
|
||||
_`.method.grey`: The ``greyen`` method requires the segment ``seg`` to
|
||||
colour its objects grey for the trace ``trace`` (excepting objects
|
||||
that were already condemned for this trace). That is, make them ready
|
||||
for scanning by the trace ``trace``. The segment must arrange that any
|
||||
appropriate invariants are preserved, possibly by using the protection
|
||||
interface (see design.mps.prot_). Segment classes are not required to
|
||||
provide this method, and not doing so indicates that all instances of
|
||||
this class will have no fixable or traceable references in them. This
|
||||
method is called via the generic function ``SegGreyen()``.
|
||||
|
||||
.. _design.mps.prot: prot
|
||||
|
||||
``typedef void (*SegBlackenMethod)(Seg seg, TraceSet traceSet)``
|
||||
|
||||
_`.method.blacken`: The ``blacken`` method is called if it is known
|
||||
that the segment ``seg`` cannot refer to the white set for any of the
|
||||
traces in ``traceSet``. The segment must blacken all its grey objects
|
||||
for those traces. Segment classes are not required to provide this
|
||||
method, and not doing so indicates that all instances of this class
|
||||
will have no fixable or traceable references in them. This method is
|
||||
called via the generic function ``SegBlacken()``.
|
||||
|
||||
``typedef Res (*SegScanMethod)(Bool *totalReturn, Seg seg, ScanState ss)``
|
||||
|
||||
_`.method.scan`: The ``scan`` method scans all the grey objects on the
|
||||
segment ``seg``, passing the scan state ``ss`` to ``FormatScan``. The
|
||||
segment may additionally accumulate a summary of *all* its objects. If
|
||||
it succeeds in accumulating such a summary it must indicate that it
|
||||
has done so by setting the ``*totalReturn`` parameter to ``TRUE``.
|
||||
Otherwise it must set ``*totalReturn`` to ``FALSE``. Segment classes
|
||||
are not required to provide this method, and not doing so indicates
|
||||
that all instances of this class will have no fixable or traceable
|
||||
references in them. This method is called via the generic function
|
||||
``SegScan()``.
|
||||
|
||||
``typedef Res (*SegFixMethod)(Seg seg, ScanState ss, Ref *refIO)``
|
||||
|
||||
_`.method.fix`: The ``fix`` method indicates that the reference
|
||||
``*refIO`` has been discovered at rank ``ss->rank`` by the traces in
|
||||
``ss->traces``, and the segment must handle this discovery according
|
||||
to the fix protocol (design.mps.fix_). If the method moves the object,
|
||||
it must update ``*refIO`` to refer to the new location of the object.
|
||||
If the method determines that the referenced object died (for example,
|
||||
because the highest-ranking references to the object were weak), it
|
||||
must update ``*refIO`` to ``NULL``. Segment classes that automatically
|
||||
reclaim dead objects must provide this method, and pools that use
|
||||
these classes must additionally set the ``AttrGC`` attribute. Pool
|
||||
classes that use segment classes that may move objects must also set
|
||||
the ``AttrMOVINGGC`` attribute. The ``fix`` method is on the critical
|
||||
path (see design.mps.critical-path_) and so must be fast. This method
|
||||
is called via the function ``TraceFix()``.
|
||||
|
||||
.. _design.mps.fix: fix
|
||||
.. _design.mps.critical-path: critical-path
|
||||
|
||||
_`.method.fixEmergency`: The ``fixEmergency`` method is used to
|
||||
perform fixing in "emergency" situations. Its specification is
|
||||
identical to the ``fix`` method, but it must complete its work without
|
||||
allocating memory (perhaps by using some approximation, or by running
|
||||
more slowly). Segment classes must provide this method if and only if
|
||||
they provide the ``fix`` method. If the ``fix`` method does not need
|
||||
to allocate memory, then it is acceptable for ``fix`` and
|
||||
``fixEmergency`` to be the same.
|
||||
|
||||
``typedef void (*SegReclaimMethod)(Seg seg, Trace trace)``
|
||||
|
||||
_`.method.reclaim`: The ``reclaim`` method indicates that any
|
||||
remaining white objects in the segment ``seg`` have now been proved
|
||||
unreachable by the trace ``trace``, and so are dead. The segment
|
||||
should reclaim the resources associated with the dead objects. Segment
|
||||
classes are not required to provide this method. If they do, pools
|
||||
that use them must set the ``AttrGC`` attribute. This method is called
|
||||
via the generic function ``SegReclaim()``.
|
||||
|
||||
``typedef void (*SegWalkMethod)(Seg seg, Format format, FormattedObjectsVisitor f, void *v, size_t s)``
|
||||
|
||||
_`.method.walk`: The ``walk`` method must call the visitor function
|
||||
``f`` (along with its closure parameters ``v`` and ``s`` and the
|
||||
format ``format``) once for each of the *black* objects in the
|
||||
segment ``seg``. Padding objects may or may not be included in the
|
||||
walk, at the segment's discretion: it is the responsibility of the
|
||||
client program to handle them. Forwarding objects must not be included
|
||||
in the walk. Segment classes need not provide this method. This
|
||||
method is called by the generic function ``SegWalk()``, which is
|
||||
called by the heap walker ``mps_arena_formatted_objects_walk()``.
|
||||
|
||||
|
||||
Splitting and merging
|
||||
.....................
|
||||
|
||||
|
|
|
|||
|
|
@ -88,22 +88,24 @@ documented in the manual.
|
|||
==== ====== ========================
|
||||
Args Locals Function
|
||||
==== ====== ========================
|
||||
5 0 ``PoolAccess()``
|
||||
5 0 ``PoolSegAccess()``
|
||||
3 5 ``TraceSegAccess()``
|
||||
5 0 ``SegAccess()``
|
||||
5 0 ``SegWholeAccess()``
|
||||
3 8 ``TraceSegAccess()``
|
||||
4 1 ``traceScanSeg()``
|
||||
4 8 ``traceScanSegRes()``
|
||||
4 0 ``PoolScan()``
|
||||
4 5 ``AMCScan()``
|
||||
4 9 ``traceScanSegRes()``
|
||||
4 0 ``SegScan()``
|
||||
4 5 ``amcSegScan()``
|
||||
4 0 ``FormatScan()``
|
||||
3 ≤64 ``format->scan()``
|
||||
4 15 ``AMCFix()``
|
||||
4 5 ``BufferFill()``
|
||||
6 10 ``AMCBufferFill()``
|
||||
6 9 ``PoolGenAlloc()``
|
||||
7 5 ``SegAlloc()``
|
||||
5 5 ``ArenaAlloc()``
|
||||
5 5 ``arenaAllocPolicy()``
|
||||
5 11 ``arenaAllocFromLand()``
|
||||
3 0 ``SegFix()``
|
||||
4 15 ``amcSegFix()``
|
||||
3 5 ``BufferFill()``
|
||||
5 11 ``AMCBufferFill()``
|
||||
5 73 ``PoolGenAlloc()``
|
||||
6 5 ``SegAlloc()``
|
||||
4 4 ``ArenaAlloc()``
|
||||
5 6 ``PolicyAlloc()``
|
||||
6 10 ``ArenaFreeLandAlloc()``
|
||||
7 1 ``LandFindInZones()``
|
||||
7 16 ``cbsFindInZones()``
|
||||
5 3 ``cbsFindFirst()``
|
||||
|
|
@ -111,13 +113,12 @@ Args Locals Function
|
|||
3 7 ``SplaySplay()``
|
||||
4 8 ``SplaySplitDown()``
|
||||
3 0 ``SplayZig()``
|
||||
109 ≤190 **Total**
|
||||
112 ≤258 **Total**
|
||||
==== ====== ========================
|
||||
|
||||
We expect that a compiler will often be able to share stack space
|
||||
between function arguments and local variables, but in the worst case
|
||||
where it cannot, this call requires no more than 299 words of stack
|
||||
space.
|
||||
We expect that a compiler will not need to push all local variables
|
||||
onto the stack, but even in the case where it pushes all of them, this
|
||||
call requires no more than 370 words of stack space.
|
||||
|
||||
This isn't necessarily the deepest call into the MPS (the MPS's
|
||||
modular design and class system makes it hard to do a complete
|
||||
|
|
@ -186,7 +187,7 @@ Document History
|
|||
Copyright and License
|
||||
---------------------
|
||||
|
||||
Copyright © 2014-2017 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
Copyright © 2014-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
All rights reserved. This is an open source license. Contact
|
||||
Ravenbrook for commercial licensing options.
|
||||
|
||||
|
|
|
|||
|
|
@ -424,7 +424,7 @@ other uses of that:
|
|||
pool. Any non-white segment in the rampGen with new set to FALSE has
|
||||
its size added to ``poolGen->newSize`` and gets new set to TRUE.
|
||||
|
||||
- in ``AMCWhiten()``, if new is TRUE, the segment size is deducted
|
||||
- in ``amcSegWhiten()``, if new is TRUE, the segment size is deducted
|
||||
from ``poolGen.newSize`` and new is set to FALSE.
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -85,7 +85,7 @@ guarantee behaviour in this case. For example, POSIX_ says, "A
|
|||
conforming implementation is free to reuse a thread ID after its
|
||||
lifetime has ended. If an application attempts to use a thread ID
|
||||
whose lifetime has ended, the behavior is undefined." For this reason,
|
||||
the documentation for ``mps_thread_dereg()`` specifies that it is an
|
||||
the documentation for ``mps_thread_reg()`` specifies that it is an
|
||||
error if a thread dies while registered.
|
||||
|
||||
.. _POSIX: http://pubs.opengroup.org/onlinepubs/9699919799/functions/V2_chap02.html#tag_15_09_02
|
||||
|
|
|
|||
|
|
@ -157,14 +157,14 @@ the branch predictors) resulting in a slow down. Replacing the
|
|||
improves the overall speed of the Dylan compiler by as much as 9%. See
|
||||
`design.mps.critical_path`_.
|
||||
|
||||
_`.fix.nocopy`: ``AMCFix()`` used to copy objects by using the format's
|
||||
copy method. This involved a function call (through an indirection)
|
||||
and in ``dylan_copy`` a call to ``dylan_skip`` (to recompute the
|
||||
length) and call to ``memcpy`` with general parameters. Replacing this
|
||||
with a direct call to ``memcpy`` removes these overheads and the call
|
||||
to ``memcpy`` now has aligned parameters. The call to ``memcpy`` is
|
||||
inlined by the C compiler. This change results in a 4–5% speed-up in
|
||||
the Dylan compiler.
|
||||
_`.fix.nocopy`: ``amcSegFix()`` used to copy objects by using the
|
||||
format's copy method. This involved a function call (through an
|
||||
indirection) and in ``dylan_copy`` a call to ``dylan_skip`` (to
|
||||
recompute the length) and call to ``memcpy`` with general parameters.
|
||||
Replacing this with a direct call to ``memcpy`` removes these
|
||||
overheads and the call to ``memcpy`` now has aligned parameters. The
|
||||
call to ``memcpy`` is inlined by the C compiler. This change results
|
||||
in a 4–5% speed-up in the Dylan compiler.
|
||||
|
||||
_`.reclaim`: Because the reclaim phase of the trace (implemented by
|
||||
``TraceReclaim()``) examines every segment it is fairly time
|
||||
|
|
|
|||
|
|
@ -97,8 +97,6 @@ are:
|
|||
=================== ===================================================
|
||||
Attribute Description
|
||||
=================== ===================================================
|
||||
``AttrFMT`` Contains formatted objects.
|
||||
Used to decide which pools to walk.
|
||||
``AttrGC`` Is garbage collecting, that is, parts may be
|
||||
reclaimed. Used to decide which segments are
|
||||
condemned.
|
||||
|
|
@ -301,7 +299,7 @@ determined then the smallest unsigned integer with a large enough
|
|||
range may be used instead.
|
||||
|
||||
|
||||
``typedef int LocusPrefKind``
|
||||
``typedef unsigned LocusPrefKind``
|
||||
|
||||
_`.locusprefkind`: The type ``LocusPrefKind`` expresses a preference for
|
||||
addresses within an address space. It takes one of the following
|
||||
|
|
@ -477,7 +475,7 @@ _`.rootmode.conv.c`: ``RootMode`` is converted to ``mps_rm_t`` in the
|
|||
MPS C Interface.
|
||||
|
||||
|
||||
``typedef int RootVar``
|
||||
``typedef unsigned RootVar``
|
||||
|
||||
_`.rootvar`: The type ``RootVar`` is the type of the discriminator for
|
||||
the union within ``RootStruct``.
|
||||
|
|
@ -563,7 +561,9 @@ represented in the obvious way::
|
|||
|
||||
member(ti, ts) ⇔ ((1<<ti) & ts) != 0
|
||||
|
||||
``TraceSet`` is used to represent colour in the Tracer. See
|
||||
In the multiple-traces design, each region of memory may be a
|
||||
different colour for each trace. Thus the set of traces for which a
|
||||
segment is grey (say) is represented by a ``TraceSet``. See
|
||||
design.mps.trace_.
|
||||
|
||||
.. _design.mps.trace: trace
|
||||
|
|
@ -694,7 +694,7 @@ Document History
|
|||
Copyright and License
|
||||
---------------------
|
||||
|
||||
Copyright © 2013-2017 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
Copyright © 2013-2018 Ravenbrook Limited <http://www.ravenbrook.com/>.
|
||||
All rights reserved. This is an open source license. Contact
|
||||
Ravenbrook for commercial licensing options.
|
||||
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ these must be balanced. There is no point spending 1000 CPU units
|
|||
raising a write barrier to avoid 10 CPU units of scanning cost.
|
||||
Therefore we do not raise the write barrier immediately.
|
||||
|
||||
_`.deferral.heuristic: We apply a simple heuristic`: A segment which was
|
||||
_`.deferral.heuristic`: We apply a simple heuristic: A segment which was
|
||||
found to be "interesting" while scanning is likely to be interesting
|
||||
again, and so raising the write barrier is not worthwhile. If we scan
|
||||
a segment several times and find it "boring" then we raise the barrier
|
||||
|
|
@ -84,7 +84,7 @@ _`.deferral.count`: We store a deferral count with the segment. The
|
|||
count is decremented after each boring scan (`.def.boring`_). The write
|
||||
barrier is raised only when the count reaches zero.
|
||||
|
||||
_`.deferral.reset: The count is reset after three events`:
|
||||
_`.deferral.reset`: The count is reset after three events:
|
||||
|
||||
1. segment creation (``WB_DEFER_INIT``)
|
||||
|
||||
|
|
|
|||
31
mps/test/argerr/155.c
Normal file
31
mps/test/argerr/155.c
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
TEST_HEADER
|
||||
id = $Id: //info.ravenbrook.com/project/mps/master/test/argerr/99.c#4 $
|
||||
summary = finalize address not managed by the arena
|
||||
language = c
|
||||
link = testlib.o
|
||||
OUTPUT_SPEC
|
||||
assert = true
|
||||
assertfile P= global.c
|
||||
assertcond = PoolOfAddr(&refpool, arena, (Addr)obj)
|
||||
END_HEADER
|
||||
*/
|
||||
|
||||
#include "testlib.h"
|
||||
#include "mps.h"
|
||||
|
||||
static void test(void)
|
||||
{
|
||||
void *p = &p;
|
||||
mps_arena_t arena;
|
||||
cdie(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none),
|
||||
"create arena");
|
||||
mps_finalize(arena, &p);
|
||||
mps_arena_destroy(arena);
|
||||
}
|
||||
|
||||
int main(void)
|
||||
{
|
||||
easy_tramp(test);
|
||||
return 0;
|
||||
}
|
||||
31
mps/test/argerr/156.c
Normal file
31
mps/test/argerr/156.c
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
/*
|
||||
TEST_HEADER
|
||||
id = $Id: //info.ravenbrook.com/project/mps/master/test/argerr/99.c#4 $
|
||||
summary = definalize address not managed by the arena
|
||||
language = c
|
||||
link = testlib.o
|
||||
OUTPUT_SPEC
|
||||
assert = true
|
||||
assertfile P= global.c
|
||||
assertcond = ArenaHasAddr(arena, (Addr)obj)
|
||||
END_HEADER
|
||||
*/
|
||||
|
||||
#include "testlib.h"
|
||||
#include "mps.h"
|
||||
|
||||
static void test(void)
|
||||
{
|
||||
void *p = &p;
|
||||
mps_arena_t arena;
|
||||
cdie(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none),
|
||||
"create arena");
|
||||
mps_definalize(arena, &p);
|
||||
mps_arena_destroy(arena);
|
||||
}
|
||||
|
||||
int main(void)
|
||||
{
|
||||
easy_tramp(test);
|
||||
return 0;
|
||||
}
|
||||
37
mps/test/argerr/157.c
Normal file
37
mps/test/argerr/157.c
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
TEST_HEADER
|
||||
id = $Id: //info.ravenbrook.com/project/mps/master/test/argerr/99.c#4 $
|
||||
summary = finalize address in manually managed pool
|
||||
language = c
|
||||
link = testlib.o
|
||||
OUTPUT_SPEC
|
||||
assert = true
|
||||
assertfile P= global.c
|
||||
assertcond = PoolHasAttr(refpool, AttrGC)
|
||||
END_HEADER
|
||||
*/
|
||||
|
||||
#include "testlib.h"
|
||||
#include "mps.h"
|
||||
#include "mpscmvff.h"
|
||||
|
||||
static void test(void)
|
||||
{
|
||||
void *p;
|
||||
mps_arena_t arena;
|
||||
mps_pool_t pool;
|
||||
cdie(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none),
|
||||
"create arena");
|
||||
cdie(mps_pool_create_k(&pool, arena, mps_class_mvff(), mps_args_none),
|
||||
"create pool");
|
||||
cdie(mps_alloc(&p, pool, 16), "alloc");
|
||||
mps_finalize(arena, &p);
|
||||
mps_pool_destroy(pool);
|
||||
mps_arena_destroy(arena);
|
||||
}
|
||||
|
||||
int main(void)
|
||||
{
|
||||
easy_tramp(test);
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -20,7 +20,7 @@ exposet0 =P
|
|||
expt825
|
||||
finalcv =P
|
||||
finaltest =P
|
||||
forktest =P =X
|
||||
forktest =X
|
||||
fotest
|
||||
gcbench =N benchmark
|
||||
landtest
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue