diff --git a/mps/code/amcsshe.c b/mps/code/amcsshe.c
index fcbbbd4e88e..de8eec82a3a 100644
--- a/mps/code/amcsshe.c
+++ b/mps/code/amcsshe.c
@@ -166,7 +166,7 @@ static void *test(void *arg, size_t s)
   ramping = 1;
   objs = 0;
   while (collections < collectionsCOUNT) {
-    unsigned long c;
+    mps_word_t c;
     size_t r;
 
     c = mps_collections(arena);
diff --git a/mps/code/apss.c b/mps/code/apss.c
index fa94aefb4b8..e395b50df24 100644
--- a/mps/code/apss.c
+++ b/mps/code/apss.c
@@ -158,7 +158,7 @@ static void testInArena(mps_arena_t arena, mps_pool_debug_option_s *options)
   /* yet (MV Debug works here, because it fakes it through PoolAlloc). */
   printf("MVFF\n");
   res = stress(mps_class_mvff(), randomSizeAligned, arena,
-               (size_t)65536, (size_t)32, (size_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE);
+               (size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE);
   if (res == MPS_RES_COMMIT_LIMIT) return;
   die(res, "stress MVFF");
diff --git a/mps/code/arena.c b/mps/code/arena.c
index 18f74ab487a..df709753143 100644
--- a/mps/code/arena.c
+++ b/mps/code/arena.c
@@ -763,6 +763,26 @@ Bool ArenaHasAddr(Arena arena, Addr addr)
 }
 
 
+/* ArenaAddrObject -- find client pointer to object containing addr
+ * See job003589.
+ */
+
+Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr)
+{
+  Seg seg;
+  Pool pool;
+
+  AVER(pReturn != NULL);
+  AVERT(Arena, arena);
+
+  if (!SegOfAddr(&seg, arena, addr)) {
+    return ResFAIL;
+  }
+  pool = SegPool(seg);
+  return PoolAddrObject(pReturn, pool, seg, addr);
+}
+
+
 /* C. COPYRIGHT AND LICENSE
  *
  * Copyright (C) 2001-2002 Ravenbrook Limited .
diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c
index 31c935d7682..b27f84ad0ff 100644
--- a/mps/code/arenavm.c
+++ b/mps/code/arenavm.c
@@ -568,6 +568,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
     nono.l = -1;
     vmArena->blacklist = ZoneSetAdd(arena, vmArena->blacklist, nono.addr);
   }
+  EVENT2(ArenaBlacklistZone, vmArena, vmArena->blacklist);
 
   for(gen = (Index)0; gen < VMArenaGenCount; gen++) {
     vmArena->genZoneSet[gen] = ZoneSetEMPTY;
@@ -1473,9 +1474,19 @@ static Res vmAllocComm(Addr *baseReturn, Tract *baseTractReturn,
 
   if (pref->isGen) {
     Serial gen = vmGenOfSegPref(vmArena, pref);
+    if (!ZoneSetSuper(vmArena->genZoneSet[gen], zones)) {
+      /* Tracking the whole zoneset for each generation number gives
+       * more understandable telemetry than just reporting the added
+       * zones. */
+      EVENT3(ArenaGenZoneAdd, arena, gen,
+             ZoneSetUnion(vmArena->genZoneSet[gen], zones));
+    }
     vmArena->genZoneSet[gen] = ZoneSetUnion(vmArena->genZoneSet[gen], zones);
   }
 
+  if (ZoneSetInter(vmArena->freeSet, zones) != ZoneSetEMPTY) {
+    EVENT2(ArenaUseFreeZone, arena, ZoneSetInter(vmArena->freeSet, zones));
+  }
   vmArena->freeSet = ZoneSetDiff(vmArena->freeSet, zones);
 
   *baseReturn = base;
diff --git a/mps/code/buffer.c b/mps/code/buffer.c
index dc702776a4f..e7946dda64f 100644
--- a/mps/code/buffer.c
+++ b/mps/code/buffer.c
@@ -22,7 +22,8 @@
  * .trans.mod: There are several instances where pool structures are
  * directly accessed by this module because  does not provide
  * an adequate (or adequately documented) interface. They bear this
- * tag. */
+ * tag.
+ */
 
 #include "mpm.h"
 
@@ -1283,7 +1284,7 @@ static Res segBufInit(Buffer buffer, Pool pool, ArgList args)
   segbuf->seg = NULL;
   segbuf->sig = SegBufSig;
   segbuf->rankSet = RankSetEMPTY;
-  
+
   AVERT(SegBuf, segbuf);
   EVENT3(BufferInitSeg, buffer, pool, buffer->isMutator);
   return ResOK;
@@ -1486,7 +1487,6 @@ static void rankBufVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs)
   AVER(ArgListCheck(args));
 }
 
-
 /* rankBufInit -- RankBufClass init method */
 
 static Res rankBufInit(Buffer buffer, Pool pool, ArgList args)
diff --git a/mps/code/clock.h b/mps/code/clock.h
index eb28de96612..5e103ff4393 100644
--- a/mps/code/clock.h
+++ b/mps/code/clock.h
@@ -78,9 +78,18 @@ typedef union EventClockUnion {
 
 #elif defined(MPS_ARCH_I6)
 
+#if defined(MPS_BUILD_MV)
+
+#define EVENT_CLOCK_PRINT(stream, clock) \
+  fprintf(stream, "%016llX", (clock));
+
+#else
+
 #define EVENT_CLOCK_PRINT(stream, clock) \
   fprintf(stream, "%016lX", (clock));
 
+#endif
+
 #define EVENT_CLOCK_WRITE(stream, clock) \
   WriteF(stream, "$W", (WriteFW)(clock), NULL)
diff --git a/mps/code/commpost.nmk b/mps/code/commpost.nmk
index d0d1e3a3d36..808307b3b54 100644
--- a/mps/code/commpost.nmk
+++ b/mps/code/commpost.nmk
@@ -153,6 +153,9 @@ $(PFM)\$(VARIETY)\btcv.exe: $(PFM)\$(VARIETY)\btcv.obj \
 $(PFM)\$(VARIETY)\bttest.exe: $(PFM)\$(VARIETY)\bttest.obj \
         $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ)
 
+$(PFM)\$(VARIETY)\cvmicv.exe: $(PFM)\$(VARIETY)\cvmicv.obj \
+        $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
+
 $(PFM)\$(VARIETY)\exposet0.exe: $(PFM)\$(VARIETY)\exposet0.obj \
         $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
 
@@ -220,12 +223,13 @@ $(PFM)\$(VARIETY)\walkt0.exe: $(PFM)\$(VARIETY)\walkt0.obj \
         $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
 
 $(PFM)\$(VARIETY)\zcoll.exe: $(PFM)\$(VARIETY)\zcoll.obj \
-        $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) \
-        $(TESTLIBOBJ)
+        $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
 
 $(PFM)\$(VARIETY)\zmess.exe: $(PFM)\$(VARIETY)\zmess.obj \
-        $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) \
-        $(TESTLIBOBJ)
+        $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
+
+$(PFM)\$(VARIETY)\ztfm.exe: $(PFM)\$(VARIETY)\ztfm.obj \
+        $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ)
 
 $(PFM)\$(VARIETY)\mpseventcnv.exe: $(PFM)\$(VARIETY)\eventcnv.obj \
         $(PFM)\$(VARIETY)\mps.lib
diff --git a/mps/code/config.h b/mps/code/config.h
index 4c5a192d84f..380fb7f0449 100644
--- a/mps/code/config.h
+++ b/mps/code/config.h
@@ -322,9 +322,11 @@
  */
 #if defined(MPS_ARCH_I3) && defined(MPS_BUILD_MV)
 #define StackProbeDEPTH ((Size)500)
+#elif defined(MPS_PF_W3I6MV)
+#define StackProbeDEPTH ((Size)500)
 #else
 #define StackProbeDEPTH ((Size)0)
-#endif /* MPS_ARCH_I3 */
+#endif
 
 
 /* Shield Configuration -- see  */
diff --git a/mps/code/eventdef.h b/mps/code/eventdef.h
index fa2940011da..0d9e2ea910f 100644
--- a/mps/code/eventdef.h
+++ b/mps/code/eventdef.h
@@ -38,7 +38,7 @@
 
 #define EVENT_VERSION_MAJOR  ((unsigned)1)
 #define EVENT_VERSION_MEDIAN ((unsigned)1)
-#define EVENT_VERSION_MINOR  ((unsigned)4)
+#define EVENT_VERSION_MINOR  ((unsigned)6)
 
 
 /* EVENT_LIST -- list of event types and general properties
@@ -68,7 +68,7 @@
  */
 
 #define EventNameMAX ((size_t)19)
-#define EventCodeMAX ((EventCode)0x0082)
+#define EventCodeMAX ((EventCode)0x0086)
 
 #define EVENT_LIST(EVENT, X) \
   /* 0123456789012345678 <- don't exceed without changing EventNameMAX */ \
@@ -176,7 +176,7 @@
   EVENT(X, MessagesDropped   , 0x006D, TRUE, Arena) \
   EVENT(X, MessagesExist     , 0x006E, TRUE, Arena) \
   EVENT(X, ChainCondemnAuto  , 0x006F, TRUE, Trace) \
-  EVENT(X, TraceFindGrey     , 0x0070, TRUE, Trace) \
+  EVENT(X, TraceFindGrey     , 0x0070, TRUE, Seg) \
   EVENT(X, TraceBandAdvance  , 0x0071, TRUE, Trace) \
   EVENT(X, AWLDeclineTotal   , 0x0072, TRUE, Trace) \
   EVENT(X, AWLDeclineSeg     , 0x0073, TRUE, Trace) \
@@ -188,7 +188,12 @@
   EVENT(X, VMCompact         , 0x0079, TRUE, Arena) \
   EVENT(X, amcScanNailed     , 0x0080, TRUE, Seg) \
   EVENT(X, AMCTraceEnd       , 0x0081, TRUE, Trace) \
-  EVENT(X, TraceStartPoolGen , 0x0082, TRUE, Trace)
+  EVENT(X, TraceStartPoolGen , 0x0082, TRUE, Trace) \
+  /* new events for performance analysis of large heaps. */ \
+  EVENT(X, TraceCondemnZones , 0x0083, TRUE, Trace) \
+  EVENT(X, ArenaGenZoneAdd   , 0x0084, TRUE, Arena) \
+  EVENT(X, ArenaUseFreeZone  , 0x0085, TRUE, Arena) \
+  EVENT(X, ArenaBlacklistZone, 0x0086, TRUE, Arena)
 
 
 /* Remember to update EventNameMAX and EventCodeMAX above!
@@ -721,6 +726,24 @@
   PARAM(X,  9, W, totalSize)       /* total size of pool gen */ \
   PARAM(X, 10, W, newSizeAtCreate) /* new size of pool gen at trace create */
 
+#define EVENT_TraceCondemnZones_PARAMS(PARAM, X) \
+  PARAM(X, 0, P, trace)        /* the trace */ \
+  PARAM(X, 1, W, condemnedSet) /* the condemned zoneSet */ \
+  PARAM(X, 2, W, white)        /* the trace's white zoneSet */
+
+#define EVENT_ArenaGenZoneAdd_PARAMS(PARAM, X) \
+  PARAM(X, 0, P, arena)        /* the arena */ \
+  PARAM(X, 1, W, gen)          /* the generation number */ \
+  PARAM(X, 2, W, zoneSet)      /* the new zoneSet */
+
+#define EVENT_ArenaUseFreeZone_PARAMS(PARAM, X) \
+  PARAM(X, 0, P, arena)        /* the arena */ \
+  PARAM(X, 1, W, zoneSet)      /* zones that aren't free any longer */
+
+#define EVENT_ArenaBlacklistZone_PARAMS(PARAM, X) \
+  PARAM(X, 0, P, arena)        /* the arena */ \
+  PARAM(X, 1, W, zoneSet)      /* the blacklist zoneset */
+
 
 #endif /* eventdef_h */
diff --git a/mps/code/exposet0.c b/mps/code/exposet0.c
index 8bc0a190d73..b204b59d265 100644
--- a/mps/code/exposet0.c
+++ b/mps/code/exposet0.c
@@ -178,7 +178,7 @@ static void *test(void *arg, size_t s)
   collections = 0;
   objs = 0;
   while (collections < collectionsCOUNT) {
-    unsigned long c;
+    mps_word_t c;
     size_t r;
 
     c = mps_collections(arena);
diff --git a/mps/code/locbwcss.c b/mps/code/locbwcss.c
index ef29f241c00..5f9d6095623 100644
--- a/mps/code/locbwcss.c
+++ b/mps/code/locbwcss.c
@@ -154,12 +154,14 @@ static void testInArena(mps_arena_t arena)
   int i;
 
   die(mps_pool_create(&hipool, arena, mps_class_mvff(),
-                      chunkSize, chunkSize, 1024,
+                      chunkSize, chunkSize,
+                      (mps_align_t)1024,
                       TRUE, TRUE, TRUE),
       "Create HI MFFV");
 
   die(mps_pool_create(&lopool, arena, mps_class_mvff(),
-                      chunkSize, chunkSize, 1024,
+                      chunkSize, chunkSize,
+                      (mps_align_t)1024,
                       FALSE, FALSE, TRUE),
       "Create LO MFFV");
 
diff --git a/mps/code/mpm.h b/mps/code/mpm.h
index 37957a1bcc1..90a8b4b16dc 100644
--- a/mps/code/mpm.h
+++ b/mps/code/mpm.h
@@ -210,6 +210,7 @@ extern Res (PoolFix)(Pool pool, ScanState ss, Seg seg, Addr *refIO);
 extern Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO);
 extern void PoolReclaim(Pool pool, Trace trace, Seg seg);
 extern void PoolTraceEnd(Pool pool, Trace trace);
+extern Res PoolAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr);
 extern void PoolWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f,
                      void *v, size_t s);
 extern void PoolFreeWalk(Pool pool, FreeBlockStepMethod f, void *p);
@@ -259,6 +260,7 @@ extern Res PoolTrivFramePush(AllocFrame *frameReturn, Pool pool, Buffer buf);
 extern Res PoolNoFramePop(Pool pool, Buffer buf, AllocFrame frame);
 extern Res PoolTrivFramePop(Pool pool, Buffer buf, AllocFrame frame);
 extern void PoolNoFramePopPending(Pool pool, Buffer buf, AllocFrame frame);
+extern Res PoolNoAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr);
 extern void PoolNoWalk(Pool pool, Seg seg, FormattedObjectsStepMethod step,
                        void *p, size_t s);
 extern void PoolNoFreeWalk(Pool pool, FreeBlockStepMethod f, void *p);
@@ -534,6 +536,7 @@ extern void ArenaRestoreProtection(Globals globals);
 extern Res ArenaStartCollect(Globals globals, int why);
 extern Res ArenaCollect(Globals globals, int why);
 extern Bool ArenaHasAddr(Arena arena, Addr addr);
+extern Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr);
 extern void ArenaSetEmergency(Arena arena, Bool emergency);
 extern Bool ArenaEmergency(Arena arean);
diff --git a/mps/code/mpmss.c b/mps/code/mpmss.c
index 76ad4ec48f3..c18c4fe07dd 100644
--- a/mps/code/mpmss.c
+++ b/mps/code/mpmss.c
@@ -159,7 +159,7 @@ static int testInArena(mps_arena_t arena, mps_pool_debug_option_s *options)
   /* cross-segment allocation (possibly MVFF ought not to). */
   printf("MVFF\n");
   die(stress(mps_class_mvff(), randomSize8, arena,
-             (size_t)65536, (size_t)32, (size_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE),
+             (size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE),
      "stress MVFF");
   printf("MV debug\n");
   die(stress(mps_class_mv_debug(), randomSize, arena,
diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h
index 73a3fed9388..a95083b32b5 100644
--- a/mps/code/mpmst.h
+++ b/mps/code/mpmst.h
@@ -73,6 +73,7 @@ typedef struct mps_class_s {
   PoolFramePushMethod framePush;       /* push an allocation frame */
   PoolFramePopMethod framePop;         /* pop an allocation frame */
   PoolFramePopPendingMethod framePopPending; /* notify pending pop */
+  PoolAddrObjectMethod addrObject;     /* find client pointer to object */
   PoolWalkMethod walk;                 /* walk over a segment */
   PoolFreeWalkMethod freewalk;         /* walk over free blocks */
   PoolBufferClassMethod bufferClass;   /* default BufferClass of pool */
diff --git a/mps/code/mpmtypes.h b/mps/code/mpmtypes.h
index 7ed5a89d9c8..481d0569fcf 100644
--- a/mps/code/mpmtypes.h
+++ b/mps/code/mpmtypes.h
@@ -223,6 +223,8 @@ typedef Res (*PoolFramePopMethod)(Pool pool, Buffer buf, AllocFrame frame);
 typedef void (*PoolFramePopPendingMethod)(Pool pool, Buffer buf,
                                           AllocFrame frame);
+typedef Res (*PoolAddrObjectMethod)(Addr *pReturn,
+                                    Pool pool, Seg seg, Addr addr);
 typedef void (*PoolWalkMethod)(Pool pool, Seg seg,
                                FormattedObjectsStepMethod f,
                                void *v, size_t s);
diff --git a/mps/code/mps.c b/mps/code/mps.c
index f11533bb1fd..21aa6b3a634 100644
--- a/mps/code/mps.c
+++ b/mps/code/mps.c
@@ -209,7 +209,7 @@
 #include "proti6.c"     /* 64-bit Intel mutator context decoding */
 #include "prmci6w3.c"   /* Windows on 64-bit Intel mutator context */
 #include "ssw3i6mv.c"   /* Windows on 64-bit stack scan for Microsoft C */
-#include "span.c"       /* generic stack probe FIXME: Is this correct? */
+#include "spw3i6mv.c"   /* Windows on 64-bit stack probe for Microsoft C */
 #include "mpsiw3.c"     /* Windows interface layer extras */
 
 #else
diff --git a/mps/code/pool.c b/mps/code/pool.c
index bc0283f75bf..8e188c51e52 100644
--- a/mps/code/pool.c
+++ b/mps/code/pool.c
@@ -68,6 +68,7 @@ Bool PoolClassCheck(PoolClass class)
   CHECKL(FUNCHECK(class->framePush));
   CHECKL(FUNCHECK(class->framePop));
   CHECKL(FUNCHECK(class->framePopPending));
+  CHECKL(FUNCHECK(class->addrObject));
   CHECKL(FUNCHECK(class->walk));
   CHECKL(FUNCHECK(class->freewalk));
   CHECKL(FUNCHECK(class->bufferClass));
@@ -474,6 +475,23 @@ void PoolTraceEnd(Pool pool, Trace trace)
 }
 
 
+/* PoolAddrObject -- find client pointer to object containing addr
+ * See user documentation for mps_addr_object.
+ * addr is known to belong to seg, which belongs to pool.
+ * See job003589.
+ */
+
+Res PoolAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr)
+{
+  AVER(pReturn != NULL);
+  AVERT(Pool, pool);
+  AVERT(Seg, seg);
+  AVER(pool == SegPool(seg));
+  AVER(SegBase(seg) <= addr && addr < SegLimit(seg));
+  return (*pool->class->addrObject)(pReturn, pool, seg, addr);
+}
+
+
 /* PoolWalk -- walk objects in this segment */
 
 void PoolWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f,
diff --git a/mps/code/poolabs.c b/mps/code/poolabs.c
index 212fa76a44c..af355577cc5 100644
--- a/mps/code/poolabs.c
+++ b/mps/code/poolabs.c
@@ -143,6 +143,7 @@ DEFINE_CLASS(AbstractPoolClass, class)
   class->framePush = PoolNoFramePush;
   class->framePop = PoolNoFramePop;
   class->framePopPending = PoolNoFramePopPending;
+  class->addrObject = PoolNoAddrObject;
   class->walk = PoolNoWalk;
   class->freewalk = PoolNoFreeWalk;
   class->bufferClass = PoolNoBufferClass;
@@ -631,6 +632,16 @@ Res PoolTrivFramePop(Pool pool, Buffer buf, AllocFrame frame)
 }
 
 
+Res PoolNoAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr)
+{
+  AVER(pReturn != NULL);
+  AVERT(Pool, pool);
+  AVERT(Seg, seg);
+  AVER(SegPool(seg) == pool);
+  AVER(SegBase(seg) <= addr && addr < SegLimit(seg));
+  return ResUNIMPL;
+}
+
 void PoolNoWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f,
                 void *p, size_t s)
 {
diff --git a/mps/code/poolamc.c b/mps/code/poolamc.c
index d16835fc7a2..0b2560a6bb0 100644
--- a/mps/code/poolamc.c
+++ b/mps/code/poolamc.c
@@ -100,12 +100,12 @@ typedef struct amcNailboardStruct {
  * additional parameter (the address of the segment's generation) to
  * SegAlloc. See .
  *
- * .seg-ramp-new: "new" (if true) means this segment was allocated by
- * AMCBufferFill while amc->rampMode == RampRAMPING, and therefore
- * (I think) the contribution it *should* make to gen->pgen.newSize
- * is being deferred until the ramping is over. "new" is set to FALSE
- * in all other (ie. normal) circumstances. (The original comment for
- * this struct member was "allocated since last GC"). RHSK 2009-04-15.
+ * .seg-ramp-new: The "new" flag is usually true, and indicates that the
+ * segment has been counted towards the pool generation's newSize. It is
+ * set to FALSE otherwise. This is used by both ramping and hash array
+ * allocations. TODO: The code for this is scrappy and needs refactoring,
+ * and the *reasons* for setting these flags need properly documenting.
+ * RB 2013-07-17
  */
 
 typedef struct amcSegStruct *amcSeg;
@@ -563,13 +563,20 @@ typedef struct amcBufStruct *amcBuf;
 
 typedef struct amcBufStruct {
   SegBufStruct segbufStruct;      /* superclass fields must come first */
   amcGen gen;                     /* The AMC generation */
+  Bool forHashArrays;             /* allocates hash table arrays, see AMCBufferFill */
   Sig sig;                        /* */
 } amcBufStruct;
 
 
 /* Buffer2amcBuf -- convert generic Buffer to an amcBuf */
 
-#define Buffer2amcBuf(buffer) ((amcBuf)(buffer))
+#define Buffer2amcBuf(buffer) \
+  PARENT(amcBufStruct, segbufStruct, \
+         PARENT(SegBufStruct, bufferStruct, buffer))
+
+/* amcBuf2Buffer -- convert amcBuf to generic Buffer */
+
+#define amcBuf2Buffer(amcbuf) (&(amcbuf)->segbufStruct.bufferStruct)
 
 
@@ -577,13 +584,13 @@ typedef struct amcBufStruct {
 
 static Bool amcBufCheck(amcBuf amcbuf)
 {
-  SegBuf segbuf;
-
   CHECKS(amcBuf, amcbuf);
-  segbuf = &amcbuf->segbufStruct;
-  CHECKL(SegBufCheck(segbuf));
+  CHECKL(SegBufCheck(&amcbuf->segbufStruct));
   if(amcbuf->gen != NULL)
     CHECKD(amcGen, amcbuf->gen);
+  CHECKL(BoolCheck(amcbuf->forHashArrays));
+  /* hash array buffers only created by mutator */
+  CHECKL(BufferIsMutator(amcBuf2Buffer(amcbuf)) || !amcbuf->forHashArrays);
   return TRUE;
 }
 
@@ -609,6 +616,10 @@ static void amcBufSetGen(Buffer buffer, amcGen gen)
 }
 
 
+ARG_DEFINE_KEY(ap_hash_arrays, Bool);
+
+#define amcKeyAPHashArrays (&_mps_key_ap_hash_arrays)
+
 /* AMCBufInit -- Initialize an amcBuf */
 
 static Res AMCBufInit(Buffer buffer, Pool pool, ArgList args)
@@ -617,12 +628,17 @@ static Res AMCBufInit(Buffer buffer, Pool pool, ArgList args)
   amcBuf amcbuf;
   BufferClass superclass;
   Res res;
+  Bool forHashArrays = FALSE;
+  ArgStruct arg;
 
   AVERT(Buffer, buffer);
   AVERT(Pool, pool);
   amc = Pool2AMC(pool);
   AVERT(AMC, amc);
 
+  if (ArgPick(&arg, args, amcKeyAPHashArrays))
+    forHashArrays = arg.val.b;
+
   /* call next method */
   superclass = BUFFER_SUPERCLASS(amcBufClass);
   res = (*superclass->init)(buffer, pool, args);
@@ -637,6 +653,7 @@ static Res AMCBufInit(Buffer buffer, Pool pool, ArgList args)
     /* No gen yet -- see . */
     amcbuf->gen = NULL;
   }
+  amcbuf->forHashArrays = forHashArrays;
   amcbuf->sig = amcBufSig;
   AVERT(amcBuf, amcbuf);
 
@@ -1147,6 +1164,8 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
   Serial genNr;
   SegPrefStruct segPrefStruct;
   PoolGen pgen;
+  amcBuf amcbuf;
+  Bool isRamping;
 
   AVERT(Pool, pool);
   amc = Pool2AMC(pool);
@@ -1164,6 +1183,9 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
 
   pgen = &gen->pgen;
 
+  amcbuf = Buffer2amcBuf(buffer);
+  AVERT(amcBuf, amcbuf);
+
   /* Create and attach segment. The location of this segment is */
   /* expressed as a generation number. We rely on the arena to */
   /* organize locations appropriately. */
@@ -1193,21 +1215,15 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
   ++gen->segs;
   pgen->totalSize += alignedSize;
 
-  /* If ramping, or if the buffer is a large proportion of the
-   * generation size, don't count it towards newSize. */
-
-  /* TODO: Find a better hack for this, which is really a work-around
-   * for a nasty problem in the collection scheduling strategy.
-   * See job003435. NB 2013-03-07. */
-
-  if((size < (pgen->chain->gens[genNr].capacity * 1024.0 / 4.0)) &&
-     (amc->rampMode != RampRAMPING
-      || buffer != amc->rampGen->forward
-      || gen != amc->rampGen))
-  {
-    pgen->newSize += alignedSize;
-  } else {
+  /* If ramping, or if the buffer is intended for allocating
+     hash table arrays, don't count it towards newSize. */
+  isRamping = (amc->rampMode == RampRAMPING &&
+               buffer == amc->rampGen->forward &&
+               gen == amc->rampGen);
+  if (isRamping || amcbuf->forHashArrays) {
     Seg2amcSeg(seg)->new = FALSE;
+  } else {
+    pgen->newSize += alignedSize;
   }
   PoolGenUpdateZones(pgen, seg);
 
@@ -2319,6 +2335,85 @@ static void amcWalkAll(Pool pool, FormattedObjectsStepMethod f,
 }
 
 
+/* amcAddrObjectSearch -- skip over objects (belonging to pool)
+ * starting at objBase until we reach one of the following cases:
+ * 1. addr is found (and not moved): set *pReturn to the client
+ * pointer to the object containing addr and return ResOK;
+ * 2. addr is found, but it moved: return ResFAIL;
+ * 3. we reach searchLimit: return ResFAIL.
+ */
+static Res amcAddrObjectSearch(Addr *pReturn, Pool pool, Addr objBase,
+                               Addr searchLimit, Addr addr)
+{
+  Format format;
+  Size hdrSize;
+
+  AVER(pReturn != NULL);
+  AVERT(Pool, pool);
+  AVER(objBase <= searchLimit);
+
+  format = pool->format;
+  hdrSize = format->headerSize;
+  while (objBase < searchLimit) {
+    Addr objRef = AddrAdd(objBase, hdrSize);
+    Addr objLimit = AddrSub((*format->skip)(objRef), hdrSize);
+    AVER(objBase < objLimit);
+    if (addr < objLimit) {
+      AVER(objBase <= addr && addr < objLimit); /* the point */
+      if (!(*format->isMoved)(objRef)) {
+        *pReturn = objRef;
+        return ResOK;
+      }
+      break;
+    }
+    objBase = objLimit;
+  }
+  return ResFAIL;
+}
+
+
+/* AMCAddrObject -- find client pointer to object containing addr.
+ * addr is known to belong to seg, which belongs to pool.
+ * See job003589.
+ */
+static Res AMCAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr)
+{
+  Res res;
+  Arena arena;
+  Addr base, limit;    /* range of objects on segment */
+
+  AVER(pReturn != NULL);
+  AVERT(Pool, pool);
+  AVERT(Seg, seg);
+  AVER(SegPool(seg) == pool);
+  AVER(SegBase(seg) <= addr && addr < SegLimit(seg));
+
+  arena = PoolArena(pool);
+  base = SegBase(seg);
+  if (SegBuffer(seg) != NULL) {
+    /* We use BufferGetInit here (and not BufferScanLimit) because we
+     * want to be able to find objects that have been allocated and
+     * committed since the last flip. These objects lie between the
+     * addresses returned by BufferScanLimit (which returns the value
+     * of init at the last flip) and BufferGetInit.
+     *
+     * Strictly speaking we only need a limit that is at least the
+     * maximum of the objects on the segments. This is because addr
+     * *must* point inside a live object and we stop skipping once we
+     * have found it. The init pointer serves this purpose.
+     */
+    limit = BufferGetInit(SegBuffer(seg));
+  } else {
+    limit = SegLimit(seg);
+  }
+
+  ShieldExpose(arena, seg);
+  res = amcAddrObjectSearch(pReturn, pool, base, limit, addr);
+  ShieldCover(arena, seg);
+  return res;
+}
+
+
 /* AMCDescribe -- describe the contents of the AMC pool
  *
  * See .
@@ -2415,6 +2510,7 @@
   this->traceEnd = AMCTraceEnd;
   this->rampBegin = AMCRampBegin;
   this->rampEnd = AMCRampEnd;
+  this->addrObject = AMCAddrObject;
   this->walk = AMCWalk;
   this->bufferClass = amcBufClassGet;
   this->describe = AMCDescribe;
diff --git a/mps/code/sacss.c b/mps/code/sacss.c
index cb7606cb369..5d74edc62c6 100644
--- a/mps/code/sacss.c
+++ b/mps/code/sacss.c
@@ -173,7 +173,7 @@ static int testInArena(mps_arena_t arena)
 
   printf("MVFF\n\n");
   die(stress(mps_class_mvff(), classCOUNT, classes, randomSize8, arena,
-             (size_t)65536, (size_t)32, (size_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE),
+             (size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE),
      "stress MVFF");
   printf("MV debug\n\n");
   die(stress(mps_class_mv_debug(), classCOUNT, classes, randomSize8, arena,
diff --git a/mps/code/spw3i6mv.c b/mps/code/spw3i6mv.c
new file mode 100644
index 00000000000..751e9680e4b
--- /dev/null
+++ b/mps/code/spw3i6mv.c
@@ -0,0 +1,67 @@
+/* spw3i6mv.c: STACK PROBE FOR 64-BIT WINDOWS
+ *
+ * $Id$
+ * Copyright (c) 2013 Ravenbrook Limited. See end of file for license.
+ *
+ * The function StackProbe ensures that the stack has at least depth
+ * words available. It achieves this by exploiting an obscure but
+ * documented feature of Microsoft's function _alloca: "A stack
+ * overflow exception is generated if the space cannot be allocated."
+ * _alloca: http://msdn.microsoft.com/en-us/library/wb1s57t5.aspx
+ *
+ * The purpose of this function is to ensure that the stack overflow
+ * exception is generated here (before taking the arena lock) where it
+ * can be handled safely rather than at some later point where the
+ * arena lock is held and so handling the exception may cause the MPS
+ * to be entered recursively.
+ */
+
+#include "mpm.h"
+#include 
+
+void StackProbe(Size depth)
+{
+  _alloca(depth*sizeof(Word));
+}
+
+
+/* C. COPYRIGHT AND LICENSE
+ *
+ * Copyright (C) 2013 Ravenbrook Limited .
+ * All rights reserved. This is an open source license. Contact
+ * Ravenbrook for commercial licensing options.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Redistributions in any form must be accompanied by information on how
+ *    to obtain complete source code for this software and any accompanying
+ *    software that uses this software. The source code must either be
+ *    included in the distribution or be available for no more than the cost
+ *    of distribution plus a nominal fee, and must be freely redistributable
+ *    under reasonable conditions. For an executable file, complete source
+ *    code means the source code for all modules it contains. It does not
+ *    include source code for modules or files that typically accompany the
+ *    major components of the operating system on which the executable file
+ *    runs.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/mps/code/trace.c b/mps/code/trace.c
index 596e2ed944b..5b24c7edea3 100644
--- a/mps/code/trace.c
+++ b/mps/code/trace.c
@@ -421,6 +421,8 @@ Res TraceCondemnZones(Trace trace, ZoneSet condemnedSet)
     } while (SegNext(&seg, arena, base));
   }
 
+  EVENT3(TraceCondemnZones, trace, condemnedSet, trace->white);
+
   /* The trace's white set must be a subset of the condemned set */
   AVER(ZoneSetSuper(condemnedSet, trace->white));
 
diff --git a/mps/code/w3i6mv.nmk b/mps/code/w3i6mv.nmk
index aa63bb8534d..2a4769a665f 100644
--- a/mps/code/w3i6mv.nmk
+++ b/mps/code/w3i6mv.nmk
@@ -13,7 +13,7 @@ PFMDEFS = /DCONFIG_PF_STRING="w3i6mv" /DCONFIG_PF_W3I6MV \
 MASM = ml64
 
 # MPM sources: core plus platform-specific.
-MPM = $(MPMCOMMON)
+MPM = $(MPMCOMMON)
 
 !INCLUDE commpre.nmk
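
The new addrObject path added above (ArenaAddrObject -> PoolAddrObject -> AMCAddrObject, with PoolNoAddrObject as the default) is what backs the client-level address-to-object lookup that the pool.c comment calls mps_addr_object. The following is a minimal usage sketch, not part of the patch: it assumes the public wrapper has the shape mps_res_t mps_addr_object(mps_addr_t *p_o, mps_arena_t arena, mps_addr_t addr), and the helper name report_owner is purely illustrative.

#include <stdio.h>
#include "mps.h"

/* report_owner -- given an interior pointer into the arena, report the
 * client pointer of the object containing it.  Assumes the public
 * wrapper mps_addr_object() referenced in the pool.c comment above. */
static void report_owner(mps_arena_t arena, mps_addr_t interior)
{
  mps_addr_t obj;
  mps_res_t res;

  res = mps_addr_object(&obj, arena, interior);
  if (res == MPS_RES_OK) {
    /* obj is the client pointer to the (unmoved) object containing
     * interior; AMCAddrObject found it by skipping objects with the
     * pool format's skip method. */
    printf("%p lies inside the object at %p\n", interior, obj);
  } else {
    /* The address is not on a managed segment, the object has been
     * moved, or the pool class does not implement addrObject (the
     * default PoolNoAddrObject returns ResUNIMPL). */
    printf("%p: no object found\n", interior);
  }
}

Because AMCAddrObject searches from the segment base using the format's skip method, the lookup is linear in the number of objects on the segment; it is intended for debugging and introspection rather than hot paths.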