Merge branch/2016-09-06/job004006.
Copied from Perforce Change: 192365 ServerID: perforce.ravenbrook.com

commit 62f3be6400
28 changed files with 418 additions and 95 deletions

@@ -82,6 +82,14 @@ static Res ArenaNoPagesMarkAllocated(Arena arena, Chunk chunk,
   return ResUNIMPL;
 }
 
+static Bool ArenaNoChunkPageMapped(Chunk chunk, Index index)
+{
+  UNUSED(chunk);
+  UNUSED(index);
+  NOTREACHED;
+  return FALSE;
+}
+
 static Res ArenaNoCreate(Arena *arenaReturn, ArgList args)
 {
   UNUSED(arenaReturn);
@@ -122,6 +130,7 @@ DEFINE_CLASS(Arena, AbstractArena, klass)
   klass->chunkFinish = ArenaNoChunkFinish;
   klass->compact = ArenaTrivCompact;
   klass->pagesMarkAllocated = ArenaNoPagesMarkAllocated;
+  klass->chunkPageMapped = ArenaNoChunkPageMapped;
   klass->sig = ArenaClassSig;
 }
 
@@ -144,6 +153,7 @@ Bool ArenaClassCheck(ArenaClass klass)
   CHECKL(FUNCHECK(klass->chunkFinish));
   CHECKL(FUNCHECK(klass->compact));
   CHECKL(FUNCHECK(klass->pagesMarkAllocated));
+  CHECKL(FUNCHECK(klass->chunkPageMapped));
   CHECKS(ArenaClass, klass);
   return TRUE;
 }

@@ -383,6 +383,20 @@ static Res ClientArenaPagesMarkAllocated(Arena arena, Chunk chunk,
 }
 
 
+/* ClientChunkPageMapped -- determine if a page is mapped */
+
+static Bool ClientChunkPageMapped(Chunk chunk, Index index)
+{
+  UNUSED(chunk);
+  UNUSED(index);
+
+  AVERT(Chunk, chunk);
+  AVER(index < chunk->pages);
+
+  return TRUE;
+}
+
+
 /* ClientArenaFree - free a region in the arena */
 
 static void ClientArenaFree(Addr base, Size size, Pool pool)
@@ -443,6 +457,7 @@ DEFINE_CLASS(Arena, ClientArena, klass)
   klass->free = ClientArenaFree;
   klass->chunkInit = ClientChunkInit;
   klass->chunkFinish = ClientChunkFinish;
+  klass->chunkPageMapped = ClientChunkPageMapped;
 }
 
 

@@ -932,6 +932,16 @@ static Res VMPagesMarkAllocated(Arena arena, Chunk chunk,
 }
 
 
+static Bool VMChunkPageMapped(Chunk chunk, Index index)
+{
+  VMChunk vmChunk;
+  AVERT(Chunk, chunk);
+  AVER(index < chunk->pages);
+  vmChunk = Chunk2VMChunk(chunk);
+  return BTGet(vmChunk->pages.mapped, index);
+}
+
+
 /* chunkUnmapAroundPage -- unmap spare pages in a chunk including this one
  *
  * Unmap the spare page passed, and possibly other pages in the chunk,
@@ -1208,6 +1218,7 @@ DEFINE_CLASS(Arena, VMArena, klass)
   klass->chunkFinish = VMChunkFinish;
   klass->compact = VMCompact;
   klass->pagesMarkAllocated = VMPagesMarkAllocated;
+  klass->chunkPageMapped = VMChunkPageMapped;
 }
 
 

@@ -167,8 +167,9 @@
 /* CONFIG_THREAD_SINGLE -- support single-threaded execution only
  *
  * This symbol causes the MPS to be built for single-threaded
- * execution only, where locks are not needed and so lock operations
- * can be defined as no-ops by lock.h.
+ * execution only, where locks are not needed and so the generic
+ * ("ANSI") lock module lockan.c can be used instead of the
+ * platform-specific lock module.
  */
 
 #if !defined(CONFIG_THREAD_SINGLE)

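The comment change above ties CONFIG_THREAD_SINGLE to the choice of lock module rather than to no-op lock macros. A rough sketch of the selection it implies is shown below; the exact config.h lines are outside this hunk, so the symbol wiring here is an assumption for illustration, not quoted source.

    /* Assumed wiring, for illustration only: CONFIG_THREAD_SINGLE selects the
     * LOCK_NONE configuration, and the platform lock sources then compile the
     * generic module lockan.c instead of their own code. */
    #if !defined(CONFIG_THREAD_SINGLE)
    #define LOCK        /* multi-threaded: lockix.c / lockw3.c provide locks */
    #else
    #define LOCK_NONE   /* single-threaded: lockan.c provides trivial locks */
    #endif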
@@ -409,7 +409,7 @@ void GlobalsPrepareToDestroy(Globals arenaGlobals)
   arenaGlobals->defaultChain = NULL;
   ChainDestroy(defaultChain);
 
-  LockRelease(arenaGlobals->lock);
+  ArenaLeave(arena);
   /* Theoretically, another thread could grab the lock here, but it's */
   /* not worth worrying about, since an attempt after the lock has been */
   /* destroyed would lead to a crash just the same. */
@@ -493,10 +493,9 @@ Ring GlobalsRememberedSummaryRing(Globals global)
 
 /* ArenaEnter -- enter the state where you can look at the arena */
 
-void (ArenaEnter)(Arena arena)
+void ArenaEnter(Arena arena)
 {
-  AVERT(Arena, arena);
-  ArenaEnter(arena);
+  ArenaEnterLock(arena, FALSE);
 }
 
 /* The recursive argument specifies whether to claim the lock
@@ -541,10 +540,10 @@ void ArenaEnterRecursive(Arena arena)
 
 /* ArenaLeave -- leave the state where you can look at MPM data structures */
 
-void (ArenaLeave)(Arena arena)
+void ArenaLeave(Arena arena)
 {
   AVERT(Arena, arena);
-  ArenaLeave(arena);
+  ArenaLeaveLock(arena, FALSE);
 }
 
 void ArenaLeaveLock(Arena arena, Bool recursive)
@@ -574,6 +573,12 @@ void ArenaLeaveRecursive(Arena arena)
   ArenaLeaveLock(arena, TRUE);
 }
 
+Bool ArenaBusy(Arena arena)
+{
+  return LockIsHeld(ArenaGlobals(arena)->lock);
+}
+
+
 /* mps_exception_info -- pointer to exception info
  *
  * This is a hack to make exception info easier to find in a release

@@ -78,6 +78,11 @@ extern void LockRelease(Lock lock);
 extern Bool LockCheck(Lock lock);
 
 
+/* LockIsHeld -- test whether lock is held by any thread */
+
+extern Bool LockIsHeld(Lock lock);
+
+
 /* == Global locks == */
 
 
@@ -123,26 +128,6 @@ extern void LockClaimGlobal(void);
 extern void LockReleaseGlobal(void);
 
 
-#if defined(LOCK)
-/* Nothing to do: functions declared in all lock configurations. */
-#elif defined(LOCK_NONE)
-#define LockSize() MPS_PF_ALIGN
-#define LockInit(lock) UNUSED(lock)
-#define LockFinish(lock) UNUSED(lock)
-#define LockClaimRecursive(lock) UNUSED(lock)
-#define LockReleaseRecursive(lock) UNUSED(lock)
-#define LockClaim(lock) UNUSED(lock)
-#define LockRelease(lock) UNUSED(lock)
-#define LockCheck(lock) ((void)lock, TRUE)
-#define LockClaimGlobalRecursive()
-#define LockReleaseGlobalRecursive()
-#define LockClaimGlobal()
-#define LockReleaseGlobal()
-#else
-#error "No lock configuration."
-#endif /* LOCK */
-
-
 #endif /* lock_h */
 
 

@@ -79,6 +79,12 @@ void (LockReleaseRecursive)(Lock lock)
   --lock->claims;
 }
 
+Bool (LockIsHeld)(Lock lock)
+{
+  AVERT(Lock, lock);
+  return lock->claims > 0;
+}
+
 
 /* Global locking is performed by normal locks.
  * A separate lock structure is used for recursive and

@@ -39,20 +39,28 @@ int main(int argc, char *argv[])
   Insist(b != NULL);
 
   LockInit(a);
+  Insist(!LockIsHeld(a));
   LockInit(b);
+  Insist(!LockIsHeld(b));
   LockClaimGlobal();
   LockClaim(a);
+  Insist(LockIsHeld(a));
   LockClaimRecursive(b);
+  Insist(LockIsHeld(b));
   LockClaimGlobalRecursive();
   LockReleaseGlobal();
   LockClaimGlobal();
   LockRelease(a);
+  Insist(!LockIsHeld(a));
   LockClaimGlobalRecursive();
   LockReleaseGlobal();
   LockClaimRecursive(b);
+  Insist(LockIsHeld(b));
   LockFinish(a);
   LockReleaseRecursive(b);
+  Insist(LockIsHeld(b));
   LockReleaseRecursive(b);
+  Insist(!LockIsHeld(b));
   LockFinish(b);
   LockInit(a);
   LockClaim(a);

@@ -45,6 +45,7 @@
 
 SRCID(lockix, "$Id$");
 
+#if defined(LOCK)
 
 /* LockStruct -- the MPS lock structure
  *
@@ -185,6 +186,21 @@ void (LockReleaseRecursive)(Lock lock)
 }
 
 
+/* LockIsHeld -- test whether lock is held */
+
+Bool (LockIsHeld)(Lock lock)
+{
+  AVERT(Lock, lock);
+  if (pthread_mutex_trylock(&lock->mut) == 0) {
+    Bool claimed = lock->claims > 0;
+    int res = pthread_mutex_unlock(&lock->mut);
+    AVER(res == 0);
+    return claimed;
+  }
+  return TRUE;
+}
+
+
 /* Global locks
  *
  * .global: The two "global" locks are statically allocated normal locks.
@@ -245,6 +261,13 @@ void (LockReleaseGlobal)(void)
 }
 
 
+#elif defined(LOCK_NONE)
+#include "lockan.c"
+#else
+#error "No lock configuration."
+#endif
+
+
 /* C. COPYRIGHT AND LICENSE
  *
  * Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.

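The POSIX LockIsHeld above answers "is anyone holding this lock?" by probing the mutex: if pthread_mutex_trylock succeeds, the claims counter is consulted and the mutex is released again; if the trylock fails, some thread holds it and TRUE is returned. Below is a minimal standalone sketch of that probe pattern using illustrative names rather than MPS types; it is not MPS code, and like the original it only gives a snapshot answer.

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical helper: report whether a mutex appears to be held right
     * now.  If the trylock succeeds the mutex was free, so release it again
     * and report false; if it fails with EBUSY, some thread holds it. */
    static bool mutex_seems_held(pthread_mutex_t *mut)
    {
      int rc = pthread_mutex_trylock(mut);
      if (rc == 0) {
        pthread_mutex_unlock(mut);
        return false;
      }
      return rc == EBUSY;
    }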
@@ -31,6 +31,7 @@
 
 SRCID(lockw3, "$Id$");
 
+#if defined(LOCK)
 
 /* .lock.win32: Win32 lock structure; uses CRITICAL_SECTION */
 typedef struct LockStruct {
@@ -103,6 +104,15 @@ void (LockReleaseRecursive)(Lock lock)
   LeaveCriticalSection(&lock->cs);
 }
 
+Bool (LockIsHeld)(Lock lock)
+{
+  if (TryEnterCriticalSection(&lock->cs)) {
+    Bool claimed = lock->claims > 0;
+    LeaveCriticalSection(&lock->cs);
+    return claimed;
+  }
+  return TRUE;
+}
 
 
 /* Global locking is performed by normal locks.
@@ -156,6 +166,13 @@ void (LockReleaseGlobal)(void)
 }
 
 
+#elif defined(LOCK_NONE)
+#include "lockan.c"
+#else
+#error "No lock configuration."
+#endif
+
+
 /* C. COPYRIGHT AND LICENSE
  *
  * Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.

@@ -488,7 +488,6 @@ extern Bool ArenaAccess(Addr addr, AccessSet mode, MutatorFaultContext context);
 extern Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit);
 extern void ArenaFreeLandDelete(Arena arena, Addr base, Addr limit);
 
-
 extern Bool GlobalsCheck(Globals arena);
 extern Res GlobalsInit(Globals arena);
 extern void GlobalsFinish(Globals arena);
@@ -524,16 +523,12 @@ extern Bool ArenaGrainSizeCheck(Size size);
 extern void ArenaEnterLock(Arena arena, Bool recursive);
 extern void ArenaLeaveLock(Arena arena, Bool recursive);
 
-extern void (ArenaEnter)(Arena arena);
-extern void (ArenaLeave)(Arena arena);
+extern void ArenaEnter(Arena arena);
+extern void ArenaLeave(Arena arena);
 extern void (ArenaPoll)(Globals globals);
 
 #if defined(SHIELD)
-#define ArenaEnter(arena) ArenaEnterLock(arena, FALSE)
-#define ArenaLeave(arena) ArenaLeaveLock(arena, FALSE)
 #elif defined(SHIELD_NONE)
-#define ArenaEnter(arena) UNUSED(arena)
-#define ArenaLeave(arena) AVER(arena->busyTraces == TraceSetEMPTY)
 #define ArenaPoll(globals) UNUSED(globals)
 #else
 #error "No shield configuration."
@@ -546,10 +541,12 @@ extern Bool (ArenaStep)(Globals globals, double interval, double multiplier);
 extern void ArenaClamp(Globals globals);
 extern void ArenaRelease(Globals globals);
 extern void ArenaPark(Globals globals);
+extern void ArenaPostmortem(Globals globals);
 extern void ArenaExposeRemember(Globals globals, Bool remember);
 extern void ArenaRestoreProtection(Globals globals);
 extern Res ArenaStartCollect(Globals globals, int why);
 extern Res ArenaCollect(Globals globals, int why);
+extern Bool ArenaBusy(Arena arena);
 extern Bool ArenaHasAddr(Arena arena, Addr addr);
 extern Res ArenaAddrObject(Addr *pReturn, Arena arena, Addr addr);
 extern void ArenaChunkInsert(Arena arena, Chunk chunk);
@@ -895,7 +892,7 @@ extern void (ShieldFlush)(Arena arena);
 #define ShieldLower(arena, seg, mode) \
   BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
 #define ShieldEnter(arena) BEGIN UNUSED(arena); END
-#define ShieldLeave(arena) BEGIN UNUSED(arena); END
+#define ShieldLeave(arena) AVER(arena->busyTraces == TraceSetEMPTY)
 #define ShieldExpose(arena, seg) \
   BEGIN UNUSED(arena); UNUSED(seg); END
 #define ShieldCover(arena, seg) \

@@ -504,6 +504,7 @@ typedef struct mps_arena_class_s {
   ArenaChunkFinishMethod chunkFinish;
   ArenaCompactMethod compact;
   ArenaPagesMarkAllocatedMethod pagesMarkAllocated;
+  ArenaChunkPageMappedMethod chunkPageMapped;
   Sig sig;
 } ArenaClassStruct;
 

@@ -124,6 +124,7 @@ typedef void (*ArenaCompactMethod)(Arena arena, Trace trace);
 typedef Res (*ArenaPagesMarkAllocatedMethod)(Arena arena, Chunk chunk,
                                              Index baseIndex, Count pages,
                                              Pool pool);
+typedef Bool (*ArenaChunkPageMappedMethod)(Chunk chunk, Index index);
 
 
 /* These are not generally exposed and public, but are part of a commercial

@@ -435,6 +435,7 @@ typedef struct mps_fmt_fixed_s {
 extern void mps_arena_clamp(mps_arena_t);
 extern void mps_arena_release(mps_arena_t);
 extern void mps_arena_park(mps_arena_t);
+extern void mps_arena_postmortem(mps_arena_t);
 extern void mps_arena_expose(mps_arena_t);
 extern void mps_arena_unsafe_expose_remember_protection(mps_arena_t);
 extern void mps_arena_unsafe_restore_protection(mps_arena_t);
@@ -460,6 +461,7 @@ extern size_t mps_arena_spare_commit_limit(mps_arena_t);
 extern double mps_arena_pause_time(mps_arena_t);
 extern void mps_arena_pause_time_set(mps_arena_t, double);
 
+extern mps_bool_t mps_arena_busy(mps_arena_t);
 extern mps_bool_t mps_arena_has_addr(mps_arena_t, mps_addr_t);
 extern mps_bool_t mps_addr_pool(mps_pool_t *, mps_arena_t, mps_addr_t);
 extern mps_bool_t mps_addr_fmt(mps_fmt_t *, mps_arena_t, mps_addr_t);

@@ -260,6 +260,15 @@ void mps_arena_park(mps_arena_t arena)
 }
 
 
+void mps_arena_postmortem(mps_arena_t arena)
+{
+  /* Don't call ArenaEnter -- one of the purposes of this function is
+   * to release the arena lock if it's held */
+  AVER(TESTT(Arena, arena));
+  ArenaPostmortem(ArenaGlobals(arena));
+}
+
+
 void mps_arena_expose(mps_arena_t arena)
 {
   ArenaEnter(arena);
@@ -374,6 +383,17 @@ void mps_arena_destroy(mps_arena_t arena)
 }
 
 
+/* mps_arena_busy -- is the arena part way through an operation? */
+
+mps_bool_t mps_arena_busy(mps_arena_t arena)
+{
+  /* Don't call ArenaEnter -- the purpose of this function is to
+   * determine if the arena lock is held */
+  AVER(TESTT(Arena, arena));
+  return ArenaBusy(arena);
+}
+
+
 /* mps_arena_has_addr -- is this address managed by this arena? */
 
 mps_bool_t mps_arena_has_addr(mps_arena_t arena, mps_addr_t p)

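mps_arena_busy deliberately reads the lock state without entering the arena, so it is safe to call from code that runs while the MPS already holds the arena lock. The test changes further down use it exactly that way: a format scan method asserts that it is only ever called with the arena busy. A hedged sketch of that usage is below; my_arena and my_scan are illustrative client names, not MPS API.

    #include <assert.h>
    #include "mps.h"

    static mps_arena_t my_arena;  /* illustrative client global */

    /* Format scan methods run while the MPS holds the arena lock, so
     * mps_arena_busy() is expected to be true whenever this is called. */
    static mps_res_t my_scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit)
    {
      assert(mps_arena_busy(my_arena));
      MPS_SCAN_BEGIN(ss) {
        /* fix references between base and limit here */
        (void)base; (void)limit;
      } MPS_SCAN_END(ss);
      return MPS_RES_OK;
    }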
@@ -450,11 +450,13 @@ static void *test(void *arg, size_t s)
   mps_word_t c;
   size_t r;
 
+  Insist(!mps_arena_busy(arena));
+
   c = mps_collections(arena);
 
   if(collections != c) {
     collections = c;
-    printf("\nCollection %"PRIuLONGEST", %lu objects.\n", (ulongest_t)c, i);
+    printf("Collection %"PRIuLONGEST", %lu objects.\n", (ulongest_t)c, i);
     for(r = 0; r < exactRootsCOUNT; ++r) {
       cdie(exactRoots[r] == objNULL || dylan_check(exactRoots[r]),
            "all roots check");
@@ -565,7 +567,7 @@ int main(int argc, char *argv[])
     MPS_ARGS_ADD(args, MPS_KEY_PAUSE_TIME, rnd_pause_time());
     MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, TEST_ARENA_SIZE);
     die(mps_arena_create_k(&arena, mps_arena_class_vm(), args),
-        "arena_create\n");
+        "arena_create");
   } MPS_ARGS_END(args);
   die(mps_thread_reg(&thread, arena), "thread_reg");
 
@@ -591,9 +593,17 @@ int main(int argc, char *argv[])
   }
 
   mps_tramp(&r, test, arena, 0);
-  mps_root_destroy(reg_root);
-  mps_thread_dereg(thread);
-  mps_arena_destroy(arena);
+  switch (rnd() % 2) {
+  default:
+  case 0:
+    mps_root_destroy(reg_root);
+    mps_thread_dereg(thread);
+    mps_arena_destroy(arena);
+    break;
+  case 1:
+    mps_arena_postmortem(arena);
+    break;
+  }
 
   printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
   return 0;

@@ -457,6 +457,7 @@ static mps_res_t scan1(mps_ss_t ss, mps_addr_t *objectIO)
 
 static mps_res_t scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit)
 {
+  Insist(mps_arena_busy(arena));
   while(base < limit) {
     mps_res_t res;
 

@@ -77,9 +77,12 @@ static void pad(mps_addr_t addr, size_t size)
   }
 }
 
+static mps_arena_t arena;
+
 static mps_res_t scan(mps_ss_t ss, mps_addr_t base,
                       mps_addr_t limit)
 {
+  Insist(mps_arena_busy(arena));
   MPS_SCAN_BEGIN(ss) {
     mps_word_t *p = base;
     while (p < (mps_word_t *)limit) {
@@ -106,7 +109,7 @@ static mps_addr_t skip(mps_addr_t addr)
 }
 
 
-static void collect(mps_arena_t arena, size_t expected)
+static void collect(size_t expected)
 {
   size_t finalized = 0;
   mps_arena_collect(arena);
@@ -148,7 +151,6 @@ static const char *mode_name[] = {
 
 static void test(int mode)
 {
-  mps_arena_t arena;
   mps_thr_t thread;
   mps_root_t root;
   mps_fmt_t fmt;
@@ -214,7 +216,7 @@ static void test(int mode)
     die(mps_finalize(arena, &addr), "finalize");
   }
 
-  collect(arena, expected);
+  collect(expected);
 
   mps_arena_park(arena);
   mps_ap_destroy(ap);

@@ -530,12 +530,11 @@ void TraceIdMessagesDestroy(Arena arena, TraceId ti)
 
 
 
-/* -------- ArenaRelease, ArenaClamp, ArenaPark -------- */
+/* ----- ArenaRelease, ArenaClamp, ArenaPark, ArenaPostmortem ----- */
 
 
-/* ArenaRelease, ArenaClamp, ArenaPark -- allow/prevent collection work.
- *
- * These functions allow or prevent collection work.
+/* ArenaRelease, ArenaClamp, ArenaPark, ArenaPostmortem --
+ * allow/prevent collection work.
  */
 
 
@@ -596,6 +595,62 @@ void ArenaPark(Globals globals)
   AVER(!ArenaEmergency(arena));
 }
 
+
+/* arenaExpose -- discard all protection from MPS-managed memory
+ *
+ * This is called by ArenaPostmortem, which we expect only to be used
+ * after a fatal error. So we use the lowest-level description of the
+ * MPS-managed memory (the chunk ring page tables) to avoid the risk
+ * of higher-level structures (like the segments) having been
+ * corrupted.
+ *
+ * After calling this function memory may not be in a consistent
+ * state, so it is not safe to continue running the MPS. If you need
+ * to expose memory but continue running the MPS, use
+ * ArenaExposeRemember instead.
+ */
+
+static void arenaExpose(Arena arena)
+{
+  Ring node, next;
+  RING_FOR(node, &arena->chunkRing, next) {
+    Chunk chunk = RING_ELT(Chunk, arenaRing, node);
+    Index i;
+    for (i = 0; i < chunk->pages; ++i) {
+      if (Method(Arena, arena, chunkPageMapped)(chunk, i)) {
+        ProtSet(PageIndexBase(chunk, i), PageIndexBase(chunk, i + 1),
+                AccessSetEMPTY);
+      }
+    }
+  }
+}
+
+
+/* ArenaPostmortem -- enter the postmortem state */
+
+void ArenaPostmortem(Globals globals)
+{
+  Arena arena = GlobalsArena(globals);
+
+  /* Ensure lock is released. */
+  while (LockIsHeld(globals->lock)) {
+    LockReleaseRecursive(globals->lock);
+  }
+
+  /* Remove the arena from the global arena ring so that it no longer
+   * handles protection faults. (Don't call arenaDenounce because that
+   * needs to claim the global ring lock, but that might already be
+   * held, for example if we are inside ArenaAccess.) */
+  RingRemove(&globals->globalRing);
+
+  /* Clamp the arena so that ArenaPoll does nothing. */
+  ArenaClamp(globals);
+
+  /* Remove all protection from mapped pages. */
+  arenaExpose(arena);
+}
+
+
 /* ArenaStartCollect -- start a collection of everything in the
  * arena; leave unclamped. */
 

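ArenaPostmortem is written to be callable from the middle of a failure: it releases the arena lock however many times it is held, takes the arena off the global ring so it no longer handles protection faults, clamps it, and drops all page protection via the new chunkPageMapped method. A sketch of the client-side use this enables is below, run from a crash or assertion hook after a fatal MPS error so the heap can be examined in a debugger; the hook name and wiring are illustrative assumptions, not MPS API.

    #include <stdio.h>
    #include <stdlib.h>
    #include "mps.h"

    extern mps_arena_t my_arena;  /* illustrative: the client's arena */

    /* Hypothetical client crash hook: after a fatal error inside the MPS,
     * put the arena into the postmortem state so memory can be inspected
     * without protection faults or deadlock on the arena lock. */
    static void my_fatal_error_hook(const char *what)
    {
      fprintf(stderr, "fatal: %s\n", what);
      mps_arena_postmortem(my_arena);
      abort();  /* or break into the debugger here */
    }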