1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-03-27 01:01:52 -07:00

Merge pull request from branch/2020-08-29/page-sparering-elim

GitHub-reference: https://github.com/Ravenbrook/mps/issues/29
This commit is contained in:
Gareth Rees 2022-01-17 20:44:45 +00:00
commit 4ba2db1346
7 changed files with 266 additions and 252 deletions

View file

@ -969,19 +969,11 @@ Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit)
}
/* ArenaFreeLandDelete -- remove range from arena's free land, maybe
* extending block pool
*
* This is called from ChunkFinish in order to remove address space from
* the arena.
*
* IMPORTANT: May only be called on whole chunk ranges, because we don't
* deal with the case where the range is coalesced. This restriction would
* be easy to lift by extending the block pool on error, but doesn't happen,
* so we can't test that path.
/* ArenaFreeLandDelete -- remove range from arena's free land if
* possible without extending the block pool
*/
void ArenaFreeLandDelete(Arena arena, Addr base, Addr limit)
Res ArenaFreeLandDelete(Arena arena, Addr base, Addr limit)
{
RangeStruct range, oldRange;
Res res;
@ -991,10 +983,7 @@ void ArenaFreeLandDelete(Arena arena, Addr base, Addr limit)
land = ArenaFreeLand(arena);
res = LandDelete(&oldRange, land, &range);
/* Shouldn't be any other kind of failure because we were only deleting
a non-coalesced block. See .chunk.no-coalesce and
<code/cbs.c#.delete.alloc>. */
AVER(res == ResOK);
return res;
}

View file

@ -72,24 +72,28 @@ typedef struct VMArenaStruct { /* VM arena structure */
ArenaStruct arenaStruct;
VMStruct vmStruct; /* VM descriptor for VM containing arena */
char vmParams[VMParamSize]; /* VM parameter block */
Size spareSize; /* total size of spare pages */
Size extendBy; /* desired arena increment */
Size extendMin; /* minimum arena increment */
ArenaVMExtendedCallback extended;
ArenaVMContractedCallback contracted;
RingStruct spareRing; /* spare (free but mapped) tracts */
MFSStruct cbsBlockPoolStruct; /* stores blocks for CBSs */
CBSStruct spareLandStruct; /* spare memory */
Sig sig; /* <design/sig> */
} VMArenaStruct;
#define VMArenaVM(vmarena) (&(vmarena)->vmStruct)
#define VMArenaCBSBlockPool(vmarena) MFSPool(&(vmarena)->cbsBlockPoolStruct)
#define VMArenaSpareLand(vmarena) CBSLand(&(vmarena)->spareLandStruct)
/* Forward declarations */
static void VMFree(Addr base, Size size, Pool pool);
static Size VMPurgeSpare(Arena arena, Size size);
static void chunkUnmapSpare(Chunk chunk);
static Size vmArenaUnmapSpare(Arena arena, Size size, Chunk filter);
DECLARE_CLASS(Arena, VMArena, AbstractArena);
static void VMCompact(Arena arena, Trace trace);
static void pageDescUnmap(VMChunk vmChunk, Index basePI, Index limitPI);
/* VMChunkCheck -- check the consistency of a VM chunk */
@ -163,8 +167,6 @@ static Bool VMArenaCheck(VMArena vmArena)
CHECKS(VMArena, vmArena);
arena = MustBeA(AbstractArena, vmArena);
CHECKD(Arena, arena);
/* spare pages are committed, so must be less spare than committed. */
CHECKL(vmArena->spareSize <= arena->committed);
CHECKL(vmArena->extendBy > 0);
CHECKL(vmArena->extendMin <= vmArena->extendBy);
@ -177,7 +179,9 @@ static Bool VMArenaCheck(VMArena vmArena)
CHECKL(VMMapped(VMChunkVM(primary)) <= arena->committed);
}
CHECKD_NOSIG(Ring, &vmArena->spareRing);
CHECKD(Pool, VMArenaCBSBlockPool(vmArena));
CHECKD(Land, VMArenaSpareLand(vmArena));
CHECKL((LandSize)(VMArenaSpareLand(vmArena)) == arena->spareCommitted);
/* FIXME: Can't check VMParams */
@ -202,12 +206,17 @@ static Res VMArenaDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
if (res != ResOK)
return res;
res = WriteF(stream, depth,
" spareSize: $U\n", (WriteFU)vmArena->spareSize,
res = WriteF(stream, depth + 2,
"extendBy: $U\n", (WriteFU)vmArena->extendBy,
"extendMin: $U\n", (WriteFU)vmArena->extendMin,
NULL);
if(res != ResOK)
return res;
res = LandDescribe(VMArenaSpareLand(vmArena), stream, depth + 2);
if (res != ResOK)
return res;
/* TODO: incomplete -- some fields are not Described */
return ResOK;
@ -257,6 +266,29 @@ static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
}
/* chunkUnmapRange -- unmap range of addresses in a chunk
 *
 * Resets the page table entries for the range back to the free state,
 * returns the memory to the operating system via the chunk's VM, and
 * then unmaps any page descriptors that no longer describe mapped
 * pages.
 *
 * NOTE(review): assumes base and limit are chunk-page-aligned and lie
 * within the chunk -- confirm against callers.
 */
static void chunkUnmapRange(Chunk chunk, Addr base, Addr limit)
{
  VMArena vmArena;
  VMChunk vmChunk;
  Index basePI, limitPI, i;
  AVERT(Chunk, chunk);
  AVER(base < limit);
  vmArena = MustBeA(VMArena, ChunkArena(chunk));
  vmChunk = Chunk2VMChunk(chunk);
  basePI = INDEX_OF_ADDR(chunk, base);
  limitPI = INDEX_OF_ADDR(chunk, limit);
  /* Mark each page free before unmapping its memory. */
  for (i = basePI; i < limitPI; ++i)
    PageInit(chunk, i);
  vmArenaUnmap(vmArena, VMChunkVM(vmChunk), base, limit);
  /* The page descriptors covering the range may themselves now be
     unmappable, since they live in a sparse array. */
  pageDescUnmap(vmChunk, basePI, limitPI);
}
/* VMChunkCreate -- create a chunk
*
* chunkReturn, return parameter for the created chunk.
@ -406,7 +438,7 @@ static Bool vmChunkDestroy(Tree tree, void *closure)
vmChunk = Chunk2VMChunk(chunk);
AVERT(VMChunk, vmChunk);
chunkUnmapSpare(chunk);
(void)vmArenaUnmapSpare(ChunkArena(chunk), ChunkSize(chunk), chunk);
SparseArrayFinish(&vmChunk->pages);
@ -606,10 +638,35 @@ static Res VMArenaCreate(Arena *arenaReturn, ArgList args)
arena->reserved = VMReserved(vm);
arena->committed = VMMapped(vm);
/* Initialize a pool to hold the CBS blocks for the spare memory
land. This pool can't be allowed to extend itself using
ArenaAlloc because it is needed to implement ArenaAlloc (in the
case where allocation hits the commit limit and so spare memory
needs to be purged), so MFSExtendSelf is set to FALSE. Failures
to extend are handled where the spare memory land is used. */
MPS_ARGS_BEGIN(piArgs) {
MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(RangeTreeStruct));
MPS_ARGS_ADD(piArgs, MPS_KEY_EXTEND_BY, grainSize);
MPS_ARGS_ADD(piArgs, MFSExtendSelf, FALSE);
res = PoolInit(VMArenaCBSBlockPool(vmArena), arena, PoolClassMFS(), piArgs);
} MPS_ARGS_END(piArgs);
AVER(res == ResOK); /* no allocation, no failure expected */
if (res != ResOK)
goto failMFSInit;
/* Initialise spare land. */
MPS_ARGS_BEGIN(liArgs) {
MPS_ARGS_ADD(liArgs, CBSBlockPool, VMArenaCBSBlockPool(vmArena));
res = LandInit(VMArenaSpareLand(vmArena), CLASS(CBS), arena,
grainSize, arena, liArgs);
} MPS_ARGS_END(liArgs);
AVER(res == ResOK); /* no allocation, no failure expected */
if (res != ResOK)
goto failLandInit;
++ ArenaGlobals(arena)->systemPools;
/* Copy VM descriptor into its place in the arena. */
VMCopy(VMArenaVM(vmArena), vm);
vmArena->spareSize = 0;
RingInit(&vmArena->spareRing);
/* Copy the stack-allocated VM parameters into their home in the VMArena. */
AVER(sizeof(vmArena->vmParams) == sizeof(vmParams));
@ -669,6 +726,10 @@ static Res VMArenaCreate(Arena *arenaReturn, ArgList args)
return ResOK;
failChunkCreate:
LandFinish(VMArenaSpareLand(vmArena));
failLandInit:
PoolFinish(VMArenaCBSBlockPool(vmArena));
failMFSInit:
NextMethod(Inst, VMArena, finish)(MustBeA(Inst, arena));
failArenaInit:
VMUnmap(vm, VMBase(vm), VMLimit(vm));
@ -679,24 +740,50 @@ failVMInit:
}
/* VMArenaDestroy -- destroy the arena */
/* vmArenaMFSFreeExtent -- free one extent of the CBS block pool
 *
 * Callback passed to MFSFinishExtents during arena destruction: looks
 * up the chunk containing the extent and unmaps the extent's memory
 * directly. The block pool can't free its memory via ArenaFree, because
 * that would insert the freed memory into the spare memory land, which
 * itself uses blocks from this pool (see the comment at the call site).
 */
static void vmArenaMFSFreeExtent(Pool pool, Addr base, Size size, void *closure)
{
  Chunk chunk = NULL; /* suppress "may be used uninitialized" */
  Bool foundChunk;
  AVERT(Pool, pool);
  AVER(closure == UNUSED_POINTER);
  UNUSED(closure);
  /* Every extent was allocated from some chunk, so the lookup must
     succeed. */
  foundChunk = ChunkOfAddr(&chunk, PoolArena(pool), base);
  AVER(foundChunk);
  chunkUnmapRange(chunk, base, AddrAdd(base, size));
}
static void VMArenaDestroy(Arena arena)
{
VMArena vmArena = MustBeA(VMArena, arena);
Land spareLand = VMArenaSpareLand(vmArena);
VMStruct vmStruct;
VM vm = &vmStruct;
/* Unmap all remaining spare memory. */
VMPurgeSpare(arena, LandSize(spareLand));
AVER(LandSize(spareLand) == 0);
AVER(arena->spareCommitted == 0);
/* The CBS block pool can't free its own memory via ArenaFree
because that would attempt to insert the freed memory into the
spare memory land, which uses blocks from the block pool. */
MFSFinishExtents(VMArenaCBSBlockPool(vmArena), vmArenaMFSFreeExtent,
UNUSED_POINTER);
PoolFinish(VMArenaCBSBlockPool(vmArena));
/* Destroy all chunks, including the primary. See
* <design/arena#.chunk.delete> */
arena->primary = NULL;
TreeTraverseAndDelete(&arena->chunkTree, vmChunkDestroy,
UNUSED_POINTER);
TreeTraverseAndDelete(&arena->chunkTree, vmChunkDestroy, UNUSED_POINTER);
/* Destroying the chunks should have purged and removed all spare pages. */
RingFinish(&vmArena->spareRing);
/* Must wait until the chunks are destroyed, since vmChunkDestroy
calls vmArenaUnmapSpare which uses the spare land. */
LandFinish(VMArenaSpareLand(vmArena));
/* Destroying the chunks should leave only the arena's own VM. */
/* Destroying the chunks must leave only the arena's own VM. */
AVER(arena->reserved == VMReserved(VMArenaVM(vmArena)));
AVER(arena->committed == VMMapped(VMArenaVM(vmArena)));
@ -778,37 +865,51 @@ vmArenaGrow_Done:
}
/* pageState -- determine page state, even if unmapped
/* spareRangeRelease -- release a range of spare memory in a chunk
*
* Parts of the page table may be unmapped if their corresponding pages are
* free.
* Temporarily leaves data structures in an inconsistent state (the
* spare memory is still marked as SPARE in the chunk's page table,
* but it is no longer in the spare memory land). The caller must
* either allocate the memory or unmap it.
*/
static unsigned pageState(VMChunk vmChunk, Index pi)
{
Chunk chunk = VMChunk2Chunk(vmChunk);
if (SparseArrayIsMapped(&vmChunk->pages, pi))
return PageState(ChunkPage(chunk, pi));
return PageStateFREE;
}
/* sparePageRelease -- releases a spare page
*
* Either to allocate it or to purge it.
* Temporarily leaves it in an inconsistent state.
*/
static void sparePageRelease(VMChunk vmChunk, Index pi)
static void spareRangeRelease(VMChunk vmChunk, Index piBase, Index piLimit)
{
Chunk chunk = VMChunk2Chunk(vmChunk);
Arena arena = ChunkArena(chunk);
Page page = ChunkPage(chunk, pi);
VMArena vmArena = VMChunkVMArena(vmChunk);
Land spareLand = VMArenaSpareLand(vmArena);
RangeStruct range, containingRange;
Res res;
AVER(PageState(page) == PageStateSPARE);
AVER(arena->spareCommitted >= ChunkPageSize(chunk));
AVER(piBase < piLimit);
RangeInit(&range, PageIndexBase(chunk, piBase),
PageIndexBase(chunk, piLimit));
arena->spareCommitted -= ChunkPageSize(chunk);
RingRemove(PageSpareRing(page));
res = LandDelete(&containingRange, spareLand, &range);
if (res != ResOK) {
/* Range could not be deleted from the spare memory land because
it splits the containing range and so needs to allocate a
block but the block pool is full. Use the first grain of the
containing range to extend the block pool. */
Addr extendBase = RangeBase(&containingRange);
Index extendBasePI = INDEX_OF_ADDR(chunk, extendBase);
Addr extendLimit = AddrAdd(extendBase, ArenaGrainSize(arena));
RangeStruct extendRange;
AVER(res == ResLIMIT);
RangeInit(&extendRange, extendBase, extendLimit);
AVER(!RangesOverlap(&extendRange, &range));
res = LandDelete(&containingRange, spareLand, &extendRange);
AVER(res == ResOK);
AVER(arena->spareCommitted >= RangeSize(&extendRange));
arena->spareCommitted -= RangeSize(&extendRange);
PageAlloc(chunk, extendBasePI, VMArenaCBSBlockPool(vmArena));
MFSExtend(VMArenaCBSBlockPool(vmArena), extendBase, extendLimit);
res = LandDelete(&containingRange, spareLand, &range);
AVER(res == ResOK);
}
AVER(arena->spareCommitted >= RangeSize(&range));
arena->spareCommitted -= RangeSize(&range);
}
@ -856,10 +957,10 @@ static Res pagesMarkAllocated(VMArena vmArena, VMChunk vmChunk,
cursor = basePI;
while (BTFindLongResRange(&j, &k, vmChunk->pages.mapped, cursor, limitPI, 1)) {
for (i = cursor; i < j; ++i) {
sparePageRelease(vmChunk, i);
if (cursor < j)
spareRangeRelease(vmChunk, cursor, j);
for (i = cursor; i < j; ++i)
PageAlloc(chunk, i, pool);
}
res = pageDescMap(vmChunk, j, k);
if (res != ResOK)
goto failSAMap;
@ -875,24 +976,20 @@ static Res pagesMarkAllocated(VMArena vmArena, VMChunk vmChunk,
if (cursor == limitPI)
return ResOK;
}
for (i = cursor; i < limitPI; ++i) {
sparePageRelease(vmChunk, i);
if (cursor < limitPI)
spareRangeRelease(vmChunk, cursor, limitPI);
for (i = cursor; i < limitPI; ++i)
PageAlloc(chunk, i, pool);
}
return ResOK;
failVMMap:
pageDescUnmap(vmChunk, j, k);
failSAMap:
/* region from basePI to j needs deallocating */
/* TODO: Consider making pages spare instead, then purging. */
/* Region from basePI to j was allocated but can't be used */
if (basePI < j) {
vmArenaUnmap(vmArena, VMChunkVM(vmChunk),
PageIndexBase(chunk, basePI),
PageIndexBase(chunk, j));
for (i = basePI; i < j; ++i)
PageFree(chunk, i);
pageDescUnmap(vmChunk, basePI, j);
VMFree(PageIndexBase(chunk, basePI),
ChunkPagesToSize(chunk, j - basePI),
pool);
}
return res;
}
@ -944,119 +1041,72 @@ static Bool VMChunkPageMapped(Chunk chunk, Index index)
}
/* chunkUnmapAroundPage -- unmap spare pages in a chunk including this one
/* vmArenaUnmapSpare -- unmap spare memory
*
* Unmap the spare page passed, and possibly other pages in the chunk,
* unmapping at least the size passed if available. The amount unmapped
* may exceed the size by up to one page. Returns the amount of memory
* unmapped.
*
* To minimse unmapping calls, the page passed is coalesced with spare
* pages above and below, even though these may have been more recently
* made spare.
* The size is the desired amount to unmap, and the amount that was
* unmapped is returned. If filter is not NULL, then only memory
* within that chunk is unmapped.
*/
static Size chunkUnmapAroundPage(Chunk chunk, Size size, Page page)
typedef struct VMArenaUnmapSpareClosureStruct {
Arena arena; /* arena owning the spare memory */
Size size; /* desired amount of spare memory to unmap */
Chunk filter; /* NULL or chunk to unmap from */
Size unmapped; /* actual amount unmapped */
} VMArenaUnmapSpareClosureStruct, *VMArenaUnmapSpareClosure;
static Bool vmArenaUnmapSpareRange(Bool *deleteReturn, Land land, Range range,
void *p)
{
VMChunk vmChunk;
Size purged = 0;
Size pageSize;
Index basePI, limitPI;
VMArenaUnmapSpareClosure closure = p;
Arena arena;
Chunk chunk = NULL; /* suppress "may be used uninitialized" */
Bool foundChunk;
AVERT(Chunk, chunk);
vmChunk = Chunk2VMChunk(chunk);
AVERT(VMChunk, vmChunk);
AVER(PageState(page) == PageStateSPARE);
/* size is arbitrary */
AVER(deleteReturn != NULL);
AVERT(Land, land);
AVERT(Range, range);
AVER(p != NULL);
pageSize = ChunkPageSize(chunk);
arena = closure->arena;
foundChunk = ChunkOfAddr(&chunk, arena, RangeBase(range));
AVER(foundChunk);
basePI = (Index)(page - chunk->pageTable);
AVER(basePI < chunk->pages); /* page is within chunk's page table */
limitPI = basePI;
do {
sparePageRelease(vmChunk, limitPI);
++limitPI;
purged += pageSize;
} while (purged < size &&
limitPI < chunk->pages &&
pageState(vmChunk, limitPI) == PageStateSPARE);
while (purged < size &&
basePI > 0 &&
pageState(vmChunk, basePI - 1) == PageStateSPARE) {
--basePI;
sparePageRelease(vmChunk, basePI);
purged += pageSize;
if (closure->filter == NULL || closure->filter == chunk) {
Size size = RangeSize(range);
chunkUnmapRange(chunk, RangeBase(range), RangeLimit(range));
AVER(arena->spareCommitted >= size);
arena->spareCommitted -= size;
closure->unmapped += size;
*deleteReturn = TRUE;
}
vmArenaUnmap(VMChunkVMArena(vmChunk),
VMChunkVM(vmChunk),
PageIndexBase(chunk, basePI),
PageIndexBase(chunk, limitPI));
pageDescUnmap(vmChunk, basePI, limitPI);
return purged;
return closure->unmapped < closure->size;
}
/* arenaUnmapSpare -- return spare pages to the OS
*
* The size is the desired amount to purge, and the amount that was purged is
* returned. If filter is not NULL, then only pages within that chunk are
* unmapped.
*/
static Size arenaUnmapSpare(Arena arena, Size size, Chunk filter)
static Size vmArenaUnmapSpare(Arena arena, Size size, Chunk filter)
{
VMArena vmArena = MustBeA(VMArena, arena);
Ring node;
Size purged = 0;
Land spareLand = VMArenaSpareLand(vmArena);
VMArenaUnmapSpareClosureStruct closure;
if (filter != NULL)
AVERT(Chunk, filter);
/* Start by looking at the oldest page on the spare ring, to try to
get some LRU behaviour from the spare pages cache. */
/* RING_FOR won't work here, because chunkUnmapAroundPage deletes many
entries from the spareRing, often including the "next" entry. However,
it doesn't delete entries from other chunks, so we can use them to step
around the ring. */
node = &vmArena->spareRing;
while (RingNext(node) != &vmArena->spareRing && purged < size) {
Ring next = RingNext(node);
Page page = PageOfSpareRing(next);
Chunk chunk = NULL; /* suppress uninit warning */
Bool b;
/* Use the fact that the page table resides in the chunk to find the
chunk that owns the page. */
b = ChunkOfAddr(&chunk, arena, (Addr)page);
AVER(b);
if (filter == NULL || chunk == filter) {
purged += chunkUnmapAroundPage(chunk, size - purged, page);
/* chunkUnmapAroundPage must delete the page it's passed from the ring,
or we can't make progress and there will be an infinite loop */
AVER(RingNext(node) != next);
} else
node = next;
}
closure.arena = arena;
closure.size = size;
closure.filter = filter;
closure.unmapped = 0;
(void)LandIterateAndDelete(spareLand, vmArenaUnmapSpareRange, &closure);
return purged;
AVER(LandSize(spareLand) == arena->spareCommitted);
return closure.unmapped;
}
static Size VMPurgeSpare(Arena arena, Size size)
{
return arenaUnmapSpare(arena, size, NULL);
}
/* chunkUnmapSpare -- unmap all spare pages in a chunk */
static void chunkUnmapSpare(Chunk chunk)
{
AVERT(Chunk, chunk);
(void)arenaUnmapSpare(ChunkArena(chunk), ChunkSize(chunk), chunk);
return vmArenaUnmapSpare(arena, size, NULL);
}
@ -1066,17 +1116,21 @@ static void VMFree(Addr base, Size size, Pool pool)
{
Arena arena;
VMArena vmArena;
Land spareLand;
Chunk chunk = NULL; /* suppress "may be used uninitialized" */
Count pages;
Index pi, piBase, piLimit;
Bool foundChunk;
Size spareCommitted;
RangeStruct range, containingRange;
Res res;
AVER(base != NULL);
AVER(size > (Size)0);
AVERT(Pool, pool);
arena = PoolArena(pool);
vmArena = MustBeA(VMArena, arena);
spareLand = VMArenaSpareLand(vmArena);
/* All chunks have same pageSize. */
AVER(SizeIsAligned(size, ChunkPageSize(arena->primary)));
@ -1092,24 +1146,44 @@ static void VMFree(Addr base, Size size, Pool pool)
AVER(piBase < piLimit);
AVER(piLimit <= chunk->pages);
/* loop from pageBase to pageLimit-1 inclusive */
/* Finish each Tract found, then convert them to spare pages. */
/* Finish each Tract in the region. */
for(pi = piBase; pi < piLimit; ++pi) {
Page page = ChunkPage(chunk, pi);
Tract tract = PageTract(page);
AVER(TractPool(tract) == pool);
TractFinish(tract);
PageSetPool(page, NULL);
PageSetType(page, PageStateSPARE);
/* We must init the page's rings because it is a union with the
tract and will contain junk. */
RingInit(PageSpareRing(page));
RingAppend(&vmArena->spareRing, PageSpareRing(page));
}
arena->spareCommitted += ChunkPagesToSize(chunk, piLimit - piBase);
BTResRange(chunk->allocTable, piBase, piLimit);
/* Freed range is now spare memory, so add it to spare memory land. */
RangeInitSize(&range, base, size);
res = LandInsert(&containingRange, spareLand, &range);
if (res != ResOK) {
/* The freed range could not be inserted into the spare memory
land because the block pool is full. Allocate the first grain
of the freed range and use it to extend the block pool. */
Addr extendLimit = AddrAdd(base, ArenaGrainSize(arena));
res = ArenaFreeLandDelete(arena, base, extendLimit);
if (res != ResOK) {
/* Give up and unmap the memory immediately. */
chunkUnmapRange(chunk, RangeBase(&range), RangeLimit(&range));
return;
}
PageAlloc(chunk, INDEX_OF_ADDR(chunk, base), VMArenaCBSBlockPool(vmArena));
MFSExtend(VMArenaCBSBlockPool(vmArena), base, extendLimit);
/* Adjust the freed range and try again. This time the insertion
must succeed since we just extended the block pool. */
RangeSetBase(&range, extendLimit);
AVERT(Range, &range);
if (!RangeIsEmpty(&range)) {
res = LandInsert(&containingRange, spareLand, &range);
AVER(res == ResOK);
}
}
arena->spareCommitted += RangeSize(&range);
/* Consider returning memory to the OS. */
/* Purging spare memory can cause page descriptors to be unmapped,
causing ArenaCommitted to fall, so we can't be sure to unmap

View file

@ -473,7 +473,7 @@ extern Res ArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth);
extern Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream, Count depth);
extern Bool ArenaAccess(Addr addr, AccessSet mode, MutatorContext context);
extern Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit);
extern void ArenaFreeLandDelete(Arena arena, Addr base, Addr limit);
extern Res ArenaFreeLandDelete(Arena arena, Addr base, Addr limit);
extern Bool GlobalsCheck(Globals arena);
extern Res GlobalsInit(Globals arena);

View file

@ -60,7 +60,7 @@ void TractInit(Tract tract, Pool pool, Addr base)
AVER_CRITICAL(tract != NULL);
AVERT_CRITICAL(Pool, pool);
tract->pool.pool = pool;
tract->pool = pool;
tract->base = base;
tract->seg = NULL;
@ -77,7 +77,7 @@ void TractFinish(Tract tract)
/* Check that there's no segment - and hence no shielding. */
AVER(!TractHasSeg(tract));
tract->pool.pool = NULL;
tract->pool = NULL;
}
@ -258,10 +258,14 @@ void ChunkFinish(Chunk chunk)
AVER(BTIsResRange(chunk->allocTable, 0, chunk->pages));
arena = ChunkArena(chunk);
if (arena->hasFreeLand)
ArenaFreeLandDelete(arena,
PageIndexBase(chunk, chunk->allocBase),
chunk->limit);
if (arena->hasFreeLand) {
Res res = ArenaFreeLandDelete(arena,
PageIndexBase(chunk, chunk->allocBase),
chunk->limit);
/* Can't fail because the range can't split because we passed the
whole chunk and chunks never coalesce. */
AVER(res == ResOK);
}
ArenaChunkRemoved(arena, chunk);
@ -477,9 +481,7 @@ void PageInit(Chunk chunk, Index pi)
page = ChunkPage(chunk, pi);
BTRes(chunk->allocTable, pi);
PageSetPool(page, NULL);
PageSetType(page, PageStateFREE);
RingInit(PageSpareRing(page));
page->pool = NULL;
}

View file

@ -14,26 +14,6 @@
#include "tree.h"
/* Page states
*
* .states: The first word of the page descriptor contains a pointer to
* the page's owning pool if the page is allocated. The bottom two bits
* indicate the page state. Note that the page descriptor itself may
* not be mapped since it is stored in a SparseArray.
*/
#define PageStateALLOC 0 /* allocated to a pool as a tract */
#define PageStateSPARE 1 /* free but mapped to backing store */
#define PageStateFREE 2 /* free and unmapped (address space only) */
#define PageStateWIDTH 2 /* bitfield width */
typedef union PagePoolUnion {
unsigned state : PageStateWIDTH; /* see .states */
Pool pool;
} PagePoolUnion;
/* TractStruct -- tract structure
*
* .tract: Tracts represent the grains of memory allocation from
@ -41,7 +21,7 @@ typedef union PagePoolUnion {
*/
typedef struct TractStruct { /* Tract structure */
PagePoolUnion pool; /* MUST BE FIRST <design/arena#.tract.field.pool> */
Pool pool; /* MUST BE FIRST <design/arena#.tract.field.pool> */
Seg seg; /* NULL or segment containing tract */
Addr base; /* Base address of the tract */
} TractStruct;
@ -51,11 +31,10 @@ extern Addr (TractBase)(Tract tract);
#define TractBase(tract) ((tract)->base)
extern Addr TractLimit(Tract tract, Arena arena);
#define TractHasPool(tract) \
((tract)->pool.state == PageStateALLOC && TractPool(tract))
#define TractPool(tract) ((tract)->pool.pool)
#define TractHasPool(tract) (TractPool(tract) != NULL)
#define TractPool(tract) RVALUE((tract)->pool)
#define TractHasSeg(tract) ((tract)->seg != NULL)
#define TractSeg(tract) ((tract)->seg)
#define TractSeg(tract) RVALUE((tract)->seg)
extern Bool TractCheck(Tract tract);
extern void TractInit(Tract tract, Pool pool, Addr base);
@ -86,39 +65,16 @@ extern void TractFinish(Tract tract);
* field of this union. <design/arena#.tract.field.pool>.
*/
typedef struct PageSpareStruct {
PagePoolUnion pool; /* spare tract, pool.state == PoolStateSPARE */
RingStruct spareRing; /* link in arena spare ring, LRU order */
} PageSpareStruct;
typedef union PageUnion { /* page structure */
PagePoolUnion pool; /* pool.state is the discriminator */
TractStruct alloc; /* allocated tract, pool.state == PoolStateALLOC */
PageSpareStruct spare; /* spare page, pool.state == PoolStateSPARE */
Pool pool; /* discriminator */
TractStruct alloc; /* allocated tract, pool != NULL */
} PageUnion;
#define PageTract(page) (&(page)->alloc)
#define PageOfTract(tract) PARENT(PageUnion, alloc, tract)
#define PagePool(page) RVALUE((page)->pool.pool)
#define PageIsAllocated(page) RVALUE(PagePool(page) != NULL)
#define PageState(page) RVALUE((page)->pool.state)
#define PageSpareRing(page) (&(page)->spare.spareRing)
#define PageOfSpareRing(node) PARENT(PageUnion, spare, RING_ELT(PageSpare, spareRing, node))
#define PageSetPool(page, _pool) \
BEGIN \
Page _page = (page); \
_page->pool.pool = (_pool); \
AVER(PageState(_page) == PageStateALLOC); \
END
#define PageSetType(page, _state) \
BEGIN \
Page _page = (page); \
AVER(PagePool(_page) == NULL); \
_page->pool.state = (_state); \
END
#define PagePool(page) RVALUE((page)->pool)
#define PageIsAllocated(page) (PagePool(page) != NULL)
/* Chunks */

View file

@ -275,13 +275,12 @@ implementation detail, not a requirement.
_`.tract.structure`: The tract structure definition looks like this::
typedef struct TractStruct { /* Tract structure */
PagePoolUnion pool; /* MUST BE FIRST (design.mps.arena.tract.field.pool) */
void *p; /* pointer for use of owning pool */
Addr base; /* Base address of the tract */
BOOLFIELD(hasSeg); /* does tract have a seg in p? */
Pool pool; /* MUST BE FIRST <design/arena#.tract.field.pool> */
Seg seg; /* NULL or segment containing tract */
Addr base; /* Base address of the tract */
} TractStruct;
_`.tract.field.pool`: The pool.pool field indicates to which pool the tract
_`.tract.field.pool`: The pool field indicates to which pool the tract
has been allocated (`.req.fun.trans.pool`_). Tracts are only valid
when they are allocated to pools. When tracts are not allocated to
pools, arena classes are free to reuse tract objects in undefined
@ -294,20 +293,9 @@ that the private representation can share a common prefix with
private representation whether such an object is allocated or not,
without requiring an extra field.
_`.tract.field.p`: The ``p`` field is used by pools to associate tracts
with other data (`.req.fun.trans.arbitrary`_). It's used by the
segment module to indicate which segment a tract belongs to. If a pool
doesn't use segments it may use the ``p`` field for its own purposes.
This field has the non-specific type ``(void *)`` so that pools can
use it for any purpose.
_`.tract.field.hasSeg`: The ``hasSeg`` bit-field is a Boolean which
indicates whether the ``p`` field is being used by the segment module.
If this field is ``TRUE``, then the value of ``p`` is a ``Seg``. See
design.mps.type.bool.bitfield_ for why this is declared using the
``BOOLFIELD`` macro.
.. _design.mps.type.bool.bitfield: type#.bool.bitfield
_`.tract.field.seg`: The seg field is a pointer to the segment
containing the tract, or ``NULL`` if the tract is not contained in any
segment.
_`.tract.field.base`: The base field contains the base address of the
memory represented by the tract.

View file

@ -1,7 +1,7 @@
/*
TEST_HEADER
id = $Id$
summary = test arena extension and compaction
summary = grey-box test of arena extension and compaction
language = c
link = testlib.o
parameters = CHUNKSIZE=1024*1024 ITERATIONS=100
@ -44,7 +44,12 @@ static void test(void *stack_pointer)
for (i = ITERATIONS; i > 0; --i) {
mps_free(pool, block[i - 1], CHUNKSIZE);
mps_arena_collect(arena); /* ensure ArenaCompact is called */
check_chunks(arena, i);
/* The first chunk to be freed from the pool to the arena gets
* some of its memory stolen for the spare memory land's block
* pool, and this prevents the chunk from being destroyed.
* Subsequent chunks can be freed in their entirety because the
* spare memory land has enough blocks. */
check_chunks(arena, i + 1);
}
mps_pool_destroy(pool);