1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-01-30 04:10:54 -08:00
Copied from Perforce
 Change: 184354
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Richard Brooksby 2014-02-17 16:45:12 +00:00
parent d8cb7e9a01
commit 4fc8971802
8 changed files with 52 additions and 65 deletions

View file

@@ -392,7 +392,7 @@ static Res chunkAlloc(Addr *baseReturn, Tract *baseTractReturn,
clChunk->freePages -= pages;
*baseReturn = PageIndexBase(chunk, baseIndex);
*baseTractReturn = PageTract(&chunk->pageTable[baseIndex]);
*baseTractReturn = PageTract(ChunkPage(chunk, baseIndex));
return ResOK;
}
@@ -471,8 +471,7 @@ static void ClientFree(Addr base, Size size, Pool pool)
AVER(limitIndex <= chunk->pages);
for(pi = baseIndex; pi < limitIndex; pi++) {
Page page = &chunk->pageTable[pi];
Tract tract = PageTract(page);
Tract tract = PageTract(ChunkPage(chunk, pi));
AVER(TractPool(tract) == pool);
TractFinish(tract);

View file

@@ -105,15 +105,15 @@ static Bool VMChunkCheck(VMChunk vmchunk)
CHECKL(VMCheck(vmchunk->vm));
CHECKL(VMAlign(vmchunk->vm) == ChunkPageSize(chunk));
CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
/* check pageTableMapped table */
/* FIXME: Check sa's tables */
CHECKD(SparseArray, &vmchunk->pages);
#if 0
CHECKL(vmchunk->pageTableMapped != NULL);
CHECKL((Addr)vmchunk->pageTableMapped >= chunk->base);
CHECKL(AddrAdd((Addr)vmchunk->pageTableMapped, BTSize(chunk->pageTablePages))
<= vmchunk->overheadMappedLimit);
#endif
/* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
makes sure they're where they're expected to be (in the chunk). */
CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages)) <=
vmchunk->overheadMappedLimit);
CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages)) <=
vmchunk->overheadMappedLimit);
/* .improve.check-table: Could check the consistency of the tables. */
return TRUE;
@@ -1039,7 +1039,7 @@ static unsigned pageState(VMChunk vmChunk, Index pi)
{
Chunk chunk = VMChunk2Chunk(vmChunk);
if (SparseArrayIsMapped(&vmChunk->pages, pi))
return PageState(&chunk->pageTable[pi]);
return PageState(ChunkPage(chunk, pi));
return PageStateFREE;
}
@@ -1053,7 +1053,7 @@ static void sparePageRelease(VMChunk vmChunk, Index pi)
{
Chunk chunk = VMChunk2Chunk(vmChunk);
Arena arena = ChunkArena(chunk);
Page page = &chunk->pageTable[pi];
Page page = ChunkPage(chunk, pi);
AVER(PageState(page) == PageStateSPARE);
AVER(arena->spareCommitted >= ChunkPageSize(chunk));
@@ -1214,7 +1214,7 @@ static Res vmAllocComm(Addr *baseReturn, Tract *baseTractReturn,
}
base = PageIndexBase(chunk, baseIndex);
baseTract = PageTract(&chunk->pageTable[baseIndex]);
baseTract = PageTract(ChunkPage(chunk, baseIndex));
limit = AddrAdd(base, size);
zones = ZoneSetOfRange(arena, base, limit);
@@ -1410,7 +1410,7 @@ static void VMFree(Addr base, Size size, Pool pool)
/* loop from pageBase to pageLimit-1 inclusive */
/* Finish each Tract found, then convert them to spare pages. */
for(pi = piBase; pi < piLimit; ++pi) {
Page page = &chunk->pageTable[pi];
Page page = ChunkPage(chunk, pi);
Tract tract = PageTract(page);
AVER(TractPool(tract) == pool);

View file

@@ -44,10 +44,11 @@ void SparseArrayFinish(SparseArray sa)
Bool SparseArrayCheck(SparseArray sa)
{
CHECKL(sa != NULL);
CHECKL(sa->sig == SparseArraySig);
CHECKS(SparseArray, sa);
CHECKL(sa->base != NULL);
CHECKL(sa->elementSize >= 1);
CHECKL(VMCheck(sa->vm)); /* TODO: CHECKD(VM, sa->vm); */
/* TODO: CHECKD(VM, sa->vm); once VMStruct becomes visible */
CHECKL(VMCheck(sa->vm));
CHECKL(sa->elementSize <= VMAlign(sa->vm));
CHECKL(sa->length > 0);
CHECKL(BTCheck(sa->mapped));

View file

@@ -21,7 +21,7 @@
typedef struct SparseArrayStruct *SparseArray;
#define SparseArraySig ((Sig)0x5195BA66) /* SIGnature SParse ARRAy */
#define SparseArraySig ((Sig)0x5195BA66) /* SIGnature SParse ARRay */
typedef struct SparseArrayStruct {
Sig sig;

View file

@@ -446,8 +446,7 @@ Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr)
/* either the page is free or it is */
/* part of the arena tables (see .ullagepages). */
if (BTGet(chunk->allocTable, i)) {
Page page = &chunk->pageTable[i];
*tractReturn = PageTract(page);
*tractReturn = PageTract(ChunkPage(chunk, i));
return TRUE;
}
@@ -494,15 +493,15 @@ static Bool tractSearchInChunk(Tract *tractReturn, Chunk chunk, Index i)
AVER_CRITICAL(chunk->allocBase <= i);
AVER_CRITICAL(i <= chunk->pages);
while(i < chunk->pages
&& !(BTGet(chunk->allocTable, i)
&& PageIsAllocated(&chunk->pageTable[i]))) {
while (i < chunk->pages
&& !(BTGet(chunk->allocTable, i)
&& PageIsAllocated(ChunkPage(chunk, i)))) {
++i;
}
if (i == chunk->pages)
return FALSE;
AVER(i < chunk->pages);
*tractReturn = PageTract(&chunk->pageTable[i]);
*tractReturn = PageTract(ChunkPage(chunk, i));
return TRUE;
}
@@ -602,7 +601,7 @@ void PageAlloc(Chunk chunk, Index pi, Pool pool)
AVER(!BTGet(chunk->allocTable, pi));
AVERT(Pool, pool);
page = &chunk->pageTable[pi];
page = ChunkPage(chunk, pi);
tract = PageTract(page);
base = PageIndexBase(chunk, pi);
BTSet(chunk->allocTable, pi);
@@ -614,15 +613,18 @@ void PageInit(Chunk chunk, Index pi)
void PageInit(Chunk chunk, Index pi)
{
Page page;
AVERT(Chunk, chunk);
AVER(pi < chunk->pages);
page = ChunkPage(chunk, pi);
BTRes(chunk->allocTable, pi);
PageSetPool(&chunk->pageTable[pi], NULL);
PageSetType(&chunk->pageTable[pi], PageStateFREE);
RingInit(PageSpareRing(&chunk->pageTable[pi]));
RingInit(PageFreeRing(&chunk->pageTable[pi]));
return;
PageSetPool(page, NULL);
PageSetType(page, PageStateFREE);
RingInit(PageSpareRing(page));
RingInit(PageFreeRing(page));
}

View file

@@ -15,31 +15,16 @@
/* Page states
*
* .states: Pages (hence PageUnions that describe them) can be in
* one of 3 states:
* allocated (to a pool as tracts)
* allocated pages are mapped
* BTGet(allocTable, i) == 1
* PageState() == PageStateALLOC
* PagePool()->pool == pool
* spare
* these pages are mapped
* BTGet(allocTable, i) == 0
* PageState() == PageStateSPARE
* PagePool() == NULL
* free
* these pages are not mapped
* BTGet(allocTable, i) == 0
the PTE may itself be unmapped, but when it is mapped (use pageTableMapped
to determine whether the page occupied by the page table is mapped):
* PagePool() == NULL
* PageState() == PageStateFREE
* .states: The first word of the page descriptor contains a pointer to
* the page's owning pool if the page is allocated. The bottom two bits
* indicate the page state. Note that the page descriptor itself may
* not be mapped since it is stored in a SparseArray.
*/
#define PageStateALLOC 0
#define PageStateSPARE 1
#define PageStateFREE 2
#define PageStateWIDTH 2 /* bitfield width */
#define PageStateALLOC 0 /* allocated to a pool as a tract */
#define PageStateSPARE 1 /* free but mapped to backing store */
#define PageStateFREE 2 /* free and unmapped (address space only) */
#define PageStateWIDTH 2 /* bitfield width */
typedef union PagePoolUnion {
unsigned state : PageStateWIDTH; /* see .states */
@@ -168,11 +153,12 @@ typedef struct ChunkStruct {
} ChunkStruct;
#define ChunkArena(chunk) ((chunk)->arena)
#define ChunkPageSize(chunk) ((chunk)->pageSize)
#define ChunkPageShift(chunk) ((chunk)->pageShift)
#define ChunkArena(chunk) RVALUE((chunk)->arena)
#define ChunkPageSize(chunk) RVALUE((chunk)->pageSize)
#define ChunkPageShift(chunk) RVALUE((chunk)->pageShift)
#define ChunkPagesToSize(chunk, pages) ((Size)(pages) << (chunk)->pageShift)
#define ChunkSizeToPages(chunk, size) ((Count)((size) >> (chunk)->pageShift))
#define ChunkPage(chunk, pi) (&(chunk)->pageTable[pi])
extern Bool ChunkCheck(Chunk chunk);
extern Res ChunkInit(Chunk chunk, Arena arena,

View file

@@ -233,14 +233,14 @@ associating their own data with each allocation grain.
_`.tract.structure`: The tract structure definition looks like this::
typedef struct TractStruct { /* Tract structure */
Pool pool; /* MUST BE FIRST (design.mps.arena.tract.field.pool) */
PagePoolUnion pool; /* MUST BE FIRST (design.mps.arena.tract.field.pool) */
void *p; /* pointer for use of owning pool */
Addr base; /* Base address of the tract */
TraceSet white : TRACE_MAX; /* traces for which tract is white */
unsigned int hasSeg : 1; /* does tract have a seg in p? */
} TractStruct;
_`.tract.field.pool`: The pool field indicates to which pool the tract
_`.tract.field.pool`: The pool.pool field indicates to which pool the tract
has been allocated (`.req.fun.trans.pool`_). Tracts are only valid
when they are allocated to pools. When tracts are not allocated to
pools, arena classes are free to reuse tract objects in undefined
@@ -546,6 +546,8 @@ Document History
- 2013-03-11 GDR_ Converted to reStructuredText.
- 2014-02-17 RB_ Updated first field of tract structure.
.. _RB: http://www.ravenbrook.com/consultants/rb/
.. _GDR: http://www.ravenbrook.com/consultants/gdr/

View file

@@ -167,13 +167,7 @@ the base address in the VM. Namely:
- base address to index: ``index = (base-address - arena-base) / page-size``
_`.table.page.partial`: The table is partially mapped on an
"as-needed" basis. The function ``unusedTablePages()`` identifies
entirely unused pages occupied by the page table itself (that is,
those pages of the page table which are occupied by ``PageStruct``
objects which all describe free pages). Tract allocation and freeing
use this function to map and unmap the page table with no hysteresis.
(There is a restriction on the parameters you may pass to
``unusedTablePages()``.)
"as-needed" basis using the SparseArray abstract type.
_`.table.page.tract`: Each page table entry contains a tract, which is
only valid if it is allocated to a pool. If it is not allocated to a
@@ -215,6 +209,9 @@ Document History
- 2013-05-24 GDR_ Converted to reStructuredText.
- 2014-02-17 RB_ Updated to note use of SparseArray rather than direct
management of page table mapping.
.. _RB: http://www.ravenbrook.com/consultants/rb/
.. _GDR: http://www.ravenbrook.com/consultants/gdr/