1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-03-24 07:41:54 -07:00

Abstracting partially mapped page tables into a sparsearray abstract datatype, removing a great deal of complexity from the vm arena, and some unnecessary double-initialisation and scanning loops during allocation.

Copied from Perforce
 Change: 184333
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Richard Brooksby 2014-02-12 17:32:20 +00:00
parent b4c03d0ef1
commit 4ce753ec45
7 changed files with 402 additions and 323 deletions

View file

@ -145,7 +145,9 @@ failBootInit:
static Res ClientChunkInit(Chunk chunk, BootBlock boot)
{
Res res;
ClientChunk clChunk;
void *p;
/* chunk is supposed to be uninitialized, so don't check it. */
clChunk = Chunk2ClientChunk(chunk);
@ -154,6 +156,13 @@ static Res ClientChunkInit(Chunk chunk, BootBlock boot)
clChunk->freePages = chunk->pages; /* too large @@@@ */
/* Put the page table as late as possible, as in VM systems we don't want */
/* to map it. */
res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift, chunk->pageSize);
if (res != ResOK)
return res;
chunk->pageTable = p;
return ResOK;
}

View file

@ -24,6 +24,7 @@
#include "boot.h"
#include "tract.h"
#include "bt.h"
#include "sa.h"
#include "mpm.h"
#include "mpsavm.h"
@ -45,8 +46,8 @@ typedef struct VMChunkStruct *VMChunk;
typedef struct VMChunkStruct {
ChunkStruct chunkStruct; /* generic chunk */
VM vm; /* virtual memory handle */
Addr overheadMappedLimit; /* limit of pages mapped for overhead */
BT pageTableMapped; /* indicates mapped state of page table */
Addr overheadMappedLimit; /* limit of pages mapped for overhead */
SparseArrayStruct pages; /* to manage backing store of page table */
Sig sig; /* <design/sig/> */
} VMChunkStruct;
@ -112,10 +113,14 @@ static Bool VMChunkCheck(VMChunk vmchunk)
CHECKL(VMAlign(vmchunk->vm) == ChunkPageSize(chunk));
CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
/* check pageTableMapped table */
/* FIXME: Check sa's tables */
CHECKD(SparseArray, &vmchunk->pages);
#if 0
CHECKL(vmchunk->pageTableMapped != NULL);
CHECKL((Addr)vmchunk->pageTableMapped >= chunk->base);
CHECKL(AddrAdd((Addr)vmchunk->pageTableMapped, BTSize(chunk->pageTablePages))
<= vmchunk->overheadMappedLimit);
#endif
/* .improve.check-table: Could check the consistency of the tables. */
return TRUE;
@ -367,24 +372,36 @@ failVMCreate:
static Res VMChunkInit(Chunk chunk, BootBlock boot)
{
size_t btSize;
VMChunk vmChunk;
Addr overheadLimit;
void *p;
Res res;
BT saMapped, saPages;
/* chunk is supposed to be uninitialized, so don't check it. */
vmChunk = Chunk2VMChunk(chunk);
AVERT(BootBlock, boot);
btSize = (size_t)BTSize(chunk->pageTablePages);
res = BootAlloc(&p, boot, btSize, MPS_PF_ALIGN);
res = BootAlloc(&p, boot, BTSize(chunk->pages), MPS_PF_ALIGN);
if (res != ResOK)
goto failPageTableMapped;
vmChunk->pageTableMapped = p;
goto failSaMapped;
saMapped = p;
res = BootAlloc(&p, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN);
if (res != ResOK)
goto failSaPages;
saPages = p;
overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot));
/* Put the page table as late as possible, as in VM systems we don't want */
/* to map it. */
res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift, chunk->pageSize);
if (res != ResOK)
goto failAllocPageTable;
chunk->pageTable = p;
/* Actually commit all the tables. <design/arenavm/>.@@@@ */
overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot));
if (vmChunk->overheadMappedLimit < overheadLimit) {
overheadLimit = AddrAlignUp(overheadLimit, ChunkPageSize(chunk));
res = vmArenaMap(VMChunkVMArena(vmChunk), vmChunk->vm,
@ -394,13 +411,19 @@ static Res VMChunkInit(Chunk chunk, BootBlock boot)
vmChunk->overheadMappedLimit = overheadLimit;
}
BTResRange(vmChunk->pageTableMapped, 0, chunk->pageTablePages);
SparseArrayInit(&vmChunk->pages,
chunk->pageTable,
sizeof(PageUnion),
chunk->pages,
saMapped, saPages, vmChunk->vm);
return ResOK;
/* .no-clean: No clean-ups needed for boot, as we will discard the chunk. */
failTableMap:
failPageTableMapped:
failSaPages:
failAllocPageTable:
failSaMapped:
return res;
}
@ -418,10 +441,8 @@ static void vmChunkDestroy(Chunk chunk)
chunkUnmapSpare(chunk);
/* This check will also ensure that there are no non-free pages in the
chunk, because those pages would require mapped page table entries. */
AVER(BTIsResRange(vmChunk->pageTableMapped, 0, chunk->pageTablePages));
SparseArrayFinish(&vmChunk->pages);
vmChunk->sig = SigInvalid;
vm = vmChunk->vm;
ChunkFinish(chunk);
@ -679,153 +700,6 @@ static Size VMArenaReserved(Arena arena)
}
/* Page Table Partial Mapping
*
* Some helper functions
*/
/* tablePageBaseIndex -- index of the first page descriptor falling
* (at least partially) on this table page
*
* .repr.table-page: Table pages are passed as the page's base address.
*
* .division: We calculate it by dividing the offset from the beginning
* of the page table by the size of a table element. This relies on
* .vm.addr-is-star.
*/
#define tablePageBaseIndex(chunk, tablePage) \
(AddrOffset((Addr)(chunk)->pageTable, (tablePage)) \
/ sizeof(PageUnion))
/* tablePageWholeBaseIndex
*
* Index of the first page descriptor wholly on this table page.
* Table page specified by address (not index).
*/
#define tablePageWholeBaseIndex(chunk, tablePage) \
(AddrOffset((Addr)(chunk)->pageTable, \
AddrAdd((tablePage), sizeof(PageUnion)-1)) \
/ sizeof(PageUnion))
/* tablePageLimitIndex -- index of the first page descriptor falling
* (wholly) on the next table page
*
* Similar to tablePageBaseIndex, see .repr.table-page and .division.
*/
#define tablePageLimitIndex(chunk, tablePage) \
((AddrOffset((Addr)(chunk)->pageTable, (tablePage)) \
+ ChunkPageSize(chunk) - 1) \
/ sizeof(PageUnion) \
+ 1)
/* tablePageWholeLimitIndex
*
* Index of the first page descriptor falling partially on the next
* table page.
*/
#define tablePageWholeLimitIndex(chunk, tablePage) \
((AddrOffset((Addr)(chunk)->pageTable, (tablePage)) \
+ ChunkPageSize(chunk)) \
/ sizeof(PageUnion))
/* tablePagesUsed
*
* Takes a range of pages identified by [pageBase, pageLimit), and
* returns the pages occupied by the page table which store the
* PageUnion descriptors for those pages.
*/
static void tablePagesUsed(Index *tableBaseReturn, Index *tableLimitReturn,
Chunk chunk, Index pageBase, Index pageLimit)
{
*tableBaseReturn =
PageTablePageIndex(chunk,
AddrPageBase(chunk, addrOfPageDesc(chunk, pageBase)));
*tableLimitReturn =
PageTablePageIndex(chunk,
AddrAlignUp(addrOfPageDesc(chunk, pageLimit),
ChunkPageSize(chunk)));
return;
}
/* tablePagesEnsureMapped -- ensure needed part of page table is mapped
*
* Pages from baseIndex to limitIndex are about to be allocated.
* Ensure that the relevant pages occupied by the page table are mapped.
*/
static Res tablePagesEnsureMapped(VMChunk vmChunk,
Index baseIndex, Index limitIndex)
{
/* tableBaseIndex, tableLimitIndex, tableCursorIndex, */
/* unmappedBase, unmappedLimit are all indexes of pages occupied */
/* by the page table. */
Index tableBaseIndex, tableLimitIndex;
Index tableCursorIndex;
Index unmappedBaseIndex, unmappedLimitIndex;
Index i;
Chunk chunk;
Res res;
chunk = VMChunk2Chunk(vmChunk);
tablePagesUsed(&tableBaseIndex, &tableLimitIndex,
chunk, baseIndex, limitIndex);
tableCursorIndex = tableBaseIndex;
while(BTFindLongResRange(&unmappedBaseIndex, &unmappedLimitIndex,
vmChunk->pageTableMapped,
tableCursorIndex, tableLimitIndex,
1)) {
Addr unmappedBase = TablePageIndexBase(chunk, unmappedBaseIndex);
Addr unmappedLimit = TablePageIndexBase(chunk, unmappedLimitIndex);
/* There might be a page descriptor overlapping the beginning */
/* of the range of table pages we are about to map. */
/* We need to work out whether we should touch it. */
if (unmappedBaseIndex == tableBaseIndex
&& unmappedBaseIndex > 0
&& !BTGet(vmChunk->pageTableMapped, unmappedBaseIndex - 1)) {
/* Start with first descriptor wholly on page */
baseIndex = tablePageWholeBaseIndex(chunk, unmappedBase);
} else {
/* start with first descriptor partially on page */
baseIndex = tablePageBaseIndex(chunk, unmappedBase);
}
/* Similarly for the potentially overlapping page descriptor */
/* at the end. */
if (unmappedLimitIndex == tableLimitIndex
&& unmappedLimitIndex < chunk->pageTablePages
&& !BTGet(vmChunk->pageTableMapped, unmappedLimitIndex)) {
/* Finish with last descriptor wholly on page */
limitIndex = tablePageBaseIndex(chunk, unmappedLimit);
} else if (unmappedLimitIndex == chunk->pageTablePages) {
/* Finish with last descriptor in chunk */
limitIndex = chunk->pages;
} else {
/* Finish with last descriptor partially on page */
limitIndex = tablePageWholeBaseIndex(chunk, unmappedLimit);
}
res = vmArenaMap(VMChunkVMArena(vmChunk),
vmChunk->vm, unmappedBase, unmappedLimit);
if (res != ResOK)
return res;
BTSetRange(vmChunk->pageTableMapped, unmappedBaseIndex, unmappedLimitIndex);
for(i = baseIndex; i < limitIndex; ++i) {
PageInit(chunk, i);
}
tableCursorIndex = unmappedLimitIndex;
if (tableCursorIndex == tableLimitIndex)
break;
}
return ResOK;
}
/* pagesFindFreeInArea -- find a range of free pages in a given address range
*
* Search for a free run of pages in the free table, between the given
@ -1210,31 +1084,6 @@ static Res VMNZAllocPolicy(Index *baseIndexReturn, VMChunk *chunkReturn,
}
/* pageDescIsMapped -- is the page descriptor for a page mapped? */
static Bool pageDescIsMapped(VMChunk vmChunk, Index pi)
{
Index pageTableBaseIndex;
Index pageTableLimitIndex;
Chunk chunk = VMChunk2Chunk(vmChunk);
AVER(pi < chunk->pages);
/* Note that unless the pi'th PageUnion crosses a page boundary */
/* Base and Limit will differ by exactly 1. */
/* They will differ by at most 2 assuming that */
/* sizeof(PageUnion) <= ChunkPageSize(chunk) (!) */
tablePagesUsed(&pageTableBaseIndex, &pageTableLimitIndex, chunk, pi, pi+1);
/* using unsigned arithmetic overflow to use just one comparison */
AVER(pageTableLimitIndex - pageTableBaseIndex - 1 < 2);
/* We can examine the page descriptor iff both table pages */
/* are mapped. */
return BTGet(vmChunk->pageTableMapped, pageTableBaseIndex) &&
BTGet(vmChunk->pageTableMapped, pageTableLimitIndex - 1);
}
/* pageState -- determine page state, even if unmapped
*
* Parts of the page table may be unmapped if their corresponding pages are
@ -1244,7 +1093,7 @@ static Bool pageDescIsMapped(VMChunk vmChunk, Index pi)
static unsigned pageState(VMChunk vmChunk, Index pi)
{
Chunk chunk = VMChunk2Chunk(vmChunk);
if (pageDescIsMapped(vmChunk, pi))
if (SparseArrayIsMapped(&vmChunk->pages, pi))
return PageState(&chunk->pageTable[pi]);
return PageStateFREE;
}
@ -1270,150 +1119,81 @@ static void sparePageRelease(VMChunk vmChunk, Index pi)
}
/* tablePagesUnmap -- unmap page table pages describing a page range
*
* The pages in the range [basePI, limitPI) have been freed, and this
* function then attempts to unmap the corresponding part of the page
* table. This may not be possible because other parts of those pages may
* be in use. This function extends the range as far as possible across
* free pages, so that such cases will be cleaned up eventually.
*
* This code corresponds to tablePagesEnsureMapped, but is defensive, and
* not constructed in the same way. We expect only to find one extra
* page table page at the top and bottom of the range that we could unmap,
* because previous unmappings should have cleaned up, but if we find more
* then this function cleans them up too.
*/
static void tablePagesUnmap(VMChunk vmChunk, Index basePI, Index limitPI)
static Res pageDescMap(VMChunk vmChunk, Index basePI, Index limitPI)
{
Addr base, limit;
Chunk chunk;
chunk = VMChunk2Chunk(vmChunk);
AVER(basePI < chunk->pages);
AVER(limitPI <= chunk->pages);
AVER(basePI < limitPI);
Size before = VMMapped(vmChunk->vm);
Arena arena = VMArena2Arena(VMChunkVMArena(vmChunk));
Res res = SparseArrayMap(&vmChunk->pages, basePI, limitPI);
arena->committed += VMMapped(vmChunk->vm) - before;
return res;
}
/* Now attempt to unmap the part of the page table that's no longer
in use because we've made a run of pages free. This scan will
also catch any adjacent unused pages, though they ought to have
been caught by previous scans. */
/* Lower basePI until we reach a descriptor we can't unmap, or the
beginning of the table. We scan right down to page zero even
though allocations start at chunk->allocBase so that the first table
page can be unmapped. */
AVER(pageState(vmChunk, basePI) == PageStateFREE);
while (basePI > 0) {
Bool mapped = pageDescIsMapped(vmChunk, basePI - 1);
if (mapped && PageState(&chunk->pageTable[basePI - 1]) != PageStateFREE)
break;
--basePI;
if (!mapped)
break;
}
AVER(pageState(vmChunk, basePI) == PageStateFREE);
/* Calculate the base of the range we can unmap. */
base = AddrAlignUp(addrOfPageDesc(chunk, basePI), ChunkPageSize(chunk));
/* Raise limitPI until we reach a descriptor we can't unmap, or the end
of the table. */
AVER(pageState(vmChunk, limitPI - 1) == PageStateFREE);
while (limitPI < chunk->pages) {
Bool mapped = pageDescIsMapped(vmChunk, limitPI);
if (mapped && PageState(&chunk->pageTable[limitPI]) != PageStateFREE)
break;
++limitPI;
if (!mapped)
break;
}
AVER(pageState(vmChunk, limitPI - 1) == PageStateFREE);
/* Calculate the limit of the range we can unmap. */
if (limitPI < chunk->pages)
limit = AddrAlignDown(addrOfPageDesc(chunk, limitPI), ChunkPageSize(chunk));
else
limit = AddrAlignUp(addrOfPageDesc(chunk, limitPI), ChunkPageSize(chunk));
/* Base and limit may be equal or out of order, if there were few
descriptors in the range. In that case, we can't unmap anything. */
if (base < limit) {
vmArenaUnmap(VMChunkVMArena(vmChunk), vmChunk->vm, base, limit);
BTResRange(vmChunk->pageTableMapped,
PageTablePageIndex(chunk, base),
PageTablePageIndex(chunk, limit));
}
/* pageDescUnmap -- unmap backing store for a range of page descriptors
 *
 * Declares the page descriptors [basePI, limitPI) undefined via the
 * chunk's sparse array, which unmaps any page-table pages that become
 * wholly unused, and accounts the resulting change in mapped memory
 * against the arena's committed total.
 */
static void pageDescUnmap(VMChunk vmChunk, Index basePI, Index limitPI)
{
Size before = VMMapped(vmChunk->vm);
Arena arena = VMArena2Arena(VMChunkVMArena(vmChunk));
SparseArrayUnmap(&vmChunk->pages, basePI, limitPI);
/* VMMapped has decreased by however much SparseArrayUnmap released,
   so this adds a non-positive delta to the committed count. */
arena->committed += VMMapped(vmChunk->vm) - before;
}
/* pagesMarkAllocated -- Mark the pages allocated */
static Res pagesMarkAllocated(VMArena vmArena, VMChunk vmChunk,
Index baseIndex, Count pages, Pool pool)
Index basePI, Count pages, Pool pool)
{
Index i, mappedLimit, limitIndex;
Index cursor, i, j, k;
Index limitPI;
Chunk chunk = VMChunk2Chunk(vmChunk);
Res res;
limitPI = basePI + pages;
AVER(limitPI <= chunk->pages);
/* Ensure that the page descriptors we need are on mapped pages. */
limitIndex = baseIndex + pages;
AVER(limitIndex <= chunk->pages);
res = tablePagesEnsureMapped(vmChunk, baseIndex, limitIndex);
if (res != ResOK)
goto failTableMap;
/* NOTE: We could find a reset bit range in vmChunk->pages.pages in order
to skip across hundreds of pages at once. That could speed up really
big block allocations (hundreds of pages long). */
/* We're not expecting zero-sized allocations. */
AVER(baseIndex < limitIndex);
i = baseIndex;
mappedLimit = baseIndex;
while (i < limitIndex) {
Addr freeBase;
/* Allocate a run of spare pages. */
while(i < limitIndex && PageState(&chunk->pageTable[i]) == PageStateSPARE) {
cursor = basePI;
while (BTFindLongResRange(&j, &k, vmChunk->pages.mapped, cursor, limitPI, 1)) {
for (i = cursor; i < j; ++i) {
sparePageRelease(vmChunk, i);
PageAlloc(chunk, i, pool);
++i;
}
if (i >= limitIndex)
return ResOK;
/* Allocate a run of free pages. */
freeBase = PageIndexBase(chunk, i);
AVER(PageState(&chunk->pageTable[i]) == PageStateFREE);
while (i < limitIndex && PageState(&chunk->pageTable[i]) == PageStateFREE) {
PageAlloc(chunk, i, pool);
++i;
}
/* Map the memory for those free pages. */
res = vmArenaMap(vmArena, vmChunk->vm, freeBase, PageIndexBase(chunk, i));
res = pageDescMap(vmChunk, j, k);
if (res != ResOK)
goto failPagesMap;
mappedLimit = i;
goto failSAMap;
res = vmArenaMap(vmArena, vmChunk->vm,
PageIndexBase(chunk, j), PageIndexBase(chunk, k));
if (res != ResOK)
goto failVMMap;
for (i = j; i < k; ++i) {
PageInit(chunk, i);
PageAlloc(chunk, i, pool);
}
cursor = k;
if (cursor == limitPI)
return ResOK;
}
for (i = cursor; i < limitPI; ++i) {
sparePageRelease(vmChunk, i);
PageAlloc(chunk, i, pool);
}
return ResOK;
failPagesMap:
/* region from baseIndex to mappedLimit needs unmapping */
/* TODO: Consider making them spare instead, then purging. */
if (baseIndex < mappedLimit) {
failVMMap:
pageDescUnmap(vmChunk, j, k);
failSAMap:
/* region from basePI to j needs deallocating */
/* TODO: Consider making pages spare instead, then purging. */
if (basePI < j) {
vmArenaUnmap(vmArena, vmChunk->vm,
PageIndexBase(chunk, baseIndex),
PageIndexBase(chunk, mappedLimit));
PageIndexBase(chunk, basePI),
PageIndexBase(chunk, j));
for (i = basePI; i < j; ++i)
PageFree(chunk, i);
pageDescUnmap(vmChunk, basePI, j);
}
while (i > baseIndex) {
--i;
TractFinish(PageTract(&chunk->pageTable[i]));
PageFree(chunk, i);
}
tablePagesUnmap(vmChunk, baseIndex, limitIndex);
failTableMap:
return res;
}
@ -1569,7 +1349,6 @@ static Size chunkUnmapAroundPage(Chunk chunk, Size size, Page page)
do {
sparePageRelease(vmChunk, limitPI);
PageInit(chunk, limitPI);
++limitPI;
purged += pageSize;
} while (purged < size &&
@ -1580,7 +1359,6 @@ static Size chunkUnmapAroundPage(Chunk chunk, Size size, Page page)
pageState(vmChunk, basePI - 1) == PageStateSPARE) {
--basePI;
sparePageRelease(vmChunk, basePI);
PageInit(chunk, basePI);
purged += pageSize;
}
@ -1589,7 +1367,7 @@ static Size chunkUnmapAroundPage(Chunk chunk, Size size, Page page)
PageIndexBase(chunk, basePI),
PageIndexBase(chunk, limitPI));
tablePagesUnmap(vmChunk, basePI, limitPI);
pageDescUnmap(vmChunk, basePI, limitPI);
return purged;
}

View file

@ -73,6 +73,7 @@
#include "abq.c"
#include "range.c"
#include "freelist.c"
#include "sa.c"
/* Additional pool classes */

View file

@ -1160,6 +1160,8 @@
3104B02F156D39F2000A585A /* amssshe.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amssshe.c; sourceTree = "<group>"; };
3104B03D156D3AD7000A585A /* segsmss */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = segsmss; sourceTree = BUILT_PRODUCTS_DIR; };
3107DC4E173B03D100F705C8 /* arg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = arg.h; sourceTree = "<group>"; };
3112ED3A18ABC57F00CC531A /* sa.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sa.h; sourceTree = "<group>"; };
3112ED3B18ABC75200CC531A /* sa.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = sa.c; sourceTree = "<group>"; };
3114A590156E913C001E0AA3 /* locv */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = locv; sourceTree = BUILT_PRODUCTS_DIR; };
3114A5A1156E9168001E0AA3 /* locv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = locv.c; sourceTree = "<group>"; };
3114A5A7156E92C0001E0AA3 /* qs */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = qs; sourceTree = BUILT_PRODUCTS_DIR; };
@ -1908,6 +1910,8 @@
311F2F7A17398B8E00C15B6A /* tract.h */,
31EEAC44156AB32500714D05 /* version.c */,
31EEAC0E156AB27B00714D05 /* walk.c */,
3112ED3A18ABC57F00CC531A /* sa.h */,
3112ED3B18ABC75200CC531A /* sa.c */,
);
name = "MPM Core";
sourceTree = "<group>";

208
mps/code/sa.c Normal file
View file

@ -0,0 +1,208 @@
/* sa.c: SPARSE ARRAY IMPLEMENTATION
*
* $Id$
* Copyright (c) 2014 Ravenbrook Limited. See end of file for license.
*/
#include "sa.h"
#include "mpmst.h"
/* pagesLength -- number of VM grains needed to back the whole array */
static Index pagesLength(SparseArray sa)
{
  Size arrayBytes = sa->length * sa->elementSize;
  Size grainBytes = VMAlign(sa->vm);
  /* Round the array size up to a whole number of grains. */
  return (arrayBytes + grainBytes - 1) >> sa->shift;
}
/* SparseArrayInit -- initialise a sparse array over unmapped VM pages
 *
 * mapped and pages are caller-supplied bit tables: one bit per array
 * element, and one bit per backing VM page respectively.  On return
 * no element is defined and no backing page is mapped.
 */
void SparseArrayInit(SparseArray sa,
                     void *base, Size elementSize, Index length,
                     BT mapped, BT pages, VM vm)
{
  AVER(sa != NULL);
  AVER(SizeIsP2(VMAlign(vm)));

  sa->vm = vm;
  sa->shift = SizeLog2(VMAlign(vm));  /* cached for index arithmetic */
  sa->base = base;
  sa->elementSize = elementSize;
  sa->length = length;
  sa->mapped = mapped;
  sa->pages = pages;

  /* Start with every element undefined and every page unmapped. */
  BTResRange(mapped, 0, length);
  BTResRange(pages, 0, pagesLength(sa));

  sa->sig = SparseArraySig;
  AVERT(SparseArray, sa);
}
/* SparseArrayFinish -- finish a sparse array
 *
 * The caller must already have undefined every element (and hence
 * all backing pages must already be unmapped).
 */
void SparseArrayFinish(SparseArray sa)
{
  AVERT(SparseArray, sa);
  AVER(BTIsResRange(sa->pages, 0, pagesLength(sa)));
  AVER(BTIsResRange(sa->mapped, 0, sa->length));
  sa->sig = SigInvalid;
}
/* SparseArrayCheck -- validate a SparseArray structure
 *
 * Checks the signature, basic field sanity, and the invariant that an
 * element is no larger than a VM grain (see sa.h).  Returns TRUE if
 * all checks pass; CHECKL reports failure in checking varieties.
 */
Bool SparseArrayCheck(SparseArray sa)
{
CHECKL(sa != NULL);
CHECKL(sa->sig == SparseArraySig);
CHECKL(sa->base != NULL);
CHECKL(sa->elementSize >= 1);
CHECKL(VMCheck(sa->vm)); /* TODO: CHECKD(VM, sa->vm); */
/* Elements must not be larger than a VM page (sa.h). */
CHECKL(sa->elementSize <= VMAlign(sa->vm));
CHECKL(sa->length > 0);
CHECKL(BTCheck(sa->mapped));
CHECKL(BTCheck(sa->pages));
/* sa->shift is a cache of this value, set up by SparseArrayInit. */
CHECKL(sa->shift == SizeLog2(VMAlign(sa->vm)));
return TRUE;
}
/* SparseArrayMap -- map memory for a range of elements in the array
*
* Ensures that the array elements in the unmapped range [baseEI, limitEI)
* have memory. The array elements may then be accessed, but their contents
* will be undefined.
*
* In the MPS we expect this to be called frequently when allocating in
* the arena, and so it's worth having the pages bit table to make this
* fast. Compare with SparseArrayUnmap.
*/
Res SparseArrayMap(SparseArray sa, Index baseEI, Index limitEI)
{
Index baseMI, limitMI;
AVERT(SparseArray, sa);
AVER(NONNEGATIVE(baseEI));
AVER(baseEI < limitEI);
AVER(limitEI <= sa->length);
/* The elements must currently be undefined. */
AVER(BTIsResRange(sa->mapped, baseEI, limitEI));
/* Calculate the index of the page on which the base element resides.
If that's already mapped (because some other element below baseEI
is defined) bump up to the next page. */
baseMI = (baseEI * sa->elementSize) >> sa->shift;
if (BTGet(sa->pages, baseMI))
++baseMI;
/* Calculate the index of the page on which the last element resides.
If that's already mapped (because some other element not below
limitEI is defined) bump down to the previous page. */
limitMI = ((limitEI * sa->elementSize - 1) >> sa->shift) + 1;
if (BTGet(sa->pages, limitMI - 1))
--limitMI;
/* Map the pages in the range that aren't already mapped on behalf of
other elements.  If both ends were bumped past each other, nothing
needs mapping at all. */
if (baseMI < limitMI) {
Addr base, limit;
Res res;
AVER(BTIsResRange(sa->pages, baseMI, limitMI));
base = AddrAdd(sa->base, baseMI << sa->shift);
limit = AddrAdd(sa->base, limitMI << sa->shift);
res = VMMap(sa->vm, base, limit);
/* On failure, return before updating either bit table, leaving the
array in its prior state. */
if (res != ResOK)
return res;
BTSetRange(sa->pages, baseMI, limitMI);
}
/* Mark the elements defined only once their backing memory exists. */
BTSetRange(sa->mapped, baseEI, limitEI);
return ResOK;
}
/* SparseArrayUnmap -- unmap memory for a range of elements in the array
*
* Declare that the array elements in the range [baseEI, limitEI) can be
* unmapped. After this call they may not be accessed.
*
* In the MPS we expect this to be called infrequently when purging large
* numbers of spare pages at once, so scanning a range of bits to determine
* whether we can unmap isn't too bad.
*
* TODO: Consider keeping a count of the number of array elements defined
* on each page, rather than a bit table, then we can unmap pages with
* zero counts rather than scanning.
*/
void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI)
{
Index baseMI, limitMI, i;
AVERT(SparseArray, sa);
AVER(NONNEGATIVE(baseEI));
AVER(baseEI < limitEI);
AVER(limitEI <= sa->length);
/* The elements must currently be defined. */
AVER(BTIsSetRange(sa->mapped, baseEI, limitEI));
/* Calculate the index of the lowest element that might be occupying
the page on which the base element resides. If any elements between
there and baseEI are defined, we can't unmap that page, so bump up. */
baseMI = (baseEI * sa->elementSize) >> sa->shift;
i = SizeAlignDown(baseEI * sa->elementSize, VMAlign(sa->vm)) / sa->elementSize;
if (i < baseEI && !BTIsResRange(sa->mapped, i, baseEI))
++baseMI;
/* Calculate the index of the highest element that might be occupying
the page on which the last element resides. If any elements between
limitEI and there are defined, we can't unmap that page, so bump down. */
limitMI = ((limitEI * sa->elementSize - 1) >> sa->shift) + 1;
i = (SizeAlignUp(limitEI * sa->elementSize, VMAlign(sa->vm)) +
sa->elementSize - 1) / sa->elementSize;
/* Clamp to the array length: there may be no elements past limitEI. */
if (i > sa->length)
i = sa->length;
if (i > limitEI && !BTIsResRange(sa->mapped, limitEI, i))
--limitMI;
/* If both ends were bumped past each other, every page in the range
is still shared with defined elements and nothing can be unmapped. */
if (baseMI < limitMI) {
Addr base, limit;
AVER(BTIsSetRange(sa->pages, baseMI, limitMI));
base = AddrAdd(sa->base, baseMI << sa->shift);
limit = AddrAdd(sa->base, limitMI << sa->shift);
VMUnmap(sa->vm, base, limit);
BTResRange(sa->pages, baseMI, limitMI);
}
BTResRange(sa->mapped, baseEI, limitEI);
}
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Redistributions in any form must be accompanied by information on how
* to obtain complete source code for this software and any accompanying
* software that uses this software. The source code must either be
* included in the distribution or be available for no more than the cost
* of distribution plus a nominal fee, and must be freely redistributable
* under reasonable conditions. For an executable file, complete source
* code means the source code for all modules it contains. It does not
* include source code for modules or files that typically accompany the
* major components of the operating system on which the executable file
* runs.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
* PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

89
mps/code/sa.h Normal file
View file

@ -0,0 +1,89 @@
/* sa.h: SPARSE ARRAY INTERFACE
*
* $Id$
* Copyright (c) 2014 Ravenbrook Limited. See end of file for license.
*
* A sparse array is an array whose storage is partially mapped from a VM.
* Each element in the array has its own "mapped" status, and may only
* be used if it is mapped.
*
* The main use of sparse arrays is partially mapped page tables in the
* VM arena, where they provide a fast lookup from an address within
* a chunk to a page descriptor, while avoiding mapping memory for
* page descriptors for unused areas of address space, such as unused
* zone stripes or gaps between those stripes.
*/
#ifndef sa_h
#define sa_h

#include "mpmtypes.h"

typedef struct SparseArrayStruct *SparseArray;

#define SparseArraySig ((Sig)0x5195BA66) /* SIGnature SParse ARRAy */

typedef struct SparseArrayStruct {
  Sig sig;          /* <design/sig/> */
  void *base;       /* base of array, page aligned */
  Size elementSize; /* size of array elements, <= page size */
  Index length;     /* number of elements in the array */
  BT mapped;        /* whether elements exist in the array */
  BT pages;         /* whether underlying pages are mapped */
  VM vm;            /* where pages are mapped from */
  Shift shift;      /* SizeLog2(VMAlign(vm)) TODO: VMShift(vm) */
} SparseArrayStruct;

/* SparseArrayInit -- initialise a sparse array
 *
 * mapped and pages are caller-supplied bit tables with one bit per
 * array element and one bit per underlying VM page respectively.
 * (Parameter names here match the definition in sa.c and the struct
 * fields they initialise.)
 */
extern void SparseArrayInit(SparseArray sa,
                            void *base, Size elementSize, Index length,
                            BT mapped, BT pages, VM vm);
extern void SparseArrayFinish(SparseArray sa);
extern Bool SparseArrayCheck(SparseArray sa);

/* SparseArrayIsMapped -- may element i of sa be accessed? */
#define SparseArrayIsMapped(sa, i) BTGet((sa)->mapped, (i))

extern Res SparseArrayMap(SparseArray sa, Index baseEI, Index limitEI);
extern void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI);

#endif /* sa_h */
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2014 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Redistributions in any form must be accompanied by information on how
* to obtain complete source code for this software and any accompanying
* software that uses this software. The source code must either be
* included in the distribution or be available for no more than the cost
* of distribution plus a nominal fee, and must be freely redistributable
* under reasonable conditions. For an executable file, complete source
* code means the source code for all modules it contains. It does not
* include source code for modules or files that typically accompany the
* major components of the operating system on which the executable file
* runs.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
* PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

View file

@ -159,7 +159,6 @@ Res ChunkInit(Chunk chunk, Arena arena,
{
Size size;
Count pages;
Page pageTable;
Shift pageShift;
Size pageTableSize;
void *p;
@ -199,13 +198,6 @@ Res ChunkInit(Chunk chunk, Arena arena,
if (res != ResOK)
goto failClassInit;
/* Put the page table as late as possible, as in VM systems we don't want */
/* to map it. */
res = BootAlloc(&p, boot, (size_t)pageTableSize, (size_t)pageSize);
if (res != ResOK)
goto failAllocPageTable;
chunk->pageTable = pageTable = p;
/* @@@@ Is BootAllocated always right? */
/* Last thing we BootAlloc'd is pageTable. We requested pageSize */
/* alignment, and pageTableSize is itself pageSize aligned, so */
@ -221,8 +213,6 @@ Res ChunkInit(Chunk chunk, Arena arena,
return ResOK;
/* .no-clean: No clean-ups needed for boot, as we will discard the chunk. */
failAllocPageTable:
(arena->class->chunkFinish)(chunk);
failClassInit:
failAllocTable:
return res;