1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-04-23 14:32:12 -07:00
Copied from Perforce
 Change: 186662
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Gareth Rees 2014-06-17 15:15:32 +01:00
parent 230d7cf721
commit 5fb05e31b6
14 changed files with 110 additions and 80 deletions

View file

@ -89,9 +89,10 @@ static Bool ClientArenaCheck(ClientArena clientArena)
/* clientChunkCreate -- create a ClientChunk */
static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
ClientArena clientArena, Size arenaGrainSize)
static Res clientChunkCreate(Chunk *chunkReturn, ClientArena clientArena,
Addr base, Addr limit)
{
Arena arena;
ClientChunk clChunk;
Chunk chunk;
Addr alignedBase;
@ -101,15 +102,15 @@ static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
void *p;
AVER(chunkReturn != NULL);
AVERT(ClientArena, clientArena);
arena = ClientArena2Arena(clientArena);
AVER(base != (Addr)0);
/* TODO: Should refuse on small chunks, instead of AVERring. */
AVER(limit != (Addr)0);
AVER(limit > base);
AVERT(ArenaGrainSize, arenaGrainSize);
/* Initialize boot block. */
/* Chunk has to be page-aligned, and the boot allocs must be within it. */
alignedBase = AddrAlignUp(base, arenaGrainSize);
alignedBase = AddrAlignUp(base, ArenaGrainSize(arena));
AVER(alignedBase < limit);
res = BootBlockInit(boot, (void *)alignedBase, (void *)limit);
if (res != ResOK)
@ -122,9 +123,8 @@ static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
goto failChunkAlloc;
clChunk = p; chunk = ClientChunk2Chunk(clChunk);
res = ChunkInit(chunk, ClientArena2Arena(clientArena),
alignedBase, AddrAlignDown(limit, arenaGrainSize),
arenaGrainSize, boot);
res = ChunkInit(chunk, arena, alignedBase,
AddrAlignDown(limit, ArenaGrainSize(arena)), boot);
if (res != ResOK)
goto failChunkInit;
@ -278,7 +278,7 @@ static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
/* have to have a valid arena before calling ChunkCreate */
clientArena->sig = ClientArenaSig;
res = clientChunkCreate(&chunk, chunkBase, limit, clientArena, grainSize);
res = clientChunkCreate(&chunk, clientArena, chunkBase, limit);
if (res != ResOK)
goto failChunkCreate;
arena->primary = chunk;
@ -338,8 +338,7 @@ static Res ClientArenaExtend(Arena arena, Addr base, Size size)
limit = AddrAdd(base, size);
clientArena = Arena2ClientArena(arena);
res = clientChunkCreate(&chunk, base, limit, clientArena,
ArenaGrainSize(arena));
res = clientChunkCreate(&chunk, clientArena, base, limit);
return res;
}

View file

@ -102,7 +102,7 @@ static Bool VMChunkCheck(VMChunk vmchunk)
chunk = VMChunk2Chunk(vmchunk);
CHECKD(Chunk, chunk);
CHECKD_NOSIG(VM, vmchunk->vm); /* <design/check/#hidden-type> */
CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize()));
CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize(vmchunk->vm)));
CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
CHECKD(SparseArray, &vmchunk->pages);
/* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
@ -272,11 +272,10 @@ static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
* chunkReturn, return parameter for the created chunk.
* vmArena, the parent VMArena.
* size, approximate amount of virtual address that the chunk should reserve.
* grainSize, arena grain size.
*/
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size,
Size grainSize)
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
Arena arena;
Res res;
Addr base, limit, chunkStructLimit;
VM vm;
@ -287,10 +286,10 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size,
AVER(chunkReturn != NULL);
AVERT(VMArena, vmArena);
arena = VMArena2Arena(vmArena);
AVER(size > 0);
AVERT(ArenaGrainSize, grainSize);
res = VMCreate(&vm, size, grainSize, vmArena->vmParams);
res = VMCreate(&vm, size, ArenaGrainSize(arena), vmArena->vmParams);
if (res != ResOK)
goto failVMCreate;
@ -308,15 +307,14 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size,
goto failChunkAlloc;
vmChunk = p;
/* Calculate the limit of the grain where the chunkStruct resides. */
chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), grainSize);
chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), ArenaGrainSize(arena));
res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
if (res != ResOK)
goto failChunkMap;
vmChunk->overheadMappedLimit = chunkStructLimit;
vmChunk->vm = vm;
res = ChunkInit(VMChunk2Chunk(vmChunk), VMArena2Arena(vmArena),
base, limit, grainSize, boot);
res = ChunkInit(VMChunk2Chunk(vmChunk), arena, base, limit, boot);
if (res != ResOK)
goto failChunkInit;
@ -512,7 +510,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
size = arg.val.size;
if (ArgPick(&arg, args, MPS_KEY_ARENA_GRAIN_SIZE))
grainSize = arg.val.size;
grainSize = SizeAlignUp(grainSize, VMPageSize());
grainSize = SizeAlignUp(grainSize, PageSize());
AVER(size > 0);
AVERT(ArenaGrainSize, grainSize);
@ -563,7 +561,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
/* have to have a valid arena before calling ChunkCreate */
vmArena->sig = VMArenaSig;
res = VMChunkCreate(&chunk, vmArena, size, grainSize);
res = VMChunkCreate(&chunk, vmArena, size);
if (res != ResOK)
goto failChunkCreate;
@ -725,8 +723,7 @@ static Res VMArenaGrow(Arena arena, SegPref pref, Size size)
VMArenaReserved(VMArena2Arena(vmArena)));
return res;
}
res = VMChunkCreate(&newChunk, vmArena, chunkSize,
ArenaGrainSize(arena));
res = VMChunkCreate(&newChunk, vmArena, chunkSize);
if(res == ResOK)
goto vmArenaGrow_Done;
}

View file

@ -80,7 +80,7 @@ Bool MPMCheck(void)
/* The granularity of memory mapping must be a multiple of the
* granularity of protection (or we might not be able to protect an
* arena grain). */
CHECKL(VMPageSize() % ProtGranularity() == 0);
CHECKL(PageSize() % ProtGranularity() == 0);
return TRUE;
}

View file

@ -1002,7 +1002,8 @@ extern Res RootsIterate(Globals arena, RootIterateFn f, void *p);
/* VM Interface -- see <code/vm*.c> */
extern Size VMPageSize(void);
extern Size PageSize(void);
extern Size VMPageSize(VM vm);
extern Bool VMCheck(VM vm);
extern Res VMParamFromArgs(void *params, size_t paramSize, ArgList args);
extern Res VMCreate(VM *VMReturn, Size size, Size grainSize, void *params);

View file

@ -143,7 +143,7 @@ void ProtSetup(void)
Size ProtGranularity(void)
{
/* Individual pages can be protected. */
return VMPageSize();
return PageSize();
}

View file

@ -121,7 +121,7 @@ void ProtSetup(void)
Size ProtGranularity(void)
{
/* Individual pages can be protected. */
return VMPageSize();
return PageSize();
}

View file

@ -404,7 +404,7 @@ void ProtSetup(void)
Size ProtGranularity(void)
{
/* Individual pages can be protected. */
return VMPageSize();
return PageSize();
}

View file

@ -10,7 +10,7 @@
static Index pagesLength(SparseArray sa)
{
return (sa->length * sa->elementSize + VMPageSize() - 1) >> sa->shift;
return (sa->length * sa->elementSize + VMPageSize(sa->vm) - 1) >> sa->shift;
}
void SparseArrayInit(SparseArray sa,
@ -25,8 +25,8 @@ void SparseArrayInit(SparseArray sa,
sa->mapped = mapped;
sa->pages = pages;
sa->vm = vm;
AVER(SizeIsP2(VMPageSize()));
sa->shift = SizeLog2(VMPageSize());
AVER(SizeIsP2(VMPageSize(sa->vm)));
sa->shift = SizeLog2(VMPageSize(sa->vm));
BTResRange(mapped, 0, length);
BTResRange(pages, 0, pagesLength(sa));
@ -49,11 +49,11 @@ Bool SparseArrayCheck(SparseArray sa)
CHECKL(sa->base != NULL);
CHECKL(sa->elementSize >= 1);
CHECKD_NOSIG(VM, sa->vm); /* <design/check/#hidden-type> */
CHECKL(sa->elementSize <= VMPageSize());
CHECKL(sa->elementSize <= VMPageSize(sa->vm));
CHECKL(sa->length > 0);
CHECKD_NOSIG(BT, sa->mapped);
CHECKD_NOSIG(BT, sa->pages);
CHECKL(sa->shift == SizeLog2(VMPageSize()));
CHECKL(sa->shift == SizeLog2(VMPageSize(sa->vm)));
return TRUE;
}
@ -139,7 +139,7 @@ void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI)
the page on which the base element resides. If any elements between
there and baseMI are defined, we can't unmap that page, so bump up. */
baseMI = (baseEI * sa->elementSize) >> sa->shift;
i = SizeAlignDown(baseEI * sa->elementSize, VMPageSize()) / sa->elementSize;
i = SizeAlignDown(baseEI * sa->elementSize, VMPageSize(sa->vm)) / sa->elementSize;
if (i < baseEI && !BTIsResRange(sa->mapped, i, baseEI))
++baseMI;
@ -147,7 +147,7 @@ void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI)
the page on which the last element resides. If any elements between
limitMI and there are defined, we can't unmap that page, so bump down. */
limitMI = ((limitEI * sa->elementSize - 1) >> sa->shift) + 1;
i = (SizeAlignUp(limitEI * sa->elementSize, VMPageSize()) +
i = (SizeAlignUp(limitEI * sa->elementSize, VMPageSize(sa->vm)) +
sa->elementSize - 1) / sa->elementSize;
if (i > sa->length)
i = sa->length;

View file

@ -31,7 +31,7 @@ typedef struct SparseArrayStruct {
BT mapped; /* whether elements exist in the array */
BT pages; /* whether underlying pages are mapped */
VM vm; /* where pages are mapped from */
Shift shift; /* SizeLog2(VMPageSize()) TODO: VMShift() */
Shift shift; /* SizeLog2(VMPageSize(sa->vm)) */
} SparseArrayStruct;
extern void SparseArrayInit(SparseArray sa,

View file

@ -162,8 +162,7 @@ Bool ChunkCheck(Chunk chunk)
/* ChunkInit -- initialize generic part of chunk */
Res ChunkInit(Chunk chunk, Arena arena,
Addr base, Addr limit, Align pageSize, BootBlock boot)
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, BootBlock boot)
{
Size size;
Count pages;
@ -175,19 +174,17 @@ Res ChunkInit(Chunk chunk, Arena arena,
/* chunk is supposed to be uninitialized, so don't check it. */
AVERT(Arena, arena);
AVER(base != NULL);
AVER(AddrIsAligned(base, pageSize));
AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
AVER(base < limit);
AVER(AddrIsAligned(limit, pageSize));
AVERT(Align, pageSize);
AVER(pageSize >= MPS_PF_ALIGN);
AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
AVERT(BootBlock, boot);
chunk->serial = (arena->chunkSerial)++;
chunk->arena = arena;
RingInit(&chunk->chunkRing);
chunk->pageSize = pageSize;
chunk->pageShift = pageShift = SizeLog2(pageSize);
chunk->pageSize = ArenaGrainSize(arena);
chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
chunk->base = base;
chunk->limit = limit;
size = ChunkSize(chunk);
@ -198,7 +195,7 @@ Res ChunkInit(Chunk chunk, Arena arena,
goto failAllocTable;
chunk->allocTable = p;
pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), pageSize);
pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
chunk->pageTablePages = pageTableSize >> pageShift;
res = (arena->class->chunkInit)(chunk, boot);
@ -209,7 +206,7 @@ Res ChunkInit(Chunk chunk, Arena arena,
/* Last thing we BootAlloc'd is pageTable. We requested pageSize */
/* alignment, and pageTableSize is itself pageSize aligned, so */
/* BootAllocated should also be pageSize aligned. */
AVER(AddrIsAligned(BootAllocated(boot), pageSize));
AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);
/* Init allocTable after class init, because it might be mapped there. */

View file

@ -162,7 +162,7 @@ typedef struct ChunkStruct {
extern Bool ChunkCheck(Chunk chunk);
extern Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit,
Align pageSize, BootBlock boot);
BootBlock boot);
extern void ChunkFinish(Chunk chunk);
extern Compare ChunkCompare(Tree tree, TreeKey key);
extern TreeKey ChunkKey(Tree tree);

View file

@ -19,6 +19,7 @@ SRCID(vman, "$Id$");
/* ANSI fake VM structure, see <design/vman/> */
typedef struct VMStruct {
Sig sig; /* <design/sig/> */
Size pageSize; /* VMAN_PAGE_SIZE */
void *block; /* pointer to malloc'd block, for free() */
Addr base, limit; /* aligned boundaries of malloc'd memory */
Size reserved; /* total reserved address space */
@ -34,9 +35,9 @@ Bool VMCheck(VM vm)
CHECKL(vm->base != (Addr)0);
CHECKL(vm->limit != (Addr)0);
CHECKL(vm->base < vm->limit);
CHECKL(ArenaGrainSizeCheck(VMPageSize()));
CHECKL(AddrIsAligned(vm->base, VMPageSize()));
CHECKL(AddrIsAligned(vm->limit, VMPageSize()));
CHECKL(ArenaGrainSizeCheck(vm->pageSize));
CHECKL(AddrIsAligned(vm->base, vm->pageSize));
CHECKL(AddrIsAligned(vm->limit, vm->pageSize));
CHECKL(vm->block != NULL);
CHECKL((Addr)vm->block <= vm->base);
CHECKL(vm->mapped <= vm->reserved);
@ -44,14 +45,24 @@ Bool VMCheck(VM vm)
}
/* VMPageSize -- return the page size */
/* PageSize -- return the page size */
Size VMPageSize(void)
/* PageSize -- return the page size for the ANSI fake VM implementation.
 *
 * Always returns the compile-time constant VMAN_PAGE_SIZE.  Takes no
 * VM argument, so it can be called before any VM has been created
 * (e.g. when rounding the arena grain size in VMArenaInit).
 */
Size PageSize(void)
{
return VMAN_PAGE_SIZE;
}
/* VMPageSize -- return the page size cached in the VM
 *
 * Returns vm->pageSize, which VMCreate stored from PageSize() (i.e.
 * VMAN_PAGE_SIZE in this ANSI fake implementation).  AVERT validates
 * the VM's signature and invariants before the field is read.
 */
Size VMPageSize(VM vm)
{
AVERT(VM, vm);
return vm->pageSize;
}
Res VMParamFromArgs(void *params, size_t paramSize, ArgList args)
{
AVER(params != NULL);
@ -73,14 +84,14 @@ Res VMCreate(VM *vmReturn, Size size, Size grainSize, void *params)
AVER(size > 0);
AVER(params != NULL);
pageSize = VMPageSize();
pageSize = PageSize();
/* Grains must consist of whole pages. */
AVER(grainSize % pageSize == 0);
/* Check that the rounded-up sizes will fit in a Size. */
size = SizeRoundUp(size, grainSize);
if (size < VMAN_PAGE_SIZE || size > (Size)(size_t)-1)
if (size < grainSize || size > (Size)(size_t)-1)
return ResRESOURCE;
/* Note that because we add a whole grainSize here (not grainSize -
* pageSize), we are not in danger of overflowing vm->limit even if
@ -101,6 +112,7 @@ Res VMCreate(VM *vmReturn, Size size, Size grainSize, void *params)
return ResMEMORY;
}
vm->pageSize = pageSize;
vm->base = AddrAlignUp((Addr)vm->block, grainSize);
vm->limit = AddrAdd(vm->base, size);
AVER(vm->base < vm->limit); /* can't overflow, as discussed above */
@ -189,8 +201,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
memset((void *)base, (int)0, size);
@ -213,8 +225,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
memset((void *)base, 0xCD, size);

View file

@ -64,6 +64,7 @@ SRCID(vmix, "$Id$");
typedef struct VMStruct {
Sig sig; /* <design/sig/> */
Size pageSize; /* operating system page size */
void *block; /* unaligned base of mmap'd memory */
Addr base, limit; /* aligned boundaries of reserved space */
Size reserved; /* total reserved address space */
@ -71,9 +72,9 @@ typedef struct VMStruct {
} VMStruct;
/* VMPageSize -- return operating system page size */
/* PageSize -- return operating system page size */
Size VMPageSize(void)
Size PageSize(void)
{
int pageSize;
@ -87,6 +88,16 @@ Size VMPageSize(void)
}
/* VMPageSize -- return the page size cached in the VM
 *
 * Returns vm->pageSize, the operating system page size captured by
 * VMCreate when this VM was set up.  AVERT validates the VM's
 * signature and invariants before the field is read.
 */
Size VMPageSize(VM vm)
{
AVERT(VM, vm);
return vm->pageSize;
}
/* VMCheck -- check a VM */
Bool VMCheck(VM vm)
@ -96,9 +107,9 @@ Bool VMCheck(VM vm)
CHECKL(vm->limit != 0);
CHECKL(vm->base < vm->limit);
CHECKL(vm->mapped <= vm->reserved);
CHECKL(ArenaGrainSizeCheck(VMPageSize()));
CHECKL(AddrIsAligned(vm->base, VMPageSize()));
CHECKL(AddrIsAligned(vm->limit, VMPageSize()));
CHECKL(ArenaGrainSizeCheck(vm->pageSize));
CHECKL(AddrIsAligned(vm->base, vm->pageSize));
CHECKL(AddrIsAligned(vm->limit, vm->pageSize));
return TRUE;
}
@ -126,7 +137,7 @@ Res VMCreate(VM *vmReturn, Size size, Size grainSize, void *params)
AVER(size > 0);
AVER(params != NULL);
pageSize = VMPageSize();
pageSize = PageSize();
/* Grains must consist of whole pages. */
AVER(grainSize % pageSize == 0);
@ -165,6 +176,7 @@ Res VMCreate(VM *vmReturn, Size size, Size grainSize, void *params)
goto failReserve;
}
vm->pageSize = pageSize;
vm->block = addr;
vm->base = AddrAlignUp(addr, grainSize);
vm->limit = AddrAdd(vm->base, size);
@ -206,7 +218,7 @@ void VMDestroy(VM vm)
r = munmap(vm->block, vm->reserved);
AVER(r == 0);
r = munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), VMPageSize()));
r = munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), vm->pageSize));
AVER(r == 0);
}
@ -262,8 +274,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
@ -295,8 +307,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVER(base < limit);
AVER(base >= vm->base);
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);

View file

@ -58,6 +58,7 @@ SRCID(vmw3, "$Id$");
typedef struct VMStruct {
Sig sig; /* <design/sig/> */
Size pageSize; /* operating system page size */
void *block; /* unaligned base of VirtualAlloc'd space */
Addr base, limit; /* aligned boundaries of reserved space */
Size reserved; /* total reserved address space */
@ -65,9 +66,9 @@ typedef struct VMStruct {
} VMStruct;
/* VMPageSize -- return the operating system page size */
/* PageSize -- return the operating system page size */
Size VMPageSize(void)
Size PageSize(void)
{
SYSTEM_INFO si;
@ -81,6 +82,16 @@ Size VMPageSize(void)
}
/* VMPageSize -- return the page size cached in the VM
 *
 * Returns vm->pageSize, the operating system page size captured by
 * VMCreate when this VM was set up (via GetSystemInfo on Windows —
 * see PageSize above).  AVERT validates the VM's signature and
 * invariants before the field is read.
 */
Size VMPageSize(VM vm)
{
AVERT(VM, vm);
return vm->pageSize;
}
/* VMCheck -- check a VM structure */
Bool VMCheck(VM vm)
@ -90,9 +101,9 @@ Bool VMCheck(VM vm)
CHECKL(vm->limit != 0);
CHECKL(vm->base < vm->limit);
CHECKL(vm->mapped <= vm->reserved);
CHECKL(ArenaGrainSizeCheck(VMPageSize()));
CHECKL(AddrIsAligned(vm->base, VMPageSize()));
CHECKL(AddrIsAligned(vm->limit, VMPageSize()));
CHECKL(ArenaGrainSizeCheck(vm->pageSize));
CHECKL(AddrIsAligned(vm->base, vm->pageSize));
CHECKL(AddrIsAligned(vm->limit, vm->pageSize));
return TRUE;
}
@ -140,7 +151,7 @@ Res VMCreate(VM *vmReturn, Size size, Size grainSize, void *params)
AVER(COMPATTYPE(LPVOID, Addr)); /* .assume.lpvoid-addr */
AVER(COMPATTYPE(SIZE_T, Size));
pageSize = VMPageSize();
pageSize = PageSize();
/* Grains must consist of whole pages. */
AVER(grainSize % pageSize == 0);
@ -174,6 +185,7 @@ Res VMCreate(VM *vmReturn, Size size, Size grainSize, void *params)
AVER(AddrIsAligned(vbase, pageSize));
vm->pageSize = pageSize;
vm->block = vbase;
vm->base = AddrAlignUp(vbase, grainSize);
vm->limit = AddrAdd(vm->base, size);
@ -268,8 +280,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
LPVOID b;
AVERT(VM, vm);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);
@ -298,8 +310,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
BOOL b;
AVERT(VM, vm);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
AVER(vm->base <= base);
AVER(base < limit);
AVER(limit <= vm->limit);