1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-04-25 07:40:40 -07:00

Catch-up merge from branch/2014-05-28/align to branch/2014-06-14/vm.

Copied from Perforce
 Change: 186664
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Gareth Rees 2014-06-17 15:52:16 +01:00
commit 96a62c5d4f
15 changed files with 82 additions and 74 deletions

View file

@ -89,9 +89,10 @@ static Bool ClientArenaCheck(ClientArena clientArena)
/* clientChunkCreate -- create a ClientChunk */
static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
ClientArena clientArena, Size arenaGrainSize)
static Res clientChunkCreate(Chunk *chunkReturn, ClientArena clientArena,
Addr base, Addr limit)
{
Arena arena;
ClientChunk clChunk;
Chunk chunk;
Addr alignedBase;
@ -101,15 +102,15 @@ static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
void *p;
AVER(chunkReturn != NULL);
AVERT(ClientArena, clientArena);
arena = ClientArena2Arena(clientArena);
AVER(base != (Addr)0);
/* TODO: Should refuse on small chunks, instead of AVERring. */
AVER(limit != (Addr)0);
AVER(limit > base);
AVERT(ArenaGrainSize, arenaGrainSize);
/* Initialize boot block. */
/* Chunk has to be page-aligned, and the boot allocs must be within it. */
alignedBase = AddrAlignUp(base, arenaGrainSize);
alignedBase = AddrAlignUp(base, ArenaGrainSize(arena));
AVER(alignedBase < limit);
res = BootBlockInit(boot, (void *)alignedBase, (void *)limit);
if (res != ResOK)
@ -122,9 +123,8 @@ static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
goto failChunkAlloc;
clChunk = p; chunk = ClientChunk2Chunk(clChunk);
res = ChunkInit(chunk, ClientArena2Arena(clientArena),
alignedBase, AddrAlignDown(limit, arenaGrainSize),
arenaGrainSize, boot);
res = ChunkInit(chunk, arena, alignedBase,
AddrAlignDown(limit, ArenaGrainSize(arena)), boot);
if (res != ResOK)
goto failChunkInit;
@ -278,7 +278,7 @@ static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
/* have to have a valid arena before calling ChunkCreate */
clientArena->sig = ClientArenaSig;
res = clientChunkCreate(&chunk, chunkBase, limit, clientArena, grainSize);
res = clientChunkCreate(&chunk, clientArena, chunkBase, limit);
if (res != ResOK)
goto failChunkCreate;
arena->primary = chunk;
@ -338,8 +338,7 @@ static Res ClientArenaExtend(Arena arena, Addr base, Size size)
limit = AddrAdd(base, size);
clientArena = Arena2ClientArena(arena);
res = clientChunkCreate(&chunk, base, limit, clientArena,
ArenaGrainSize(arena));
res = clientChunkCreate(&chunk, clientArena, base, limit);
return res;
}

View file

@ -105,7 +105,7 @@ static Bool VMChunkCheck(VMChunk vmchunk)
chunk = VMChunk2Chunk(vmchunk);
CHECKD(Chunk, chunk);
CHECKD(VM, VMChunkVM(vmchunk));
CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize()));
CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize(VMChunkVM(vmchunk))));
CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
CHECKD(SparseArray, &vmchunk->pages);
/* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
@ -275,11 +275,10 @@ static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
* chunkReturn, return parameter for the created chunk.
* vmArena, the parent VMArena.
* size, approximate amount of virtual address that the chunk should reserve.
* grainSize, arena grain size.
*/
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size,
Size grainSize)
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
Arena arena;
Res res;
Addr base, limit, chunkStructLimit;
VMStruct vmStruct;
@ -291,11 +290,10 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size,
AVER(chunkReturn != NULL);
AVERT(VMArena, vmArena);
arena = VMArena2Arena(vmArena);
AVER(size > 0);
AVERT(ArenaGrainSize, grainSize);
/* Store VM descriptor on the stack until we have a chunk to put it in. */
res = VMCreate(vm, size, grainSize, vmArena->vmParams);
res = VMCreate(vm, size, ArenaGrainSize(arena), vmArena->vmParams);
if (res != ResOK)
goto failVMCreate;
@ -313,7 +311,7 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size,
goto failChunkAlloc;
vmChunk = p;
/* Calculate the limit of the grain where the chunkStruct resides. */
chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), grainSize);
chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), ArenaGrainSize(arena));
res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
if (res != ResOK)
goto failChunkMap;
@ -321,8 +319,7 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size,
/* Copy VM descriptor into its place in the chunk. */
(void)mps_lib_memcpy(VMChunkVM(vmChunk), vm, sizeof vmStruct);
res = ChunkInit(VMChunk2Chunk(vmChunk), VMArena2Arena(vmArena),
base, limit, grainSize, boot);
res = ChunkInit(VMChunk2Chunk(vmChunk), arena, base, limit, boot);
if (res != ResOK)
goto failChunkInit;
@ -525,7 +522,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
size = arg.val.size;
if (ArgPick(&arg, args, MPS_KEY_ARENA_GRAIN_SIZE))
grainSize = arg.val.size;
grainSize = SizeAlignUp(grainSize, VMPageSize());
grainSize = SizeAlignUp(grainSize, PageSize());
AVER(size > 0);
AVERT(ArenaGrainSize, grainSize);
@ -578,7 +575,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args)
/* have to have a valid arena before calling ChunkCreate */
vmArena->sig = VMArenaSig;
res = VMChunkCreate(&chunk, vmArena, size, grainSize);
res = VMChunkCreate(&chunk, vmArena, size);
if (res != ResOK)
goto failChunkCreate;
@ -743,8 +740,7 @@ static Res VMArenaGrow(Arena arena, SegPref pref, Size size)
VMArenaReserved(VMArena2Arena(vmArena)));
return res;
}
res = VMChunkCreate(&newChunk, vmArena, chunkSize,
ArenaGrainSize(arena));
res = VMChunkCreate(&newChunk, vmArena, chunkSize);
if(res == ResOK)
goto vmArenaGrow_Done;
}

View file

@ -82,7 +82,7 @@ Bool MPMCheck(void)
/* The granularity of memory mapping must be a multiple of the
* granularity of protection (or we might not be able to protect an
* arena grain). */
CHECKL(VMPageSize() % ProtGranularity() == 0);
CHECKL(PageSize() % ProtGranularity() == 0);
return TRUE;
}

View file

@ -143,7 +143,7 @@ void ProtSetup(void)
Size ProtGranularity(void)
{
/* Individual pages can be protected. */
return VMPageSize();
return PageSize();
}

View file

@ -122,7 +122,7 @@ void ProtSetup(void)
Size ProtGranularity(void)
{
/* Individual pages can be protected. */
return VMPageSize();
return PageSize();
}

View file

@ -405,7 +405,7 @@ void ProtSetup(void)
Size ProtGranularity(void)
{
/* Individual pages can be protected. */
return VMPageSize();
return PageSize();
}

View file

@ -11,7 +11,7 @@
static Index pagesLength(SparseArray sa)
{
return (sa->length * sa->elementSize + VMPageSize() - 1) >> sa->shift;
return (sa->length * sa->elementSize + VMPageSize(sa->vm) - 1) >> sa->shift;
}
void SparseArrayInit(SparseArray sa,
@ -26,8 +26,8 @@ void SparseArrayInit(SparseArray sa,
sa->mapped = mapped;
sa->pages = pages;
sa->vm = vm;
AVER(SizeIsP2(VMPageSize()));
sa->shift = SizeLog2(VMPageSize());
AVER(SizeIsP2(VMPageSize(vm)));
sa->shift = SizeLog2(VMPageSize(vm));
BTResRange(mapped, 0, length);
BTResRange(pages, 0, pagesLength(sa));
@ -50,11 +50,11 @@ Bool SparseArrayCheck(SparseArray sa)
CHECKL(sa->base != NULL);
CHECKL(sa->elementSize >= 1);
CHECKD_NOSIG(VM, sa->vm); /* <design/check/#hidden-type> */
CHECKL(sa->elementSize <= VMPageSize());
CHECKL(sa->elementSize <= VMPageSize(sa->vm));
CHECKL(sa->length > 0);
CHECKD_NOSIG(BT, sa->mapped);
CHECKD_NOSIG(BT, sa->pages);
CHECKL(sa->shift == SizeLog2(VMPageSize()));
CHECKL(sa->shift == SizeLog2(VMPageSize(sa->vm)));
return TRUE;
}
@ -140,7 +140,7 @@ void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI)
the page on which the base element resides. If any elements between
there and baseMI are defined, we can't unmap that page, so bump up. */
baseMI = (baseEI * sa->elementSize) >> sa->shift;
i = SizeAlignDown(baseEI * sa->elementSize, VMPageSize()) / sa->elementSize;
i = SizeAlignDown(baseEI * sa->elementSize, VMPageSize(sa->vm)) / sa->elementSize;
if (i < baseEI && !BTIsResRange(sa->mapped, i, baseEI))
++baseMI;
@ -148,7 +148,7 @@ void SparseArrayUnmap(SparseArray sa, Index baseEI, Index limitEI)
the page on which the last element resides. If any elements between
limitMI and there are defined, we can't unmap that page, so bump down. */
limitMI = ((limitEI * sa->elementSize - 1) >> sa->shift) + 1;
i = (SizeAlignUp(limitEI * sa->elementSize, VMPageSize()) +
i = (SizeAlignUp(limitEI * sa->elementSize, VMPageSize(sa->vm)) +
sa->elementSize - 1) / sa->elementSize;
if (i > sa->length)
i = sa->length;

View file

@ -31,7 +31,7 @@ typedef struct SparseArrayStruct {
BT mapped; /* whether elements exist in the array */
BT pages; /* whether underlying pages are mapped */
VM vm; /* where pages are mapped from */
Shift shift; /* SizeLog2(VMPageSize()) TODO: VMShift() */
Shift shift; /* SizeLog2(VMPageSize(vm)) TODO: VMShift(vm) */
} SparseArrayStruct;
extern void SparseArrayInit(SparseArray sa,

View file

@ -162,8 +162,7 @@ Bool ChunkCheck(Chunk chunk)
/* ChunkInit -- initialize generic part of chunk */
Res ChunkInit(Chunk chunk, Arena arena,
Addr base, Addr limit, Align pageSize, BootBlock boot)
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, BootBlock boot)
{
Size size;
Count pages;
@ -175,19 +174,17 @@ Res ChunkInit(Chunk chunk, Arena arena,
/* chunk is supposed to be uninitialized, so don't check it. */
AVERT(Arena, arena);
AVER(base != NULL);
AVER(AddrIsAligned(base, pageSize));
AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
AVER(base < limit);
AVER(AddrIsAligned(limit, pageSize));
AVERT(Align, pageSize);
AVER(pageSize >= MPS_PF_ALIGN);
AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
AVERT(BootBlock, boot);
chunk->serial = (arena->chunkSerial)++;
chunk->arena = arena;
RingInit(&chunk->chunkRing);
chunk->pageSize = pageSize;
chunk->pageShift = pageShift = SizeLog2(pageSize);
chunk->pageSize = ArenaGrainSize(arena);
chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
chunk->base = base;
chunk->limit = limit;
size = ChunkSize(chunk);
@ -198,7 +195,7 @@ Res ChunkInit(Chunk chunk, Arena arena,
goto failAllocTable;
chunk->allocTable = p;
pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), pageSize);
pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
chunk->pageTablePages = pageTableSize >> pageShift;
res = (arena->class->chunkInit)(chunk, boot);
@ -209,7 +206,7 @@ Res ChunkInit(Chunk chunk, Arena arena,
/* Last thing we BootAlloc'd is pageTable. We requested pageSize */
/* alignment, and pageTableSize is itself pageSize aligned, so */
/* BootAllocated should also be pageSize aligned. */
AVER(AddrIsAligned(BootAllocated(boot), pageSize));
AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);
/* Init allocTable after class init, because it might be mapped there. */

View file

@ -162,7 +162,7 @@ typedef struct ChunkStruct {
extern Bool ChunkCheck(Chunk chunk);
extern Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit,
Align pageSize, BootBlock boot);
BootBlock boot);
extern void ChunkFinish(Chunk chunk);
extern Compare ChunkCompare(Tree tree, TreeKey key);
extern TreeKey ChunkKey(Tree tree);

View file

@ -20,9 +20,9 @@ Bool VMCheck(VM vm)
CHECKL(vm->base != (Addr)0);
CHECKL(vm->limit != (Addr)0);
CHECKL(vm->base < vm->limit);
CHECKL(ArenaGrainSizeCheck(VMPageSize()));
CHECKL(AddrIsAligned(vm->base, VMPageSize()));
CHECKL(AddrIsAligned(vm->limit, VMPageSize()));
CHECKL(ArenaGrainSizeCheck(vm->pageSize));
CHECKL(AddrIsAligned(vm->base, vm->pageSize));
CHECKL(AddrIsAligned(vm->limit, vm->pageSize));
CHECKL(vm->block != NULL);
CHECKL((Addr)vm->block <= vm->base);
CHECKL(vm->mapped <= vm->reserved);
@ -30,6 +30,16 @@ Bool VMCheck(VM vm)
}
/* VMPageSize -- return the page size cached in the VM */
/* Out-of-line definition of the accessor. Parenthesizing the function
 * name, (VMPageSize), prevents the function-like macro VMPageSize(vm)
 * -- defined in the header as RVALUE((vm)->pageSize) -- from expanding
 * here, so both the macro and a linkable function exist. Callers that
 * have the header use the macro; this function serves everything else. */
Size (VMPageSize)(VM vm)
{
AVERT(VM, vm);  /* check the VM descriptor is valid before reading it */
return VMPageSize(vm);  /* unparenthesized use: expands to the macro, i.e. vm->pageSize */
}
/* VMBase -- return the base address of the memory reserved */
Addr (VMBase)(VM vm)

View file

@ -16,6 +16,7 @@
typedef struct VMStruct {
Sig sig; /* <design/sig/> */
Size pageSize; /* operating system page size */
void *block; /* unaligned base of mmap'd memory */
Addr base, limit; /* aligned boundaries of reserved space */
Size reserved; /* total reserved address space */
@ -23,12 +24,14 @@ typedef struct VMStruct {
} VMStruct;
#define VMPageSize(vm) RVALUE((vm)->pageSize)
#define VMBase(vm) RVALUE((vm)->base)
#define VMLimit(vm) RVALUE((vm)->limit)
#define VMReserved(vm) RVALUE((vm)->reserved)
#define VMMapped(vm) RVALUE((vm)->mapped)
extern Size VMPageSize(void);
extern Size PageSize(void);
extern Size (VMPageSize)(VM vm);
extern Bool VMCheck(VM vm);
extern Res VMParamFromArgs(void *params, size_t paramSize, ArgList args);
extern Res VMCreate(VM vmReturn, Size size, Size grainSize, void *params);

View file

@ -12,9 +12,9 @@
SRCID(vman, "$Id$");
/* VMPageSize -- return the page size */
/* PageSize -- return the page size */
Size VMPageSize(void)
Size PageSize(void)
{
return VMAN_PAGE_SIZE;
}
@ -41,14 +41,14 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
AVER(size > 0);
AVER(params != NULL);
pageSize = VMPageSize();
pageSize = PageSize();
/* Grains must consist of whole pages. */
AVER(grainSize % pageSize == 0);
/* Check that the rounded-up sizes will fit in a Size. */
size = SizeRoundUp(size, grainSize);
if (size < VMAN_PAGE_SIZE || size > (Size)(size_t)-1)
if (size < grainSize || size > (Size)(size_t)-1)
return ResRESOURCE;
/* Note that because we add a whole grainSize here (not grainSize -
* pageSize), we are not in danger of overflowing vm->limit even if
@ -63,6 +63,7 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
return ResMEMORY;
(void)mps_lib_memset(vbase, VMJunkBYTE, reserved);
vm->pageSize = pageSize;
vm->block = vbase;
vm->base = AddrAlignUp(vbase, grainSize);
vm->limit = AddrAdd(vm->base, size);
@ -109,8 +110,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
(void)mps_lib_memset((void *)base, VMJunkByte, size);
@ -133,8 +134,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(limit, VMAN_PAGE_SIZE));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
AVER(VMMapped(vm) >= size);

View file

@ -59,9 +59,9 @@
SRCID(vmix, "$Id$");
/* VMPageSize -- return operating system page size */
/* PageSize -- return operating system page size */
Size VMPageSize(void)
Size PageSize(void)
{
int pageSize;
@ -96,7 +96,7 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
AVER(size > 0);
AVER(params != NULL);
pageSize = VMPageSize();
pageSize = PageSize();
/* Grains must consist of whole pages. */
AVER(grainSize % pageSize == 0);
@ -122,6 +122,7 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
return ResRESOURCE;
}
vm->pageSize = pageSize;
vm->block = vbase;
vm->base = AddrAlignUp(vbase, grainSize);
vm->limit = AddrAdd(vm->base, size);
@ -171,8 +172,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
AVER(base < limit);
AVER(base >= VMBase(vm));
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
@ -204,8 +205,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVER(base < limit);
AVER(base >= VMBase(vm));
AVER(limit <= VMLimit(vm));
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
size = AddrOffset(base, limit);
AVER(size <= VMMapped(vm));

View file

@ -50,9 +50,9 @@
SRCID(vmw3, "$Id$");
/* VMPageSize -- return the operating system page size */
/* PageSize -- return the operating system page size */
Size VMPageSize(void)
Size PageSize(void)
{
SYSTEM_INFO si;
@ -107,7 +107,7 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
AVER(COMPATTYPE(LPVOID, Addr)); /* .assume.lpvoid-addr */
AVER(COMPATTYPE(SIZE_T, Size));
pageSize = VMPageSize();
pageSize = PageSize();
/* Grains must consist of whole pages. */
AVER(grainSize % pageSize == 0);
@ -132,6 +132,7 @@ Res VMCreate(VM vm, Size size, Size grainSize, void *params)
AVER(AddrIsAligned(vbase, pageSize));
vm->pageSize = pageSize;
vm->block = vbase;
vm->base = AddrAlignUp(vbase, grainSize);
vm->limit = AddrAdd(vm->base, size);
@ -177,8 +178,8 @@ Res VMMap(VM vm, Addr base, Addr limit)
LPVOID b;
AVERT(VM, vm);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= VMLimit(vm));
@ -208,8 +209,8 @@ void VMUnmap(VM vm, Addr base, Addr limit)
Size size;
AVERT(VM, vm);
AVER(AddrIsAligned(base, VMPageSize()));
AVER(AddrIsAligned(limit, VMPageSize()));
AVER(AddrIsAligned(base, vm->pageSize));
AVER(AddrIsAligned(limit, vm->pageSize));
AVER(VMBase(vm) <= base);
AVER(base < limit);
AVER(limit <= VMLimit(vm));