1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-04-27 16:51:06 -07:00

Eliminating the VMArena2Arena and Arena2VMArena macros in favour of MustBeA.

Copied from Perforce
 Change: 191306
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Richard Brooksby 2016-04-19 22:51:51 +01:00
parent cb620cf755
commit dd14518ade

View file

@ -1,7 +1,7 @@
/* arenavm.c: VIRTUAL MEMORY ARENA CLASS
*
* $Id$
* Copyright (c) 2001-2016 Ravenbrook Limited. See end of file for license.
*
*
* DESIGN
@ -56,7 +56,7 @@ typedef struct VMChunkStruct {
/* VMChunkVMArena -- get the VM arena from a VM chunk
 *
 * Uses the checked MustBeA cast (replacing the old Arena2VMArena
 * downcast macro) so the conversion is verified at varieties with
 * checking enabled.
 */
#define VMChunkVMArena(vmchunk) \
  MustBeA(VMArena, ChunkArena(VMChunk2Chunk(vmchunk)))
/* VMArena
@ -81,8 +81,6 @@ typedef struct VMArenaStruct { /* VM arena structure */
Sig sig; /* <design/sig/> */
} VMArenaStruct;
#define Arena2VMArena(arena) PARENT(VMArenaStruct, arenaStruct, arena)
#define VMArena2Arena(vmarena) (&(vmarena)->arenaStruct)
#define VMArenaVM(vmarena) (&(vmarena)->vmStruct)
@ -163,7 +161,7 @@ static Bool VMArenaCheck(VMArena vmArena)
VMChunk primary;
CHECKS(VMArena, vmArena);
arena = VMArena2Arena(vmArena);
arena = MustBeA(AbstractArena, vmArena);
CHECKD(Arena, arena);
/* spare pages are committed, so must be less spare than committed. */
CHECKL(vmArena->spareSize <= arena->committed);
@ -192,15 +190,12 @@ static Bool VMArenaCheck(VMArena vmArena)
static Res VMArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth)
{
Res res;
VMArena vmArena;
VMArena vmArena = CouldBeA(VMArena, arena);
if (!TESTT(Arena, arena))
return ResFAIL;
if (!TESTC(VMArena, vmArena))
return ResPARAM;
if (stream == NULL)
return ResFAIL;
vmArena = Arena2VMArena(arena);
if (!TESTT(VMArena, vmArena))
return ResFAIL;
return ResPARAM;
/* Describe the superclass fields first via next-method call */
/* ...but the next method is ArenaTrivDescribe, so don't call it;
@ -233,14 +228,12 @@ static Res VMArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth)
*/
static Res vmArenaMap(VMArena vmArena, VM vm, Addr base, Addr limit)
{
Arena arena;
Size size;
Arena arena = MustBeA(AbstractArena, vmArena);
Size size = AddrOffset(base, limit);
Res res;
/* no checking as function is local to module */
arena = VMArena2Arena(vmArena);
size = AddrOffset(base, limit);
/* committed can't overflow (since we can't commit more memory than */
/* address space), but we're paranoid. */
AVER(arena->committed < arena->committed + size);
@ -258,18 +251,14 @@ static Res vmArenaMap(VMArena vmArena, VM vm, Addr base, Addr limit)
/* vmArenaUnmap -- unmap [base, limit) from vm and deduct the span
 * from the arena's committed total (the inverse of vmArenaMap). */
static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
{
  Arena arena = MustBeA(AbstractArena, vmArena);
  Size size = AddrOffset(base, limit);

  /* no checking as function is local to module */

  AVER(size <= arena->committed); /* can't uncommit more than is committed */
  VMUnmap(vm, base, limit);
  arena->committed -= size;
}
@ -281,7 +270,7 @@ static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
*/
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
Arena arena;
Arena arena = MustBeA(AbstractArena, vmArena);
Res res;
Addr base, limit, chunkStructLimit;
VMStruct vmStruct;
@ -293,7 +282,6 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
AVER(chunkReturn != NULL);
AVERT(VMArena, vmArena);
arena = VMArena2Arena(vmArena);
AVER(size > 0);
res = VMInit(vm, size, ArenaGrainSize(arena), vmArena->vmParams);
@ -552,7 +540,7 @@ static Res VMArenaCreate(Arena *arenaReturn, ArgList args)
goto failVMMap;
vmArena = (VMArena)VMBase(vm);
arena = VMArena2Arena(vmArena);
arena = CouldBeA(AbstractArena, vmArena);
res = NextMethod(Arena, VMArena, init)(arena, grainSize, args);
if (res != ResOK)
@ -622,12 +610,9 @@ failVMInit:
static void VMArenaDestroy(Arena arena)
{
VMArena vmArena = MustBeA(VMArena, arena);
VMStruct vmStruct;
VM vm = &vmStruct;
VMArena vmArena;
vmArena = Arena2VMArena(arena);
AVERT(VMArena, vmArena);
EVENT1(ArenaDestroy, vmArena);
@ -690,14 +675,10 @@ static Size vmArenaChunkSize(VMArena vmArena, Size size)
*/
static Res VMArenaGrow(Arena arena, LocusPref pref, Size size)
{
VMArena vmArena = MustBeA(VMArena, arena);
Chunk newChunk;
Size chunkSize;
Res res;
VMArena vmArena;
AVERT(Arena, arena);
vmArena = Arena2VMArena(arena);
AVERT(VMArena, vmArena);
/* TODO: Ensure that extended arena will be able to satisfy pref. */
AVERT(LocusPref, pref);
@ -705,8 +686,7 @@ static Res VMArenaGrow(Arena arena, LocusPref pref, Size size)
chunkSize = vmArenaChunkSize(vmArena, size);
EVENT3(vmArenaExtendStart, size, chunkSize,
ArenaReserved(VMArena2Arena(vmArena)));
EVENT3(vmArenaExtendStart, size, chunkSize, ArenaReserved(arena));
/* .chunk-create.fail: If we fail, try again with a smaller size */
{
@ -729,8 +709,7 @@ static Res VMArenaGrow(Arena arena, LocusPref pref, Size size)
/* remove slices, down to chunkHalf but no further */
for(; chunkSize > chunkHalf; chunkSize -= sliceSize) {
if(chunkSize < chunkMin) {
EVENT2(vmArenaExtendFail, chunkMin,
ArenaReserved(VMArena2Arena(vmArena)));
EVENT2(vmArenaExtendFail, chunkMin, ArenaReserved(arena));
return res;
}
res = VMChunkCreate(&newChunk, vmArena, chunkSize);
@ -741,8 +720,8 @@ static Res VMArenaGrow(Arena arena, LocusPref pref, Size size)
}
vmArenaGrow_Done:
EVENT2(vmArenaExtendDone, chunkSize, ArenaReserved(VMArena2Arena(vmArena)));
vmArena->extended(VMArena2Arena(vmArena),
EVENT2(vmArenaExtendDone, chunkSize, ArenaReserved(arena));
vmArena->extended(arena,
newChunk->base,
AddrOffset(newChunk->base, newChunk->limit));
@ -787,7 +766,7 @@ static void sparePageRelease(VMChunk vmChunk, Index pi)
static Res pageDescMap(VMChunk vmChunk, Index basePI, Index limitPI)
{
Size before = VMMapped(VMChunkVM(vmChunk));
Arena arena = VMArena2Arena(VMChunkVMArena(vmChunk));
Arena arena = MustBeA(AbstractArena, VMChunkVMArena(vmChunk));
Res res = SparseArrayMap(&vmChunk->pages, basePI, limitPI);
Size after = VMMapped(VMChunkVM(vmChunk));
AVER(before <= after);
@ -799,7 +778,7 @@ static void pageDescUnmap(VMChunk vmChunk, Index basePI, Index limitPI)
{
Size size, after;
Size before = VMMapped(VMChunkVM(vmChunk));
Arena arena = VMArena2Arena(VMChunkVMArena(vmChunk));
Arena arena = MustBeA(AbstractArena, VMChunkVMArena(vmChunk));
SparseArrayUnmap(&vmChunk->pages, basePI, limitPI);
after = VMMapped(VMChunkVM(vmChunk));
AVER(after <= before);
@ -873,6 +852,7 @@ static Res VMPagesMarkAllocated(Arena arena, Chunk chunk,
Index baseIndex, Count pages, Pool pool)
{
Res res;
VMArena vmArena = MustBeA(VMArena, arena);
AVERT(Arena, arena);
AVERT(Chunk, chunk);
@ -881,7 +861,7 @@ static Res VMPagesMarkAllocated(Arena arena, Chunk chunk,
AVER(baseIndex + pages <= chunk->pages);
AVERT(Pool, pool);
res = pagesMarkAllocated(Arena2VMArena(arena),
res = pagesMarkAllocated(vmArena,
Chunk2VMChunk(chunk),
baseIndex,
pages,
@ -895,7 +875,7 @@ static Res VMPagesMarkAllocated(Arena arena, Chunk chunk,
success if we have enough spare pages. */
if (VMPurgeSpare(arena, pages * ChunkPageSize(chunk)) == 0)
break;
res = pagesMarkAllocated(Arena2VMArena(arena),
res = pagesMarkAllocated(vmArena,
Chunk2VMChunk(chunk),
baseIndex,
pages,
@ -971,13 +951,10 @@ static Size chunkUnmapAroundPage(Chunk chunk, Size size, Page page)
static Size arenaUnmapSpare(Arena arena, Size size, Chunk filter)
{
VMArena vmArena = MustBeA(VMArena, arena);
Ring node;
Size purged = 0;
VMArena vmArena;
AVERT(Arena, arena);
vmArena = Arena2VMArena(arena);
AVERT(VMArena, vmArena);
if (filter != NULL)
AVERT(Chunk, filter);
@ -1039,9 +1016,7 @@ static void VMFree(Addr base, Size size, Pool pool)
AVER(size > (Size)0);
AVERT(Pool, pool);
arena = PoolArena(pool);
AVERT(Arena, arena);
vmArena = Arena2VMArena(arena);
AVERT(VMArena, vmArena);
vmArena = MustBeA(VMArena, arena);
/* All chunks have same pageSize. */
AVER(SizeIsAligned(size, ChunkPageSize(arena->primary)));
@ -1097,13 +1072,10 @@ static Bool vmChunkCompact(Tree tree, void *closure)
{
Chunk chunk;
Arena arena = closure;
VMArena vmArena;
VMArena vmArena = MustBeA(VMArena, arena);
AVERT(Tree, tree);
AVERT(Arena, arena);
vmArena = Arena2VMArena(arena);
AVERT(VMArena, vmArena);
chunk = ChunkOfTree(tree);
AVERT(Chunk, chunk);
if(chunk != arena->primary
@ -1125,11 +1097,8 @@ static Bool vmChunkCompact(Tree tree, void *closure)
static void VMCompact(Arena arena, Trace trace)
{
VMArena vmArena;
Size vmem1;
vmArena = Arena2VMArena(arena);
AVERT(VMArena, vmArena);
AVERT(Trace, trace);
vmem1 = ArenaReserved(arena);
@ -1165,8 +1134,7 @@ mps_res_t mps_arena_vm_growth(mps_arena_t mps_arena,
ArenaEnter(arena);
AVERT(Arena, arena);
vmArena = Arena2VMArena(arena);
AVERT(VMArena, vmArena);
vmArena = MustBeA(VMArena, arena);
/* Must desire at least the minimum increment! */
AVER(desired >= minimum);
@ -1210,7 +1178,7 @@ mps_arena_class_t mps_arena_class_vm(void)
/* C. COPYRIGHT AND LICENSE
*
* Copyright (C) 2001-2016 Ravenbrook Limited <http://www.ravenbrook.com/>.
* All rights reserved. This is an open source license. Contact
* Ravenbrook for commercial licensing options.
*