diff --git a/mps/code/arena.c b/mps/code/arena.c index 034c8d54f3b..b81e1f415bd 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -954,6 +954,7 @@ Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit) Res res; AVERT(Arena, arena); + AVER(base < limit); RangeInit(&range, base, limit); res = arenaFreeLandInsertExtend(&oldRange, arena, &range); diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c index 07f57fe65d5..e7251cb4fab 100644 --- a/mps/code/arenavm.c +++ b/mps/code/arenavm.c @@ -308,8 +308,7 @@ static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size) if (res != ResOK) goto failBootInit; - /* Allocate and map the descriptor. */ - /* See .@@@@ */ + /* .overhead.chunk-struct: Allocate and map the chunk structure. */ res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN); if (res != ResOK) goto failChunkAlloc; @@ -361,11 +360,13 @@ static Res VMChunkInit(Chunk chunk, BootBlock boot) vmChunk = Chunk2VMChunk(chunk); AVERT(BootBlock, boot); + /* .overhead.sa-mapped: Chunk overhead for sparse array 'mapped' table. */ res = BootAlloc(&p, boot, BTSize(chunk->pages), MPS_PF_ALIGN); if (res != ResOK) goto failSaMapped; saMapped = p; + /* .overhead.sa-pages: Chunk overhead for sparse array 'pages' table. */ res = BootAlloc(&p, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN); if (res != ResOK) goto failSaPages; @@ -373,8 +374,8 @@ static Res VMChunkInit(Chunk chunk, BootBlock boot) overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot)); - /* Put the page table as late as possible, as in VM systems we don't want */ - /* to map it. */ + /* .overhead.page-table: Put the page table as late as possible, as + * in VM systems we don't want to map it. 
*/ res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift, chunk->pageSize); if (res != ResOK) goto failAllocPageTable; @@ -491,6 +492,64 @@ static void vmArenaTrivContracted(Arena arena, Addr base, Size size) } +/* vmArenaChunkSize -- compute chunk size + * + * Compute the size of the smallest chunk that has size bytes of usable + * address space (that is, after all overheads are accounted for). + * + * If successful, update *chunkSizeReturn with the computed chunk size + * and return ResOK. If size is too large for a chunk, leave + * *chunkSizeReturn unchanged and return ResRESOURCE. + */ +static Res vmArenaChunkSize(Size *chunkSizeReturn, VMArena vmArena, Size size) +{ + Size grainSize; /* Arena grain size. */ + Shift grainShift; /* The corresponding Shift. */ + Count pages; /* Number of usable pages in chunk. */ + Size pageTableSize; /* Size of the page table. */ + Count pageTablePages; /* Number of pages in the page table. */ + Size chunkSize; /* Size of the chunk. */ + Size overhead; /* Total overheads for the chunk. */ + + AVER(chunkSizeReturn != NULL); + AVERT(VMArena, vmArena); + AVER(size > 0); + + grainSize = ArenaGrainSize(VMArena2Arena(vmArena)); + grainShift = SizeLog2(grainSize); + + overhead = 0; + do { + chunkSize = size + overhead; + + /* See .overhead.chunk-struct. */ + overhead = SizeAlignUp(sizeof(VMChunkStruct), MPS_PF_ALIGN); + + /* See .overhead.pages. */ + pages = chunkSize >> grainShift; + overhead += SizeAlignUp(BTSize(pages), MPS_PF_ALIGN); + + /* See .overhead.sa-mapped. */ + overhead += SizeAlignUp(BTSize(pages), MPS_PF_ALIGN); + + /* See .overhead.sa-pages. */ + pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), grainSize); + pageTablePages = pageTableSize >> grainShift; + overhead += SizeAlignUp(BTSize(pageTablePages), MPS_PF_ALIGN); + + /* See .overhead.page-table. 
*/ + overhead = SizeAlignUp(overhead, grainSize); + overhead += SizeAlignUp(pageTableSize, grainSize); + + if (SizeMAX - overhead < size) + return ResRESOURCE; + } while (chunkSize < size + overhead); + + *chunkSizeReturn = chunkSize; + return ResOK; +} + + /* VMArenaInit -- create and initialize the VM arena * * .arena.init: Once the arena has been allocated, we call ArenaInit @@ -589,6 +648,19 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, ArgList args) if (res != ResOK) goto failChunkCreate; +#if defined(AVER_AND_CHECK_ALL) + /* Check that the computation of the chunk size in vmArenaChunkSize + * was correct, now that we have the actual chunk for comparison. */ + { + Size usableSize, computedChunkSize; + usableSize = AddrOffset(PageIndexBase(chunk, chunk->allocBase), + chunk->limit); + res = vmArenaChunkSize(&computedChunkSize, vmArena, usableSize); + AVER(res == ResOK); + AVER(computedChunkSize == ChunkSize(chunk)); + } +#endif + /* .zoneshift: Set the zone shift to divide the chunk into the same */ /* number of stripes as will fit into a reference set (the number of */ /* bits in a word). Fail if the chunk is so small stripes are smaller */ @@ -655,42 +727,16 @@ static void VMArenaFinish(Arena arena) } -/* vmArenaChunkSize -- choose chunk size for arena extension - * - * .vmchunk.overhead: This code still lacks a proper estimate of - * the overhead required by a vmChunk for chunkStruct, page tables - * etc. For now, estimate it as 10%. 
RHSK 2007-12-21 - */ -static Size vmArenaChunkSize(VMArena vmArena, Size size) -{ - Size fraction = 10; /* 10% -- see .vmchunk.overhead */ - Size chunkSize; - Size chunkOverhead; - - /* 1: use extendBy, if it is big enough for size + overhead */ - chunkSize = vmArena->extendBy; - chunkOverhead = chunkSize / fraction; - if(chunkSize > size && (chunkSize - size) >= chunkOverhead) - return chunkSize; - - /* 2: use size + overhead (unless it overflows SizeMAX) */ - chunkOverhead = size / (fraction - 1); - if((SizeMAX - size) >= chunkOverhead) - return size + chunkOverhead; - - /* 3: use SizeMAX */ - return SizeMAX; -} - - /* VMArenaGrow -- Extend the arena by making a new chunk * - * The size arg specifies how much we wish to allocate after the extension. + * size specifies how much we wish to allocate after the extension. + * pref specifies the preference for the location of the allocation. */ static Res VMArenaGrow(Arena arena, LocusPref pref, Size size) { Chunk newChunk; Size chunkSize; + Size chunkMin; Res res; VMArena vmArena; @@ -702,7 +748,10 @@ static Res VMArenaGrow(Arena arena, LocusPref pref, Size size) AVERT(LocusPref, pref); UNUSED(pref); - chunkSize = vmArenaChunkSize(vmArena, size); + res = vmArenaChunkSize(&chunkMin, vmArena, size); + if (res != ResOK) + return res; + chunkSize = vmArena->extendBy; EVENT3(vmArenaExtendStart, size, chunkSize, ArenaReserved(VMArena2Arena(vmArena))); @@ -711,7 +760,6 @@ static Res VMArenaGrow(Arena arena, LocusPref pref, Size size) { unsigned fidelity = 8; /* max fraction of addr-space we may 'waste' */ Size chunkHalf; - Size chunkMin = 4 * 1024; /* typical single page */ Size sliceSize; if (vmArena->extendMin > chunkMin) diff --git a/mps/code/land.c b/mps/code/land.c index 7a2101cc673..cac8e6e91d9 100644 --- a/mps/code/land.c +++ b/mps/code/land.c @@ -198,6 +198,7 @@ Res LandInsert(Range rangeReturn, Land land, Range range) AVERT(Land, land); AVERT(Range, range); AVER(RangeIsAligned(range, land->alignment)); + 
AVER(!RangeIsEmpty(range)); landEnter(land); res = (*land->class->insert)(rangeReturn, land, range); diff --git a/mps/code/tract.c b/mps/code/tract.c index 9aa815f47ba..6883afb9337 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -174,6 +174,7 @@ Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved, Count pages; Shift pageShift; Size pageTableSize; + Addr allocBase; void *p; Res res; @@ -196,6 +197,7 @@ Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved, chunk->reserved = reserved; size = ChunkSize(chunk); + /* .overhead.pages: Chunk overhead for the page allocation table. */ chunk->pages = pages = size >> pageShift; res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN); if (res != ResOK) @@ -219,12 +221,14 @@ Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved, /* Init allocTable after class init, because it might be mapped there. */ BTResRange(chunk->allocTable, 0, pages); + /* Check that there is some usable address space remaining in the chunk. */ + allocBase = PageIndexBase(chunk, chunk->allocBase); + AVER(allocBase < chunk->limit); + /* Add the chunk's free address space to the arena's freeLand, so that we can allocate from it. */ if (arena->hasFreeLand) { - res = ArenaFreeLandInsert(arena, - PageIndexBase(chunk, chunk->allocBase), - chunk->limit); + res = ArenaFreeLandInsert(arena, allocBase, chunk->limit); if (res != ResOK) goto failLandInsert; }