1
Fork 0
mirror of git://git.sv.gnu.org/emacs.git synced 2026-01-26 23:20:29 -08:00

Change.dylan.cottonwood.170544

Copied from Perforce
 Change: 19037
 ServerID: perforce.ravenbrook.com
This commit is contained in:
Gavin Matthews 1997-12-01 17:23:22 +00:00
parent 38f5117ee5
commit 0a6ba2ed34
4 changed files with 106 additions and 58 deletions

View file

@@ -1,6 +1,6 @@
/* impl.c.arena: ARENA IMPLEMENTATION
*
* $HopeName: MMsrc!arena.c(trunk.11) $
* $HopeName: MMsrc!arena.c(trunk.12) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* .readership: Any MPS developer
@@ -40,7 +40,7 @@
/* finalization */
#include "poolmrg.h"
SRCID(arena, "$HopeName: MMsrc!arena.c(trunk.11) $");
SRCID(arena, "$HopeName: MMsrc!arena.c(trunk.12) $");
/* All static data objects are declared here. See .static */
@@ -900,6 +900,10 @@ Res ArenaRetract(Arena arena, Addr base, Size size)
return ResOK;
}
/* These segment functions are low-level and used through-out.
* They are therefore on the critical path and their AVERs are
* so-marked.
*/
/* SegBase -- return the base address of a segment */
@@ -907,9 +911,9 @@ Addr SegBase(Seg seg)
{
Arena arena;
AVERT(Seg, seg);
AVERT_CRITICAL(Seg, seg);
arena = SegArena(seg);
AVERT(Arena, arena);
AVERT_CRITICAL(Arena, arena);
return (*arena->class->segBase)(seg);
}
@@ -919,9 +923,9 @@ Addr SegBase(Seg seg)
Addr SegLimit(Seg seg)
{
Arena arena;
AVERT(Seg, seg);
AVERT_CRITICAL(Seg, seg);
arena = SegArena(seg);
AVERT(Arena, arena);
AVERT_CRITICAL(Arena, arena);
return (*arena->class->segLimit)(seg);
}
@@ -931,9 +935,9 @@ Addr SegLimit(Seg seg)
Size SegSize(Seg seg)
{
Arena arena;
AVERT(Seg, seg);
AVERT_CRITICAL(Seg, seg);
arena = SegArena(seg);
AVERT(Arena, arena);
AVERT_CRITICAL(Arena, arena);
return (*arena->class->segSize)(seg);
}
@@ -975,8 +979,8 @@ Bool SegFirst(Seg *segReturn, Arena arena)
Bool SegNext(Seg *segReturn, Arena arena, Addr addr)
{
AVER(segReturn != NULL);
AVERT(Arena, arena);
AVER_CRITICAL(segReturn != NULL);
AVERT_CRITICAL(Arena, arena);
return (*arena->class->segNext)(segReturn, arena, addr);
}

View file

@@ -1,6 +1,6 @@
/* impl.c.arenavm: VIRTUAL MEMORY BASED ARENA IMPLEMENTATION
*
* $HopeName: MMsrc!arenavm.c(trunk.30) $
* $HopeName: MMsrc!arenavm.c(trunk.31) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* This is the implementation of the Segment abstraction from the VM
@@ -29,7 +29,7 @@
#include "mpm.h"
#include "mpsavm.h"
SRCID(arenavm, "$HopeName: MMsrc!arenavm.c(trunk.30) $");
SRCID(arenavm, "$HopeName: MMsrc!arenavm.c(trunk.31) $");
typedef struct VMArenaStruct *VMArena;
@@ -719,6 +719,9 @@ static void VMSegFree(Seg seg)
EVENT_PP(SegFree, vmArena, seg);
}
/* These Seg functions are low-level and are on the critical path in
* various ways. The more common therefore use AVER_CRITICAL
*/
/* VMSegBase -- return the base address of a segment
*
@@ -733,10 +736,10 @@ static Addr VMSegBase(Seg seg)
Page page;
Index i;
AVERT(Seg, seg);
AVERT_CRITICAL(Seg, seg);
vmArena = SegVMArena(seg);
AVERT(VMArena, vmArena);
AVERT_CRITICAL(VMArena, vmArena);
page = PageOfSeg(seg);
i = page - vmArena->pageTable;
@@ -757,10 +760,10 @@ static Addr VMSegLimit(Seg seg)
VMArena vmArena;
Page page;
AVERT(Seg, seg);
AVERT_CRITICAL(Seg, seg);
vmArena = SegVMArena(seg);
AVERT(VMArena, vmArena);
AVERT_CRITICAL(VMArena, vmArena);
if(SegSingle(seg)) {
return AddrAdd(VMSegBase(seg), vmArena->pageSize);
@@ -779,7 +782,7 @@ static Addr VMSegLimit(Seg seg)
static Size VMSegSize(Seg seg)
{
AVERT(Seg, seg);
AVERT_CRITICAL(Seg, seg);
return AddrOffset(VMSegBase(seg), VMSegLimit(seg));
}
@@ -890,16 +893,16 @@ static Bool VMSegNext(Seg *segReturn, Arena arena, Addr addr)
VMArena vmArena;
Index i;
AVER(segReturn != NULL);
AVER_CRITICAL(segReturn != NULL);
vmArena = ArenaVMArena(arena);
AVERT(VMArena, vmArena);
AVER(AddrIsAligned(addr, arena->alignment));
AVERT_CRITICAL(VMArena, vmArena);
AVER_CRITICAL(AddrIsAligned(addr, arena->alignment));
i = indexOfAddr(vmArena, addr);
/* There are fewer pages than addresses, therefore the */
/* page index can never wrap around */
AVER(i+1 != 0);
AVER_CRITICAL(i+1 != 0);
return segSearch(segReturn, vmArena, i + 1);
}

View file

@@ -1,6 +1,6 @@
/* impl.c.bt: BIT TABLES
*
* $HopeName: MMsrc!bt.c(trunk.10) $
* $HopeName: MMsrc!bt.c(trunk.11) $
* Copyright (C) 1997 Harlequin Group, all rights reserved
*
* READERSHIP
@@ -19,7 +19,7 @@
#include "mpm.h"
SRCID(bt, "$HopeName: MMsrc!bt.c(trunk.10) $");
SRCID(bt, "$HopeName: MMsrc!bt.c(trunk.11) $");
/* is the whole word of bits at this index set? */
@@ -190,14 +190,37 @@ Bool BTIsSetRange(BT bt, Index base, Index limit)
/* design.mps.bt.fun.res-range */
void BTResRange(BT t, Index i, Index j)
void BTResRange(BT t, Index base, Index limit)
{
AVER(BTCheck(t));
AVER(i < j);
Index bitIndex, innerBase, innerLimit;
while(i < j) {
BTRes(t, i);
++i;
AVER(BTCheck(t));
AVER(base < limit);
/* We determine the maximal inner range that has word-aligned */
/* base and limit. We then reset the lead and trailing bits as */
/* bits, and the rest as words. */
innerBase = BTIndexAlignUp(base);
innerLimit = BTIndexAlignDown(limit);
if(innerBase >= innerLimit) { /* no inner range */
for(bitIndex = base; bitIndex < limit; bitIndex++)
BTRes(t, bitIndex);
} else {
Index wordIndex, wordBase, wordLimit;
wordBase = innerBase >> MPS_WORD_SHIFT;
wordLimit = innerLimit >> MPS_WORD_SHIFT;
for(bitIndex = base; bitIndex < innerBase; bitIndex++)
BTRes(t, bitIndex);
for(wordIndex = wordBase; wordIndex < wordLimit; wordIndex++)
t[wordIndex] = (Word)0;
for(bitIndex = innerLimit; bitIndex < limit; bitIndex++)
BTRes(t, bitIndex);
}
}
@@ -392,13 +415,43 @@ Bool BTRangesSame(BT BTx, BT BTy, Index base, Index limit)
void BTCopyInvertRange(BT fromBT, BT toBT, Index base, Index limit)
{
Index i = base;
while(i < limit) {
if (BTGet(fromBT, i))
BTRes(toBT,i);
else
BTSet(toBT,i);
++ i;
Index bitIndex, innerBase, innerLimit;
/* We determine the maximal inner range that has word-aligned */
/* base and limit. We then copy the lead and trailing bits as */
/* bits, and the rest as words. */
innerBase = BTIndexAlignUp(base);
innerLimit = BTIndexAlignDown(limit);
if(innerBase >= innerLimit) { /* no inner range */
for(bitIndex = base; bitIndex < limit; bitIndex++)
if(BTGet(fromBT, bitIndex))
BTRes(toBT, bitIndex);
else
BTSet(toBT, bitIndex);
} else {
Index wordIndex, wordBase, wordLimit;
wordBase = innerBase >> MPS_WORD_SHIFT;
wordLimit = innerLimit >> MPS_WORD_SHIFT;
for(bitIndex = base; bitIndex < innerBase; bitIndex++) {
if (BTGet(fromBT, bitIndex))
BTRes(toBT, bitIndex);
else
BTSet(toBT, bitIndex);
}
for(wordIndex = wordBase; wordIndex < wordLimit; wordIndex++)
toBT[wordIndex] = ~fromBT[wordIndex];
for(bitIndex = innerLimit; bitIndex < limit; bitIndex++) {
if(BTGet(fromBT, bitIndex))
BTRes(toBT, bitIndex);
else
BTSet(toBT, bitIndex);
}
}
}

View file

@@ -1,6 +1,6 @@
/* impl.c.poolawl: AUTOMATIC WEAK LINKED POOL CLASS
*
* $HopeName: MMsrc!poolawl.c(trunk.23) $
* $HopeName: MMsrc!poolawl.c(trunk.24) $
* Copyright (C) 1997 The Harlequin Group Limited. All rights reserved.
*
* READERSHIP
@@ -16,7 +16,7 @@
#include "mpm.h"
#include "mpscawl.h"
SRCID(poolawl, "$HopeName: MMsrc!poolawl.c(trunk.23) $");
SRCID(poolawl, "$HopeName: MMsrc!poolawl.c(trunk.24) $");
#define AWLSig ((Sig)0x519b7a37) /* SIGPooLAWL */
@@ -41,6 +41,7 @@ typedef struct AWLGroupStruct {
BT scanned;
BT alloc;
Count grains;
Count free; /* number of free grains */
} AWLGroupStruct, *AWLGroup;
@@ -147,6 +148,7 @@ static Res AWLGroupCreate(AWLGroup *groupReturn,
SegSetSummary(seg, RefSetUNIV);
SegSetP(seg, group);
group->seg = seg;
group->free = bits;
group->sig = AWLGroupSig;
AVERT(AWLGroup, group);
*groupReturn = group;
@@ -170,17 +172,13 @@ static Bool AWLGroupAlloc(Addr *baseReturn, Addr *limitReturn,
{
Count n; /* number of grains equivalent to alloc size */
Index i, j;
Arena arena;
AVER(baseReturn != NULL);
AVER(limitReturn != NULL);
AVERT(AWLGroup, group);
AVERT(AWL, awl);
AVER(size > 0);
arena = PoolArena(&awl->poolStruct);
AVERT(Arena, arena);
AVER(size << awl->alignShift >= size);
if(size > SegSize(group->seg)) {
return FALSE;
@@ -267,7 +265,6 @@ static Res AWLBufferFill(Seg *segReturn, Addr *baseReturn, Addr *limitReturn,
AWLGroup group;
AWL awl;
Res res;
Arena arena;
Ring node, nextNode;
AVER(segReturn != NULL);
@@ -277,8 +274,6 @@ static Res AWLBufferFill(Seg *segReturn, Addr *baseReturn, Addr *limitReturn,
AVERT(Buffer, buffer);
AVER(size > 0);
arena = PoolArena(pool);
awl = PoolPoolAWL(pool);
AVERT(AWL, awl);
@@ -294,7 +289,8 @@ static Res AWLBufferFill(Seg *segReturn, Addr *baseReturn, Addr *limitReturn,
/* buffered, and has the same ranks as the buffer. */
if(SegBuffer(seg) == NULL &&
SegRankSet(seg) == BufferRankSet(buffer))
if(AWLGroupAlloc(&base, &limit, group, awl, size))
if(group->free << awl->alignShift >= size &&
AWLGroupAlloc(&base, &limit, group, awl, size))
goto found;
}
@@ -346,6 +342,7 @@ static void AWLBufferEmpty(Pool pool, Buffer buffer)
AVER(i <= j);
if(i < j) {
BTResRange(group->alloc, i, j);
group->free += j - i;
}
}
@@ -558,7 +555,6 @@ static Res awlScanSinglePass(Bool *anyScannedReturn,
static Res AWLScan(ScanState ss, Pool pool, Seg seg)
{
Arena arena;
AWL awl;
AWLGroup group;
Bool anyScanned;
@@ -575,8 +571,6 @@ static Res AWLScan(ScanState ss, Pool pool, Seg seg)
awl = PoolPoolAWL(pool);
AVERT(AWL, awl);
arena = PoolArena(pool);
/* If the scanner isn't going to scan all the objects then the */
/* summary of the unscanned objects must be added into the scan */
/* state summary, so that it's a valid summary of the entire */
@@ -608,7 +602,6 @@ static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
Index i;
AWL awl;
AWLGroup group;
Arena arena;
AVERT(Pool, pool);
AVERT(ScanState, ss);
@@ -621,9 +614,6 @@ static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
group = (AWLGroup)SegP(seg);
AVERT(AWLGroup, group);
arena = PoolArena(pool);
AVERT(Arena, arena);
ref = *refIO;
i = AddrOffset(SegBase(seg), ref) >> awl->alignShift;
@@ -666,7 +656,6 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
AWL awl;
AWLGroup group;
Index i;
Arena arena;
AVERT(Pool, pool);
AVERT(Trace, trace);
@@ -677,9 +666,6 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
group = (AWLGroup)SegP(seg);
AVERT(AWLGroup, group);
arena = PoolArena(pool);
AVERT(Arena, arena);
base = SegBase(seg);
i = 0;
@@ -711,6 +697,7 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
BTResRange(group->mark, i, j);
BTSetRange(group->scanned, i, j);
BTResRange(group->alloc, i, j);
group->free += j - i;
}
i = j;
}
@@ -807,5 +794,6 @@ static Bool AWLGroupCheck(AWLGroup group)
CHECKL(group->alloc != NULL);
/* Can't do any real check on ->grains */
CHECKL(group->grains > 0);
CHECKL(group->free <= group->grains);
return TRUE;
}