Undo seghasbuffer change, to simplify the diffs.

Copied from Perforce Change: 185890 ServerID: perforce.ravenbrook.com

parent 36d731c149
commit 82db47ceac

8 changed files with 41 additions and 39 deletions
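In short, the change backs out the SegHasBuffer convenience macro and returns to testing SegBuffer(seg) against NULL directly. A minimal sketch of the two idioms, assembled from the hunks below (not a complete function; surrounding declarations and control flow are abbreviated):

    /* Idiom removed by this commit (the macro is deleted below): */
    #define SegHasBuffer(seg) (SegBuffer(seg) != NULL)

    if (SegHasBuffer(seg)) {
      Buffer buffer = SegBuffer(seg);
      /* ... operate on the segment's buffer ... */
    }

    /* Idiom restored by this commit: */
    buffer = SegBuffer(seg);
    if (buffer != NULL) {
      /* ... operate on the segment's buffer ... */
    }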
@@ -1333,7 +1333,7 @@ static void segBufAttach(Buffer buffer, Addr base, Addr limit,
   found = SegOfAddr(&seg, arena, base);
   AVER(found);
   AVER(segbuf->seg == NULL);
-  AVER(!SegHasBuffer(seg));
+  AVER(SegBuffer(seg) == NULL);
   AVER(SegBase(seg) <= base);
   AVER(limit <= SegLimit(seg));
 
@@ -708,7 +708,6 @@ extern Addr (SegLimit)(Seg seg);
 #define SegOfPoolRing(node) (RING_ELT(Seg, poolRing, (node)))
 #define SegOfGreyRing(node) (&(RING_ELT(GCSeg, greyRing, (node)) \
                                ->segStruct))
-#define SegHasBuffer(seg) (SegBuffer(seg) != NULL)
 #define SegSummary(seg) (((GCSeg)(seg))->summary)
 
 #define SegSetPM(seg, mode) ((void)((seg)->pm = BS_BITFIELD(Access, (mode))))
@@ -169,6 +169,7 @@ static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size,
 static void AMCSegSketch(Seg seg, char *pbSketch, size_t cbSketch)
 {
   amcSeg amcseg;
+  Buffer buffer;
 
   AVER(pbSketch);
   AVER(cbSketch >= 5);

@@ -196,8 +197,10 @@ static void AMCSegSketch(Seg seg, char *pbSketch, size_t cbSketch)
     pbSketch[2] = 'W'; /* White */
   }
 
-  if (SegHasBuffer(seg)) {
-    Buffer buffer = SegBuffer(seg);
+  buffer = SegBuffer(seg);
+  if(buffer == NULL) {
+    pbSketch[3] = '_';
+  } else {
     Bool mut = BufferIsMutator(buffer);
     Bool flipped = ((buffer->mode & BufferModeFLIPPED) != 0);
     Bool trapped = BufferIsTrapped(buffer);

@@ -220,8 +223,6 @@ static void AMCSegSketch(Seg seg, char *pbSketch, size_t cbSketch)
     } else {
       /* I don't know what's going on! */
     }
-  } else {
-    pbSketch[3] = '_';
   }
 
   pbSketch[4] = '\0';

@@ -287,7 +288,7 @@ static Res AMCSegDescribe(Seg seg, mps_lib_FILE *stream)
   if(res != ResOK)
     return res;
 
-  if (SegHasBuffer(seg))
+  if(SegBuffer(seg) != NULL)
     init = BufferGetInit(SegBuffer(seg));
   else
     init = limit;
@@ -669,7 +670,6 @@ static Res amcGenCreate(amcGen *genReturn, AMC amc, GenDesc gen)
   if(res != ResOK)
     goto failGenInit;
   RingInit(&amcgen->amcRing);
   amcgen->segs = 0;
   amcgen->forward = buffer;
   amcgen->sig = amcGenSig;
 
@@ -1200,6 +1200,7 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
   Size condemned = 0;
   amcGen gen;
   AMC amc;
+  Buffer buffer;
   amcSeg amcseg;
   Res res;
 

@@ -1208,8 +1209,8 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg)
   AVERT(Seg, seg);
   amcseg = Seg2amcSeg(seg);
 
-  if (SegHasBuffer(seg)) {
-    Buffer buffer = SegBuffer(seg);
+  buffer = SegBuffer(seg);
+  if(buffer != NULL) {
     AVERT(Buffer, buffer);
 
     if(!BufferIsMutator(buffer)) { /* forwarding buffer */

@@ -1380,7 +1381,7 @@ static Res amcScanNailedOnce(Bool *totalReturn, Bool *moreReturn,
   NailboardClearNewNails(board);
 
   p = SegBase(seg);
-  while (SegHasBuffer(seg)) {
+  while(SegBuffer(seg) != NULL) {
     limit = BufferScanLimit(SegBuffer(seg));
     if(p >= limit) {
       AVER(p == limit);

@@ -1485,7 +1486,7 @@ static Res AMCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
 
   base = AddrAdd(SegBase(seg), format->headerSize);
   /* <design/poolamc/#seg-scan.loop> */
-  while (SegHasBuffer(seg)) {
+  while(SegBuffer(seg) != NULL) {
     limit = AddrAdd(BufferScanLimit(SegBuffer(seg)),
                     format->headerSize);
     if(base >= limit) {

@@ -1929,7 +1930,7 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
   headerSize = format->headerSize;
   ShieldExpose(arena, seg);
   p = SegBase(seg);
-  if (SegHasBuffer(seg)) {
+  if(SegBuffer(seg) != NULL) {
     limit = BufferScanLimit(SegBuffer(seg));
   } else {
     limit = SegLimit(seg);

@@ -1982,13 +1983,13 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
 
   /* Free the seg if we can; fixes .nailboard.limitations.middle. */
   if(preservedInPlaceCount == 0
-     && !SegHasBuffer(seg)
+     && (SegBuffer(seg) == NULL)
      && (SegNailed(seg) == TraceSetEMPTY)) {
 
     amcGen gen = amcSegGen(seg);
 
     /* We may not free a buffered seg. */
-    AVER(!SegHasBuffer(seg));
+    AVER(SegBuffer(seg) == NULL);
 
     PoolGenReclaim(&gen->pgen, SegSize(seg), Seg2amcSeg(seg)->deferred);
     PoolGenFree(&gen->pgen, seg);

@@ -2065,7 +2066,7 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg)
 
   /* We may not free a buffered seg. (But all buffered + condemned */
   /* segs should have been nailed anyway). */
-  AVER(!SegHasBuffer(seg));
+  AVER(SegBuffer(seg) == NULL);
 
   trace->reclaimSize += SegSize(seg);
 

@@ -2134,7 +2135,7 @@ static void AMCWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f,
 
   /* If the segment is buffered, only walk as far as the end */
   /* of the initialized objects. cf. AMCScan */
-  if(SegHasBuffer(seg))
+  if(SegBuffer(seg) != NULL)
     limit = BufferScanLimit(SegBuffer(seg));
   else
     limit = SegLimit(seg);

@@ -2232,7 +2233,7 @@ static Res AMCAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr)
 
   arena = PoolArena(pool);
   base = SegBase(seg);
-  if (SegHasBuffer(seg)) {
+  if (SegBuffer(seg) != NULL) {
     /* We use BufferGetInit here (and not BufferScanLimit) because we
      * want to be able to find objects that have been allocated and
      * committed since the last flip. These objects lie between the
@@ -288,7 +288,7 @@ static void AMSSegFinish(Seg seg)
   ams = amsseg->ams;
   AVERT(AMS, ams);
   arena = PoolArena(AMS2Pool(ams));
-  AVER(!SegHasBuffer(seg));
+  AVER(SegBuffer(seg) == NULL);
 
   /* keep the destructions in step with AMSSegInit failure cases */
   amsDestroyTables(ams, amsseg->allocTable, amsseg->nongreyTable,

@@ -972,7 +972,7 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
     seg = AMSSeg2Seg(amsseg);
 
     if (SegRankSet(seg) == rankSet
-        && !SegHasBuffer(seg)
+        && SegBuffer(seg) == NULL
         /* Can't use a white or grey segment, see d.m.p.fill.colour. */
         && SegWhite(seg) == TraceSetEMPTY
        && SegGrey(seg) == TraceSetEMPTY)

@@ -1105,6 +1105,7 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
 {
   AMS ams;
   AMSSeg amsseg;
+  Buffer buffer; /* the seg's buffer, if it has one */
   Count uncondemned;
 
   AVERT(Pool, pool);

@@ -1144,8 +1145,8 @@ static Res AMSWhiten(Pool pool, Trace trace, Seg seg)
     amsseg->allocTableInUse = TRUE;
   }
 
-  if (SegHasBuffer(seg)) { /* <design/poolams/#condemn.buffer> */
-    Buffer buffer = SegBuffer(seg);
+  buffer = SegBuffer(seg);
+  if (buffer != NULL) { /* <design/poolams/#condemn.buffer> */
     Index scanLimitIndex, limitIndex;
     scanLimitIndex = AMS_ADDR_INDEX(seg, BufferScanLimit(buffer));
     limitIndex = AMS_ADDR_INDEX(seg, BufferLimit(buffer));

@@ -1631,7 +1632,7 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
   amsseg->colourTablesInUse = FALSE;
   SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
 
-  if (amsseg->freeGrains == grains && !SegHasBuffer(seg))
+  if (amsseg->freeGrains == grains && SegBuffer(seg) == NULL)
     /* No survivors */
     PoolGenFree(&ams->pgen, seg);
 }
@@ -654,7 +654,7 @@ static Res AWLBufferFill(Addr *baseReturn, Addr *limitReturn,
 
     /* Only try to allocate in the segment if it is not already */
     /* buffered, and has the same ranks as the buffer. */
-    if (!SegHasBuffer(seg)
+    if (SegBuffer(seg) == NULL
         && SegRankSet(seg) == BufferRankSet(buffer)
         && AWLGrainsSize(awl, awlseg->freeGrains) >= size
         && AWLSegAlloc(&base, &limit, awlseg, awl, size))

@@ -749,6 +749,7 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
 {
   AWL awl;
   AWLSeg awlseg;
+  Buffer buffer;
   Count uncondemned;
 
   /* All parameters checked by generic PoolWhiten. */

@@ -757,17 +758,17 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
   AVERT(AWL, awl);
   awlseg = Seg2AWLSeg(seg);
   AVERT(AWLSeg, awlseg);
+  buffer = SegBuffer(seg);
 
   /* Can only whiten for a single trace, */
   /* see <design/poolawl/#fun.condemn> */
   AVER(SegWhite(seg) == TraceSetEMPTY);
 
-  if (!SegHasBuffer(seg)) {
+  if(buffer == NULL) {
     awlRangeWhiten(awlseg, 0, awlseg->grains);
     uncondemned = (Count)0;
   } else {
     /* Whiten everything except the buffer. */
-    Buffer buffer = SegBuffer(seg);
     Addr base = SegBase(seg);
     Index scanLimitIndex = awlIndexOfAddr(base, awl, BufferScanLimit(buffer));
     Index limitIndex = awlIndexOfAddr(base, awl, BufferLimit(buffer));

@@ -825,7 +826,7 @@ static void AWLGrey(Pool pool, Trace trace, Seg seg)
   AVERT(AWLSeg, awlseg);
 
   SegSetGrey(seg, TraceSetAdd(SegGrey(seg), trace));
-  if (SegHasBuffer(seg)) {
+  if (SegBuffer(seg) != NULL) {
     Addr base = SegBase(seg);
     Buffer buffer = SegBuffer(seg);
 

@@ -1130,7 +1131,7 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
       continue;
     }
     p = awlAddrOfIndex(base, awl, i);
-    if (SegHasBuffer(seg)) {
+    if (SegBuffer(seg) != NULL) {
       Buffer buffer = SegBuffer(seg);
 
       if(p == BufferScanLimit(buffer)

@@ -1248,7 +1249,7 @@ static void AWLWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f,
   Addr next;
   Index i;
 
-  if (SegHasBuffer(seg)) {
+  if (SegBuffer(seg) != NULL) {
     Buffer buffer = SegBuffer(seg);
     if (object == BufferScanLimit(buffer)
         && BufferScanLimit(buffer) != BufferLimit(buffer)) {
@@ -252,7 +252,7 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn,
   AVER(agrains <= loseg->freeGrains);
   AVER(size <= SegSize(seg));
 
-  if (SegHasBuffer(seg))
+  if (SegBuffer(seg) != NULL)
     /* Don't bother trying to allocate from a buffered segment */
     return FALSE;
 

@@ -338,11 +338,11 @@ static void loSegReclaim(LOSeg loseg, Trace trace)
    */
   p = base;
   while(p < limit) {
+    Buffer buffer = SegBuffer(seg);
     Addr q;
     Index i;
 
-    if (SegHasBuffer(seg)) {
-      Buffer buffer = SegBuffer(seg);
+    if(buffer != NULL) {
       marked = TRUE;
       if (p == BufferScanLimit(buffer)
           && BufferScanLimit(buffer) != BufferLimit(buffer)) {

@@ -429,7 +429,7 @@ static void LOWalk(Pool pool, Seg seg,
   Addr next;
   Index j;
 
-  if (SegHasBuffer(seg)) {
+  if(SegBuffer(seg) != NULL) {
     Buffer buffer = SegBuffer(seg);
     if(object == BufferScanLimit(buffer) &&
        BufferScanLimit(buffer) != BufferLimit(buffer)) {

@@ -676,6 +676,7 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
 {
   LO lo;
   LOSeg loseg;
+  Buffer buffer;
   Count grains, uncondemned;
 
   AVERT(Pool, pool);

@@ -691,8 +692,8 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
   grains = loSegGrains(loseg);
 
   /* Whiten allocated objects; leave free areas black. */
-  if (SegHasBuffer(seg)) {
-    Buffer buffer = SegBuffer(seg);
+  buffer = SegBuffer(seg);
+  if (buffer != NULL) {
     Addr base = SegBase(seg);
     Index scanLimitIndex = loIndexOfAddr(base, lo, BufferScanLimit(buffer));
     Index limitIndex = loIndexOfAddr(base, lo, BufferLimit(buffer));
@@ -520,7 +520,7 @@ static Res SNCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
 
   /* If the segment is buffered, only walk as far as the end */
   /* of the initialized objects. */
-  if (SegHasBuffer(seg)) {
+  if (SegBuffer(seg) != NULL) {
     limit = BufferScanLimit(SegBuffer(seg));
   } else {
     limit = SegLimit(seg);

@@ -648,7 +648,7 @@ static void SNCWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f,
 
   /* If the segment is buffered, only walk as far as the end */
   /* of the initialized objects. Cf. SNCScan. */
-  if (SegHasBuffer(seg))
+  if (SegBuffer(seg) != NULL)
     limit = BufferScanLimit(SegBuffer(seg));
   else
     limit = SegLimit(seg);
@@ -334,7 +334,6 @@ static Res AMSTInit(Pool pool, ArgList args)
   Res res;
-  unsigned gen = AMS_GEN_DEFAULT;
   ArgStruct arg;
   unsigned gen = AMS_GEN_DEFAULT;
 
   AVERT(Pool, pool);
   AVERT(ArgList, args);
@@ -556,7 +555,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
   if (SegLimit(seg) == limit && SegBase(seg) == base) {
     if (amstseg->prev != NULL) {
       Seg segLo = AMSTSeg2Seg(amstseg->prev);
-      if (!SegHasBuffer(segLo) && SegGrey(segLo) == SegGrey(seg)) {
+      if (SegBuffer(segLo) == NULL && SegGrey(segLo) == SegGrey(seg)) {
         /* .merge */
         Seg mergedSeg;
         Res mres;