New function SegHasBuffer.
Copied from Perforce Change: 191537 ServerID: perforce.ravenbrook.com
parent 36b227c0c7
commit 8e518349f4
8 changed files with 27 additions and 18 deletions
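In outline, the change adds a boolean accessor alongside SegBuffer, declares it with the other Seg operations, and rewrites call sites that previously compared SegBuffer(seg) against NULL. A minimal sketch of the new function and the call-site pattern, drawn from the hunks below (surrounding context abridged):

    /* SegHasBuffer -- segment has a buffer? */
    Bool SegHasBuffer(Seg seg)
    {
      return SegBuffer(seg) != NULL;
    }

    /* call sites change from */
    if (SegBuffer(seg) != NULL) { /* ... */ }
    /* to */
    if (SegHasBuffer(seg)) { /* ... */ }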
@@ -1195,7 +1195,7 @@ static void segBufAttach(Buffer buffer, Addr base, Addr limit,
   found = SegOfAddr(&seg, arena, base);
   AVER(found);
   AVER(segbuf->seg == NULL);
-  AVER(SegBuffer(seg) == NULL);
+  AVER(!SegHasBuffer(seg));
   AVER(SegBase(seg) <= base);
   AVER(limit <= SegLimit(seg));
 
@@ -693,6 +693,7 @@ extern Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi);
 extern Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at);
 extern Res SegDescribe(Seg seg, mps_lib_FILE *stream, Count depth);
 extern void SegSetSummary(Seg seg, RefSet summary);
+extern Bool SegHasBuffer(Seg seg);
 extern Buffer SegBuffer(Seg seg);
 extern void SegSetBuffer(Seg seg, Buffer buffer);
 extern Addr SegBufferScanLimit(Seg seg);
@@ -287,7 +287,7 @@ static Res AMCSegDescribe(Seg seg, mps_lib_FILE *stream, Count depth)
   if(res != ResOK)
     return res;
 
-  if(SegBuffer(seg) != NULL)
+  if(SegHasBuffer(seg))
     init = BufferGetInit(SegBuffer(seg));
   else
     init = limit;
@@ -1303,7 +1303,7 @@ static Res amcScanNailedOnce(Bool *totalReturn, Bool *moreReturn,
   NailboardClearNewNails(board);
 
   p = SegBase(seg);
-  while(SegBuffer(seg) != NULL) {
+  while (SegHasBuffer(seg)) {
     limit = BufferScanLimit(SegBuffer(seg));
     if(p >= limit) {
       AVER(p == limit);
@@ -1406,7 +1406,7 @@ static Res AMCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
 
   base = AddrAdd(SegBase(seg), format->headerSize);
   /* <design/poolamc/#seg-scan.loop> */
-  while(SegBuffer(seg) != NULL) {
+  while (SegHasBuffer(seg)) {
     limit = AddrAdd(BufferScanLimit(SegBuffer(seg)),
                     format->headerSize);
     if(base >= limit) {
@@ -1773,13 +1773,13 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg)
 
   /* Free the seg if we can; fixes .nailboard.limitations.middle. */
   if(preservedInPlaceCount == 0
-     && (SegBuffer(seg) == NULL)
+     && (!SegHasBuffer(seg))
      && (SegNailed(seg) == TraceSetEMPTY)) {
 
     amcGen gen = amcSegGen(seg);
 
     /* We may not free a buffered seg. */
-    AVER(SegBuffer(seg) == NULL);
+    AVER(!SegHasBuffer(seg));
 
     PoolGenFree(&gen->pgen, seg, 0, SegSize(seg), 0, Seg2amcSeg(seg)->deferred);
   }
@@ -1824,7 +1824,7 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg)
 
   /* We may not free a buffered seg. (But all buffered + condemned */
   /* segs should have been nailed anyway). */
-  AVER(SegBuffer(seg) == NULL);
+  AVER(!SegHasBuffer(seg));
 
   STATISTIC(trace->reclaimSize += SegSize(seg));
 
@@ -1954,7 +1954,7 @@ static Res AMCAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr)
 
   arena = PoolArena(pool);
   base = SegBase(seg);
-  if (SegBuffer(seg) != NULL) {
+  if (SegHasBuffer(seg)) {
     /* We use BufferGetInit here (and not BufferScanLimit) because we
      * want to be able to find objects that have been allocated and
      * committed since the last flip. These objects lie between the
@@ -287,7 +287,7 @@ static void AMSSegFinish(Seg seg)
   ams = amsseg->ams;
   AVERT(AMS, ams);
   arena = PoolArena(AMSPool(ams));
-  AVER(SegBuffer(seg) == NULL);
+  AVER(!SegHasBuffer(seg));
 
   /* keep the destructions in step with AMSSegInit failure cases */
   amsDestroyTables(ams, amsseg->allocTable, amsseg->nongreyTable,
@@ -975,7 +975,7 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
     seg = AMSSeg2Seg(amsseg);
 
     if (SegRankSet(seg) == rankSet
-        && SegBuffer(seg) == NULL
+        && !SegHasBuffer(seg)
         /* Can't use a white or grey segment, see d.m.p.fill.colour. */
         && SegWhite(seg) == TraceSetEMPTY
         && SegGrey(seg) == TraceSetEMPTY)
@@ -1637,7 +1637,7 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg)
   amsseg->colourTablesInUse = FALSE;
   SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
 
-  if (amsseg->freeGrains == grains && SegBuffer(seg) == NULL)
+  if (amsseg->freeGrains == grains && !SegHasBuffer(seg))
     /* No survivors */
     PoolGenFree(&ams->pgen, seg,
                 AMSGrainsSize(ams, amsseg->freeGrains),
@@ -658,7 +658,7 @@ static Res AWLBufferFill(Addr *baseReturn, Addr *limitReturn,
 
     /* Only try to allocate in the segment if it is not already */
     /* buffered, and has the same ranks as the buffer. */
-    if (SegBuffer(seg) == NULL
+    if (!SegHasBuffer(seg)
         && SegRankSet(seg) == BufferRankSet(buffer)
         && AWLGrainsSize(awl, awlseg->freeGrains) >= size
         && AWLSegAlloc(&base, &limit, awlseg, awl, size))
@@ -833,7 +833,7 @@ static void AWLGrey(Pool pool, Trace trace, Seg seg)
   AVERT(AWLSeg, awlseg);
 
   SegSetGrey(seg, TraceSetAdd(SegGrey(seg), trace));
-  if (SegBuffer(seg) != NULL) {
+  if (SegHasBuffer(seg)) {
     Addr base = SegBase(seg);
     Buffer buffer = SegBuffer(seg);
 
@@ -1259,7 +1259,7 @@ static void AWLWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
     Addr next;
     Index i;
 
-    if (SegBuffer(seg) != NULL) {
+    if (SegHasBuffer(seg)) {
       Buffer buffer = SegBuffer(seg);
       if (object == BufferScanLimit(buffer)
           && BufferScanLimit(buffer) != BufferLimit(buffer)) {
@@ -251,7 +251,7 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn,
   AVER(agrains <= loseg->freeGrains);
   AVER(size <= SegSize(seg));
 
-  if(SegBuffer(seg) != NULL)
+  if (SegHasBuffer(seg))
     /* Don't bother trying to allocate from a buffered segment */
     return FALSE;
 
@@ -429,7 +429,7 @@ static void LOWalk(Pool pool, Seg seg, FormattedObjectsVisitor f,
     Addr next;
     Index j;
 
-    if(SegBuffer(seg) != NULL) {
+    if (SegHasBuffer(seg)) {
      Buffer buffer = SegBuffer(seg);
      if(object == BufferScanLimit(buffer) &&
         BufferScanLimit(buffer) != BufferLimit(buffer)) {
@@ -328,6 +328,14 @@ void SegSetRankAndSummary(Seg seg, RankSet rankSet, RefSet summary)
 }
 
 
+/* SegHasBuffer -- segment has a buffer? */
+
+Bool SegHasBuffer(Seg seg)
+{
+  return SegBuffer(seg) != NULL;
+}
+
+
 /* SegBuffer -- return the buffer of a segment */
 
 Buffer SegBuffer(Seg seg)
@@ -640,7 +648,7 @@ Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at)
 
   /* Can only split a buffered segment if the entire buffer is below
    * the split point. */
-  AVER(SegBuffer(seg) == NULL || BufferLimit(SegBuffer(seg)) <= at);
+  AVER(!SegHasBuffer(seg) || BufferLimit(SegBuffer(seg)) <= at);
 
   if (seg->queued)
     ShieldFlush(arena); /* see <design/seg/#split-merge.shield> */
@@ -551,7 +551,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
     if (SegLimit(seg) == limit && SegBase(seg) == base) {
      if (amstseg->prev != NULL) {
        Seg segLo = AMSTSeg2Seg(amstseg->prev);
-        if (SegBuffer(segLo) == NULL &&
+        if (!SegHasBuffer(segLo) &&
           SegGrey(segLo) == SegGrey(seg) &&
           SegWhite(segLo) == SegWhite(seg)) {
          /* .merge */