diff --git a/mps/code/abq.c b/mps/code/abq.c
index a0c07932836..3052f7850a6 100644
--- a/mps/code/abq.c
+++ b/mps/code/abq.c
@@ -41,8 +41,8 @@ Res ABQInit(Arena arena, ABQ abq, void *owner, Count items)
AVER(items > 0);
elements = items + 1;
-
- res = ControlAlloc(&p, arena, ABQQueueSize(elements),
+
+ res = ControlAlloc(&p, arena, ABQQueueSize(elements),
/* withReservoirPermit */ FALSE);
if (res != ResOK)
return res;
@@ -56,7 +56,7 @@ Res ABQInit(Arena arena, ABQ abq, void *owner, Count items)
METER_INIT(abq->pop, "pop", owner);
METER_INIT(abq->peek, "peek", owner);
METER_INIT(abq->delete, "delete", owner);
-
+
abq->sig = ABQSig;
AVERT(ABQ, abq);
@@ -68,7 +68,7 @@ Res ABQInit(Arena arena, ABQ abq, void *owner, Count items)
Bool ABQCheck(ABQ abq)
{
Index index;
-
+
CHECKS(ABQ, abq);
CHECKL(abq->elements > 0);
CHECKL(abq->in < abq->elements);
@@ -96,7 +96,7 @@ void ABQFinish(Arena arena, ABQ abq)
METER_EMIT(&abq->peek);
METER_EMIT(&abq->delete);
ControlFree(arena, abq->queue, ABQQueueSize(abq->elements));
-
+
abq->elements = 0;
abq->queue = NULL;
@@ -114,7 +114,7 @@ Res ABQPush(ABQ abq, CBSBlock block)
if (ABQIsFull(abq))
return ResFAIL;
-
+
abq->queue[abq->in] = block;
abq->in = ABQNextIndex(abq, abq->in);
@@ -130,7 +130,7 @@ Res ABQPop(ABQ abq, CBSBlock *blockReturn)
AVERT(ABQ, abq);
METER_ACC(abq->pop, ABQDepth(abq));
-
+
if (ABQIsEmpty(abq))
return ResFAIL;
@@ -138,7 +138,7 @@ Res ABQPop(ABQ abq, CBSBlock *blockReturn)
AVERT(CBSBlock, *blockReturn);
abq->out = ABQNextIndex(abq, abq->out);
-
+
AVERT(ABQ, abq);
return ResOK;
}
@@ -180,7 +180,7 @@ Res ABQDelete(ABQ abq, CBSBlock block)
in = abq->in;
elements = abq->elements;
queue = abq->queue;
-
+
while (index != in) {
if (queue[index] == block) {
goto found;
@@ -248,17 +248,17 @@ Res ABQDescribe(ABQ abq, mps_lib_FILE *stream)
res = METER_WRITE(abq->delete, stream);
if(res != ResOK)
return res;
-
+
res = WriteF(stream, "}\n", NULL);
if(res != ResOK)
return res;
-
+
return ResOK;
}
/* ABQIsEmpty -- Is an ABQ empty? */
-Bool ABQIsEmpty(ABQ abq)
+Bool ABQIsEmpty(ABQ abq)
{
AVERT(ABQ, abq);
@@ -279,7 +279,7 @@ Bool ABQIsFull(ABQ abq)
Count ABQDepth(ABQ abq)
{
Index out, in;
-
+
AVERT(ABQ, abq);
out = abq->out;
in = abq->in;
diff --git a/mps/code/abq.h b/mps/code/abq.h
index 5f3df736bbd..efed10402e7 100644
--- a/mps/code/abq.h
+++ b/mps/code/abq.h
@@ -51,7 +51,7 @@ typedef struct ABQStruct
METER_DECL(pop);
METER_DECL(peek);
METER_DECL(delete);
-
+
Sig sig;
} ABQStruct;
diff --git a/mps/code/abqtest.c b/mps/code/abqtest.c
index 3d932269cf8..4ce11f633df 100644
--- a/mps/code/abqtest.c
+++ b/mps/code/abqtest.c
@@ -43,7 +43,7 @@ static int deleted = 0;
typedef struct TestStruct *Test;
-typedef struct TestStruct
+typedef struct TestStruct
{
Test next;
int id;
@@ -51,7 +51,7 @@ typedef struct TestStruct
} TestStruct;
-static CBSBlock TestCBSBlock(Test t)
+static CBSBlock TestCBSBlock(Test t)
{
return &t->cbsBlockStruct;
}
@@ -79,7 +79,7 @@ static CBSBlock CreateCBSBlock(int no)
return TestCBSBlock(b);
}
-
+
static void DestroyCBSBlock(CBSBlock c)
{
@@ -89,7 +89,7 @@ static void DestroyCBSBlock(CBSBlock c)
testBlocks = b->next;
else {
Test prev;
-
+
for (prev = testBlocks; prev != 0; prev = prev->next)
if (prev->next == b) {
prev->next = b->next;
@@ -132,7 +132,7 @@ static void step(void)
default:
if (!deleted & (pushee > popee)) {
Test b;
-
+
deleted = abqRnd (pushee - popee) + popee;
for (b = testBlocks; b != NULL; b = b->next)
if (b->id == deleted)
diff --git a/mps/code/amcss.c b/mps/code/amcss.c
index 2e2f396ac9f..34983f61783 100644
--- a/mps/code/amcss.c
+++ b/mps/code/amcss.c
@@ -50,7 +50,7 @@ static void enable(mps_arena_t arena)
static void report(mps_arena_t arena)
{
mps_message_t message;
-
+
while (mps_message_get(&message, arena, mps_message_type_gc())) {
size_t live, condemned, not_condemned;
@@ -163,7 +163,7 @@ static void *test(void *arg, size_t s)
break;
}
} while(1);
-
+
report(arena);
for(r = 0; r < exactRootsCOUNT; ++r)
cdie(exactRoots[r] == objNULL ||
diff --git a/mps/code/amcsshe.c b/mps/code/amcsshe.c
index 0e5677776d3..2be9e32be89 100644
--- a/mps/code/amcsshe.c
+++ b/mps/code/amcsshe.c
@@ -51,7 +51,7 @@ static mps_word_t *tvw;
static mps_word_t dylan_make_WV(mps_word_t version, mps_word_t vb,
- mps_word_t es, mps_word_t vf)
+ mps_word_t es, mps_word_t vf)
{
/* VERSION- ... VB------ reserved ES---VF- */
return((version << (MPS_WORD_WIDTH - 8)) |
diff --git a/mps/code/amsss.c b/mps/code/amsss.c
index c8922be2d47..6cc24aba44b 100644
--- a/mps/code/amsss.c
+++ b/mps/code/amsss.c
@@ -25,7 +25,7 @@
#define exactRootsCOUNT 50
#define ambigRootsCOUNT 100
-/* This is enough for three GCs. */
+/* This is enough for three GCs. */
#define totalSizeMAX 800 * (size_t)1024
#define totalSizeSTEP 200 * (size_t)1024
/* objNULL needs to be odd so that it's ignored in exactRoots. */
@@ -158,7 +158,7 @@ int main(int argc, char **argv)
mps_arena_t arena;
mps_thr_t thread;
void *r;
-
+
randomize(argc, argv);
die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE),
diff --git a/mps/code/apss.c b/mps/code/apss.c
index 83afa7dfd52..f95123fbae2 100644
--- a/mps/code/apss.c
+++ b/mps/code/apss.c
@@ -73,7 +73,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
int j = rand()%(testSetSIZE-i);
void *tp;
size_t ts;
-
+
tp = ps[j]; ts = ss[j];
ps[j] = ps[i]; ss[j] = ss[i];
ps[i] = tp; ss[i] = ts;
diff --git a/mps/code/arena.c b/mps/code/arena.c
index f59376be7c8..5fd42184566 100644
--- a/mps/code/arena.c
+++ b/mps/code/arena.c
@@ -2,7 +2,7 @@
*
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
- *
+ *
* .sources: design.mps.arena is the main design document. */
#include "tract.h"
@@ -24,7 +24,7 @@ static Res ArenaTrivDescribe(Arena arena, mps_lib_FILE *stream)
if (!CHECKT(Arena, arena)) return ResFAIL;
if (stream == NULL) return ResFAIL;
- return WriteF(stream,
+ return WriteF(stream,
" No class-specific description available.\n", NULL);
}
@@ -173,7 +173,7 @@ Res ArenaInit(Arena arena, ArenaClass class)
/* initialize the reservoir, design.mps.reservoir */
res = ReservoirInit(&arena->reservoirStruct, arena);
- if (res != ResOK)
+ if (res != ResOK)
goto failReservoirInit;
AVERT(Arena, arena);
@@ -211,11 +211,11 @@ Res ArenaCreateV(Arena *arenaReturn, ArenaClass class, va_list args)
ChunkEncache(arena, arena->primary);
res = ControlInit(arena);
- if (res != ResOK)
+ if (res != ResOK)
goto failControlInit;
res = GlobalsCompleteCreate(ArenaGlobals(arena));
- if (res != ResOK)
+ if (res != ResOK)
goto failGlobalsCompleteCreate;
AVERT(Arena, arena);
@@ -276,11 +276,11 @@ Res ControlInit(Arena arena)
Res res;
AVERT(Arena, arena);
- res = PoolInit(&arena->controlPoolStruct.poolStruct,
+ res = PoolInit(&arena->controlPoolStruct.poolStruct,
arena, PoolClassMV(),
ARENA_CONTROL_EXTENDBY, ARENA_CONTROL_AVGSIZE,
ARENA_CONTROL_MAXSIZE);
- if (res != ResOK)
+ if (res != ResOK)
return res;
arena->poolReady = TRUE; /* design.mps.arena.pool.ready */
return ResOK;
@@ -307,7 +307,7 @@ Res ArenaDescribe(Arena arena, mps_lib_FILE *stream)
if (stream == NULL) return ResFAIL;
res = WriteF(stream, "Arena $P {\n", (WriteFP)arena,
- " class $P (\"$S\")\n",
+ " class $P (\"$S\")\n",
(WriteFP)arena->class, arena->class->name,
NULL);
if (res != ResOK) return res;
@@ -335,7 +335,7 @@ Res ArenaDescribe(Arena arena, mps_lib_FILE *stream)
if (res != ResOK) return res;
res = WriteF(stream,
- "} Arena $P ($U)\n", (WriteFP)arena,
+ "} Arena $P ($U)\n", (WriteFP)arena,
(WriteFU)arena->serial,
NULL);
return res;
@@ -355,7 +355,7 @@ Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream)
if (!CHECKT(Arena, arena)) return ResFAIL;
if (stream == NULL) return ResFAIL;
- b = TractFirst(&tract, arena);
+ b = TractFirst(&tract, arena);
oldLimit = TractBase(tract);
while (b) {
base = TractBase(tract);
@@ -396,7 +396,7 @@ Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream)
* with void* (design.mps.type.addr.use), ControlAlloc must take care of
* allocating so that the block can be addressed with a void*. */
-Res ControlAlloc(void **baseReturn, Arena arena, size_t size,
+Res ControlAlloc(void **baseReturn, Arena arena, size_t size,
Bool withReservoirPermit)
{
Addr base;
@@ -410,7 +410,7 @@ Res ControlAlloc(void **baseReturn, Arena arena, size_t size,
res = PoolAlloc(&base, ArenaControlPool(arena), (Size)size,
withReservoirPermit);
- if (res != ResOK)
+ if (res != ResOK)
return res;
*baseReturn = (void *)base; /* see .controlalloc.addr */
@@ -628,7 +628,7 @@ Res ArenaExtend(Arena arena, Addr base, Size size)
AVER(size > 0);
res = (*arena->class->extend)(arena, base, size);
- if (res != ResOK)
+ if (res != ResOK)
return res;
EVENT_PAW(ArenaExtend, arena, base, size);
diff --git a/mps/code/arenacl.c b/mps/code/arenacl.c
index aed95dcb63b..cc38de55ae9 100644
--- a/mps/code/arenacl.c
+++ b/mps/code/arenacl.c
@@ -2,9 +2,9 @@
*
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
- *
+ *
* .design: See design.mps.arena.client.
- *
+ *
* .improve.remember: One possible performance improvement is to
* remember (a conservative approximation to) the indices of the first
* and last free pages in each chunk, and start searching from these
@@ -70,7 +70,7 @@ static Bool ClientChunkCheck(ClientChunk clChunk)
CHECKL((Addr)(chunk + 1) < (Addr)chunk->allocTable);
return TRUE;
}
-
+
/* ClientArenaCheck -- check the consistency of a client arena */
@@ -195,15 +195,15 @@ static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class,
Addr base, limit, chunkBase;
Res res;
Chunk chunk;
-
+
size = va_arg(args, Size);
base = va_arg(args, Addr);
AVER(arenaReturn != NULL);
AVER((ArenaClass)mps_arena_class_cl() == class);
AVER(base != (Addr)0);
- clArenaSize = SizeAlignUp(sizeof(ClientArenaStruct), MPS_PF_ALIGN);
- if (size < clArenaSize)
+ clArenaSize = SizeAlignUp(sizeof(ClientArenaStruct), MPS_PF_ALIGN);
+ if (size < clArenaSize)
return ResMEMORY;
limit = AddrAdd(base, size);
@@ -281,7 +281,7 @@ static Res ClientArenaExtend(Arena arena, Addr base, Size size)
AVER(base != (Addr)0);
AVER(size > 0);
limit = AddrAdd(base, size);
-
+
clientArena = Arena2ClientArena(arena);
res = clientChunkCreate(&chunk, base, limit, clientArena);
return res;
@@ -299,7 +299,7 @@ static Size ClientArenaReserved(Arena arena)
size = 0;
/* .req.extend.slow */
- RING_FOR(node, &arena->chunkRing, nextNode) {
+ RING_FOR(node, &arena->chunkRing, nextNode) {
Chunk chunk = RING_ELT(Chunk, chunkRing, node);
AVERT(Chunk, chunk);
size += AddrOffset(chunk->base, chunk->limit);
@@ -331,7 +331,7 @@ static Res chunkAlloc(Addr *baseReturn, Tract *baseTractReturn,
if (pref->high)
b = BTFindShortResRangeHigh(&baseIndex, &limitIndex, chunk->allocTable,
chunk->allocBase, chunk->pages, pages);
- else
+ else
b = BTFindShortResRange(&baseIndex, &limitIndex, chunk->allocTable,
chunk->allocBase, chunk->pages, pages);
@@ -389,7 +389,7 @@ static Res ClientAlloc(Addr *baseReturn, Tract *baseTractReturn,
pages = ChunkSizeToPages(arena->primary, size);
/* .req.extend.slow */
- RING_FOR(node, &arena->chunkRing, nextNode) {
+ RING_FOR(node, &arena->chunkRing, nextNode) {
Chunk chunk = RING_ELT(Chunk, chunkRing, node);
res = chunkAlloc(baseReturn, baseTractReturn, pref, pages, pool, chunk);
if (res == ResOK || res == ResCOMMIT_LIMIT) {
diff --git a/mps/code/arenacv.c b/mps/code/arenacv.c
index ae7e64a70a2..d361bfa2db3 100644
--- a/mps/code/arenacv.c
+++ b/mps/code/arenacv.c
@@ -28,15 +28,15 @@
/* testAllocAndIterate -- Test arena allocation and iteration
*
- * .tract-seg: Test allocation and iteration, using both low-level
- * tracts and higher-level segments. To do this, contrive a set of
+ * .tract-seg: Test allocation and iteration, using both low-level
+ * tracts and higher-level segments. To do this, contrive a set of
* allocation and iteration functions which are interchangeable.
*/
/* Type definitions for the interchangability interface */
-/* AllocInfo -- interchangeable info about allocated regions */
+/* AllocInfo -- interchangeable info about allocated regions */
typedef struct AllocInfoStruct *AllocInfo;
@@ -60,7 +60,7 @@ typedef void (*FreeFun)(AllocInfo ai);
typedef Bool (*FirstFun)(AllocInfoStruct *aiReturn, Arena arena);
-typedef Bool (*NextFun)(AllocInfoStruct *nextReturn, AllocInfo ai,
+typedef Bool (*NextFun)(AllocInfoStruct *nextReturn, AllocInfo ai,
Arena arena);
typedef Count (*UnitsFun)(Count pages);
@@ -103,8 +103,8 @@ static Res allocAsTract(AllocInfoStruct *aiReturn, SegPref pref,
static void freeAsTract(AllocInfo ai)
{
- ArenaFree(ai->the.tractData.base,
- ai->the.tractData.size,
+ ArenaFree(ai->the.tractData.base,
+ ai->the.tractData.size,
ai->the.tractData.pool);
}
@@ -121,7 +121,7 @@ static Bool firstAsTract(AllocInfoStruct *aiReturn, Arena arena)
return res;
}
-static Bool nextAsTract(AllocInfoStruct *nextReturn, AllocInfo ai,
+static Bool nextAsTract(AllocInfoStruct *nextReturn, AllocInfo ai,
Arena arena)
{
Bool res;
@@ -152,7 +152,7 @@ static void testAsTract(AllocInfo ai, Arena arena)
cdie(found, "TractOfAddr");
base = TractBase(tract);
cdie(base == ai->the.tractData.base, "base");
-
+
}
static void copyAsTract(AllocInfoStruct *toReturn, AllocInfo from)
@@ -203,7 +203,7 @@ static Bool firstAsSeg(AllocInfoStruct *aiReturn, Arena arena)
return res;
}
-static Bool nextAsSeg(AllocInfoStruct *nextReturn, AllocInfo ai,
+static Bool nextAsSeg(AllocInfoStruct *nextReturn, AllocInfo ai,
Arena arena)
{
Bool res;
@@ -229,7 +229,7 @@ static void testAsSeg(AllocInfo ai, Arena arena)
Seg seg = ai->the.segData.seg;
Addr base, limit;
Size size;
-
+
UNUSED(arena);
base = SegBase(seg);
limit = SegLimit(seg);
@@ -255,7 +255,7 @@ static AllocatorClassStruct allocatorSegStruct = {
/* The main function can use either tracts or segs */
-static void testAllocAndIterate(Arena arena, Pool pool,
+static void testAllocAndIterate(Arena arena, Pool pool,
Size pageSize, Count numPerPage,
AllocatorClass allocator)
{
@@ -304,12 +304,12 @@ static void testAllocAndIterate(Arena arena, Pool pool,
}
/* Should be able to iterate over at least offset, new, top */
- expected =
+ expected =
allocator->units(offset) +
- allocator->units(new) +
+ allocator->units(new) +
allocator->units(1);
- if (regionNum >= expected)
+ if (regionNum >= expected)
enoughRegions = ResOK;
else
enoughRegions = ResFAIL;
@@ -352,11 +352,11 @@ static void testPageTable(ArenaClass class, ...)
printf("%ld tracts per page in the page table.\n", (long)tractsPerPage);
/* test tract allocation and iteration */
- testAllocAndIterate(arena, pool, pageSize, tractsPerPage,
+ testAllocAndIterate(arena, pool, pageSize, tractsPerPage,
&allocatorTractStruct);
/* test segment allocation and iteration */
- testAllocAndIterate(arena, pool, pageSize, tractsPerPage,
+ testAllocAndIterate(arena, pool, pageSize, tractsPerPage,
&allocatorSegStruct);
PoolDestroy(pool);
diff --git a/mps/code/arenavm.c b/mps/code/arenavm.c
index ab15db980c0..0599a71637f 100644
--- a/mps/code/arenavm.c
+++ b/mps/code/arenavm.c
@@ -411,7 +411,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, va_list args)
AVER(userSize > 0);
/* Create a VM to hold the arena and map it. */
- vmArenaSize = SizeAlignUp(sizeof(VMArenaStruct), MPS_PF_ALIGN);
+ vmArenaSize = SizeAlignUp(sizeof(VMArenaStruct), MPS_PF_ALIGN);
res = VMCreate(&arenaVM, vmArenaSize);
if (res != ResOK)
goto failVMCreate;
@@ -431,7 +431,7 @@ static Res VMArenaInit(Arena *arenaReturn, ArenaClass class, va_list args)
vmArena->spareSize = 0;
/* .blacklist: We blacklist the zones corresponding to small integers. */
- vmArena->blacklist =
+ vmArena->blacklist =
ZoneSetAdd(arena, ZoneSetAdd(arena, ZoneSetEMPTY, (Addr)1), (Addr)-1);
for(gen = (Index)0; gen < VMArenaGenCount; gen++) {
@@ -539,7 +539,7 @@ static void VMArenaSpareCommitExceeded(Arena arena)
/* Page Table Partial Mapping
*
- * Some helper functions
+ * Some helper functions
*/
@@ -630,7 +630,7 @@ static void tablePagesUsed(Index *tableBaseReturn, Index *tableLimitReturn,
*tableBaseReturn =
PageTablePageIndex(chunk,
AddrPageBase(chunk, addrOfPageDesc(chunk, pageBase)));
- *tableLimitReturn =
+ *tableLimitReturn =
PageTablePageIndex(chunk,
AddrAlignUp(addrOfPageDesc(chunk, pageLimit),
ChunkPageSize(chunk)));
@@ -641,7 +641,7 @@ static void tablePagesUsed(Index *tableBaseReturn, Index *tableLimitReturn,
/* tablePagesEnsureMapped -- ensure needed part of page table is mapped
*
* Pages from baseIndex to limitIndex are about to be allocated.
- * Ensure that the relevant pages occupied by the page table are mapped.
+ * Ensure that the relevant pages occupied by the page table are mapped.
*/
static Res tablePagesEnsureMapped(VMChunk vmChunk,
Index baseIndex, Index limitIndex)
@@ -662,7 +662,7 @@ static Res tablePagesEnsureMapped(VMChunk vmChunk,
chunk, baseIndex, limitIndex);
tableCursorIndex = tableBaseIndex;
-
+
while(BTFindLongResRange(&unmappedBaseIndex, &unmappedLimitIndex,
vmChunk->pageTableMapped,
tableCursorIndex, tableLimitIndex,
@@ -794,7 +794,7 @@ static Bool pagesFindFreeInArea(Index *baseReturn, Chunk chunk, Size size,
/* pagesFindFreeInZones -- find a range of free pages with a ZoneSet
- *
+ *
* This function finds the intersection of ZoneSet and the set of free
* pages and tries to find a free run of pages in the resulting set of
* areas.
@@ -862,7 +862,7 @@ static Bool pagesFindFreeInZones(Index *baseReturn, VMChunk *chunkReturn,
*chunkReturn = Chunk2VMChunk(chunk);
return TRUE;
}
-
+
base = limit;
} else {
/* Adding the zoneSize might wrap round (to zero, because */
@@ -948,12 +948,12 @@ static Bool pagesFindFreeWithSegPref(Index *baseReturn, VMChunk *chunkReturn,
/* Note that each is a superset of the previous, unless */
/* blacklisted zones have been allocated (or the default */
/* is used). */
- if (pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
+ if (pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
ZoneSetDiff(preferred, vmArena->blacklist),
pref->high)
- || pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
+ || pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
ZoneSetUnion(preferred,
- ZoneSetDiff(vmArena->freeSet,
+ ZoneSetDiff(vmArena->freeSet,
vmArena->blacklist)),
pref->high)) {
return TRUE; /* found */
@@ -961,7 +961,7 @@ static Bool pagesFindFreeWithSegPref(Index *baseReturn, VMChunk *chunkReturn,
if (!barge)
/* do not barge into other zones, give up now */
return FALSE;
- if (pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
+ if (pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
ZoneSetDiff(ZoneSetUNIV, vmArena->blacklist),
pref->high)
|| pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
@@ -976,12 +976,12 @@ static Bool pagesFindFreeWithSegPref(Index *baseReturn, VMChunk *chunkReturn,
/* - Any zone. */
/* Note that each is a superset of the previous, unless */
/* blacklisted zones have been allocated. */
- if (pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
+ if (pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
ZoneSetInter(preferred, vmArena->blacklist),
pref->high)
|| pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
preferred, pref->high)
- || pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
+ || pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
ZoneSetUnion(preferred, vmArena->blacklist),
pref->high)
|| pagesFindFreeInZones(baseReturn, chunkReturn, vmArena, size,
@@ -1047,7 +1047,7 @@ static Res VMAllocPolicy(Index *baseIndexReturn, VMChunk *chunkReturn,
static Res VMNZAllocPolicy(Index *baseIndexReturn, VMChunk *chunkReturn,
VMArena vmArena, SegPref pref, Size size)
{
- if (pagesFindFreeInZones(baseIndexReturn, chunkReturn, vmArena, size,
+ if (pagesFindFreeInZones(baseIndexReturn, chunkReturn, vmArena, size,
ZoneSetUNIV, pref->high)) {
return ResOK;
}
@@ -1145,7 +1145,7 @@ static Res pagesMarkAllocated(VMArena vmArena, VMChunk vmChunk,
break;
}
AVER(unmappedLimit <= limitIndex);
- res = vmArenaMap(vmArena, vmChunk->vm,
+ res = vmArenaMap(vmArena, vmChunk->vm,
PageIndexBase(chunk, unmappedBase),
PageIndexBase(chunk, unmappedLimit));
if (res != ResOK)
@@ -1190,7 +1190,7 @@ failTableMap:
/* vmAllocComm -- allocate a region from the arena
*
* Common code used by mps_arena_class_vm and
- * mps_arena_class_vmnz.
+ * mps_arena_class_vmnz.
*/
static Res vmAllocComm(Addr *baseReturn, Tract *baseTractReturn,
VMAllocPolicyMethod policy,
@@ -1219,7 +1219,7 @@ static Res vmAllocComm(Addr *baseReturn, Tract *baseTractReturn,
AVERT(VMArena, vmArena);
/* All chunks have same pageSize. */
AVER(SizeIsAligned(size, ChunkPageSize(arena->primary)));
-
+
/* NULL is used as a discriminator */
/* (see design.mps.arena.vm.table.disc) therefore the real pool */
/* must be non-NULL. */
@@ -1269,7 +1269,7 @@ static Res vmAllocComm(Addr *baseReturn, Tract *baseTractReturn,
}
vmArena->freeSet = ZoneSetDiff(vmArena->freeSet, zones);
-
+
*baseReturn = base;
*baseTractReturn = baseTract;
return ResOK;
@@ -1301,7 +1301,7 @@ static Res VMNZAlloc(Addr *baseReturn, Tract *baseTractReturn,
* The function f is called on the ranges of spare pages which are
* within the range of pages from base to limit. PageStruct descriptors
* from base to limit should be mapped in the page table before calling
- * this function.
+ * this function.
*/
typedef void (*spareRangesFn)(VMChunk, Index, Index, void *);
@@ -1357,7 +1357,7 @@ static void vmArenaUnmapSpareRange(VMChunk vmChunk,
return;
}
-
+
/* sparePagesPurge -- all spare pages are found and purged (unmapped)
*
@@ -1519,7 +1519,7 @@ DEFINE_ARENA_CLASS(VMArenaClass, this)
}
-/* VMNZArenaClass -- The VMNZ arena class definition
+/* VMNZArenaClass -- The VMNZ arena class definition
*
* VMNZ is just VMArena with a different allocation policy.
*/
diff --git a/mps/code/assert.c b/mps/code/assert.c
index b584d227efa..52b0071c9f7 100644
--- a/mps/code/assert.c
+++ b/mps/code/assert.c
@@ -14,9 +14,9 @@
SRCID(assert, "$Id$");
-/* CheckLevel -- Control check level
+/* CheckLevel -- Control check level
*
- * This controls the behaviour of Check methods unless MPS_HOT_RED
+ * This controls the behaviour of Check methods unless MPS_HOT_RED
* is defined, when it is effectively stuck at "CheckNONE".
*/
diff --git a/mps/code/awlut.c b/mps/code/awlut.c
index c6e6dea1f88..1ae9f11f49c 100644
--- a/mps/code/awlut.c
+++ b/mps/code/awlut.c
@@ -278,7 +278,7 @@ static void *setup(void *v, size_t s)
"Weak AP Create\n");
die(mps_ap_create(&bogusap, tablepool, MPS_RANK_EXACT),
"Bogus AP Create\n");
-
+
test(leafap, exactap, weakap, bogusap);
mps_ap_destroy(bogusap);
diff --git a/mps/code/bt.c b/mps/code/bt.c
index c250d689586..2d76ea806e1 100644
--- a/mps/code/bt.c
+++ b/mps/code/bt.c
@@ -19,7 +19,7 @@ SRCID(bt, "$Id$");
/* BTIndexAlignUp, BTIndexAlignDown -- Align bit-table indices
- *
+ *
* Align bit-table indices up and down to word boundaries
*/
@@ -118,7 +118,7 @@ SRCID(bt, "$Id$");
END
-/* ACT_ON_RANGE_HIGH -- macro to act on a base-limit range
+/* ACT_ON_RANGE_HIGH -- macro to act on a base-limit range
*
* in reverse order. Usage as for ACT_ON_RANGE
*/
@@ -167,7 +167,7 @@ SRCID(bt, "$Id$");
/* BTCreate -- allocate a BT from the control pool
- *
+ *
* See design.mps.bt.if.create
*/
@@ -181,7 +181,7 @@ Res BTCreate(BT *btReturn, Arena arena, Count length)
AVERT(Arena, arena);
AVER(length > 0);
- res = ControlAlloc(&p, arena, BTSize(length),
+ res = ControlAlloc(&p, arena, BTSize(length),
/* withReservoirPermit */ FALSE);
if (res != ResOK)
return res;
@@ -193,7 +193,7 @@ Res BTCreate(BT *btReturn, Arena arena, Count length)
/* BTDestroy -- free a BT to the control pool.
- *
+ *
* See design.mps.bt.if.destroy
*/
@@ -202,7 +202,7 @@ void BTDestroy(BT bt, Arena arena, Count length)
AVER(bt != NULL);
AVERT(Arena, arena);
AVER(length > 0);
-
+
ControlFree(arena, bt, BTSize(length));
}
@@ -223,7 +223,7 @@ static Bool BTCheck(BT bt)
/* BTSize -- return the size of a BT
*
- * See design.mps.bt.fun.size
+ * See design.mps.bt.fun.size
*/
size_t (BTSize)(unsigned long n)
@@ -233,11 +233,11 @@ size_t (BTSize)(unsigned long n)
return BTSize(n);
}
-
+
/* BTGet -- get a bit from a BT
*
- * See design.mps.bt.fun.get
+ * See design.mps.bt.fun.get
*/
Bool (BTGet)(BT t, Index i)
@@ -248,11 +248,11 @@ Bool (BTGet)(BT t, Index i)
/* see macro in impl.h.mpm */
return BTGet(t, i);
}
-
+
/* BTSet -- set a bit in a BT
*
- * See design.mps.bt.fun.set
+ * See design.mps.bt.fun.set
*/
void (BTSet)(BT t, Index i)
@@ -297,7 +297,7 @@ void BTSetRange(BT t, Index base, Index limit)
#define WORD_SET_RANGE(i) \
t[(i)] = ~(Word)(0)
- ACT_ON_RANGE(base, limit, SINGLE_SET_RANGE,
+ ACT_ON_RANGE(base, limit, SINGLE_SET_RANGE,
BITS_SET_RANGE, WORD_SET_RANGE);
}
@@ -319,8 +319,8 @@ Bool BTIsResRange(BT bt, Index base, Index limit)
if ((bt[(i)] & BTMask((base),(limit))) != (Word)0) return FALSE
#define WORD_IS_RES_RANGE(i) \
if (bt[(i)] != (Word)0) return FALSE
-
- ACT_ON_RANGE(base, limit, SINGLE_IS_RES_RANGE,
+
+ ACT_ON_RANGE(base, limit, SINGLE_IS_RES_RANGE,
BITS_IS_RES_RANGE, WORD_IS_RES_RANGE);
return TRUE;
}
@@ -348,7 +348,7 @@ Bool BTIsSetRange(BT bt, Index base, Index limit)
#define WORD_IS_SET_RANGE(i) \
if (bt[(i)] != ~(Word)0) return FALSE
- ACT_ON_RANGE(base, limit, SINGLE_IS_SET_RANGE,
+ ACT_ON_RANGE(base, limit, SINGLE_IS_SET_RANGE,
BITS_IS_SET_RANGE, WORD_IS_SET_RANGE);
return TRUE;
}
@@ -370,7 +370,7 @@ void BTResRange(BT t, Index base, Index limit)
t[(i)] &= ~(BTMask((base),(limit)))
#define WORD_RES_RANGE(i) t[(i)] = (Word)(0)
- ACT_ON_RANGE(base, limit, SINGLE_RES_RANGE,
+ ACT_ON_RANGE(base, limit, SINGLE_RES_RANGE,
BITS_RES_RANGE, WORD_RES_RANGE);
}
@@ -406,7 +406,7 @@ btFindSetLabel:; \
*bfsIndexReturn = (i); \
*bfsFoundReturn = TRUE; \
goto btFindSetLabel; \
- }
+ }
#define BITS_FIND_SET(wi,base,limit) \
BEGIN \
Index bactWi = (wi); \
@@ -427,8 +427,8 @@ btFindSetLabel:; \
* Works by first shifting the base of the range to the low
* bits of the word. Then loops performing a binary chop
* over the data looking to see if a bit is set in the lower
- * half. If not, it must be in the upper half which is then
- * shifted down. The loop completes after using a chop unit
+ * half. If not, it must be in the upper half which is then
+ * shifted down. The loop completes after using a chop unit
* of a single single bit.
*/
@@ -460,7 +460,7 @@ btFindSetLabel:; \
* Usage as for BTFindSet
*
* Internally uses the label btFindResLabel
- * which must be redefined to avoid a nameclash if the macro is
+ * which must be redefined to avoid a nameclash if the macro is
* used twice in a function scope.
*/
@@ -480,7 +480,7 @@ btFindResLabel:; \
*bfsIndexReturn = (i); \
*bfsFoundReturn = TRUE; \
goto btFindResLabel; \
- }
+ }
#define BITS_FIND_RES(wi,base,limit) \
BEGIN \
Index bactWi = (wi); \
@@ -500,7 +500,7 @@ btFindResLabel:; \
* Usage as for BTFindSet
*
* Internally uses the label btFindSetHighLabel
- * which must be redefined to avoid a nameclash if the macro is
+ * which must be redefined to avoid a nameclash if the macro is
* used twice in a function scope.
*/
@@ -520,7 +520,7 @@ btFindSetHighLabel:; \
*bfsIndexReturn = (i); \
*bfsFoundReturn = TRUE; \
goto btFindSetHighLabel; \
- }
+ }
#define BITS_FIND_SET_HIGH(wi,base,limit) \
BEGIN \
Index bactWi = (wi); \
@@ -563,14 +563,14 @@ btFindSetHighLabel:; \
goto label; \
} \
END
-
+
-/* BTFindResHigh -- find the highest reset bit in a range
+/* BTFindResHigh -- find the highest reset bit in a range
*
* Usage as for BTFindSet
*
* Internally uses the label btFindSetHighLabel
- * which must be redefined to avoid a nameclash if the macro is
+ * which must be redefined to avoid a nameclash if the macro is
* used twice in a function scope.
*/
@@ -590,7 +590,7 @@ btFindResHighLabel:; \
*bfsIndexReturn = (i); \
*bfsFoundReturn = TRUE; \
goto btFindResHighLabel; \
- }
+ }
#define BITS_FIND_RES_HIGH(wi,base,limit) \
BEGIN \
Index bactWi = (wi); \
@@ -667,13 +667,13 @@ static Bool BTFindResRange(Index *baseReturn, Index *limitReturn,
BTFindSet(&foundSet, &setIndex, bt, setBase, setLimit);
if (!foundSet)
setIndex = setLimit;
-
+
AVER(setIndex - resBase >= minLength);
AVER(setIndex - resBase <= maxLength);
*baseReturn = resBase;
*limitReturn = setIndex;
return TRUE;
-
+
} else {
/* Range was too small. Try again */
unseenBase = minLimit;
@@ -726,10 +726,10 @@ static Bool BTFindResRangeHigh(Index *baseReturn, Index *limitReturn,
unseenLimit = searchLimit; /* haven't seen anything yet */
resBase = searchBase + minLength -1;
- while (resLimit > resBase) {
+ while (resLimit > resBase) {
Index setIndex; /* index of first set bit found */
Bool foundSet = FALSE; /* true if a set bit is found */
-
+
/* Find the first reset bit if it's not already known */
if (!foundRes) {
/* Look for the limit of a range */
@@ -762,13 +762,13 @@ static Bool BTFindResRangeHigh(Index *baseReturn, Index *limitReturn,
baseIndex = setIndex+1;
else
baseIndex = setBase;
-
+
AVER(resLimit - baseIndex >= minLength);
AVER(resLimit - baseIndex <= maxLength);
*baseReturn = baseIndex;
*limitReturn = resLimit;
return TRUE;
-
+
} else {
/* Range was too small. Try again */
unseenLimit = minBase;
@@ -780,7 +780,7 @@ static Bool BTFindResRangeHigh(Index *baseReturn, Index *limitReturn,
foundRes = FALSE;
}
}
- }
+ }
/* failure */
return FALSE;
@@ -861,10 +861,10 @@ Bool BTFindShortResRangeHigh(Index *baseReturn, Index *limitReturn,
/* BTRangesSame -- check that a range of bits in two BTs are the same.
- *
+ *
* See design.mps.bt.if.ranges-same
*/
-
+
Bool BTRangesSame(BT comparand, BT comparator, Index base, Index limit)
{
AVER(BTCheck(comparand));
@@ -888,8 +888,8 @@ Bool BTRangesSame(BT comparand, BT comparator, Index base, Index limit)
if ((comparand[wactI]) != (comparator[wactI])) \
return FALSE; \
END
-
- ACT_ON_RANGE(base, limit, SINGLE_RANGES_SAME,
+
+ ACT_ON_RANGE(base, limit, SINGLE_RANGES_SAME,
BITS_RANGES_SAME, WORD_RANGES_SAME);
return TRUE;
}
@@ -897,7 +897,7 @@ Bool BTRangesSame(BT comparand, BT comparator, Index base, Index limit)
/* BTCopyInvertRange -- copy a range of bits from one BT to another,
* inverting them as you go.
- *
+ *
* See design.mps.bt.if.copy-invert-range
*/
@@ -912,7 +912,7 @@ void BTCopyInvertRange(BT fromBT, BT toBT, Index base, Index limit)
if (BTGet(fromBT, (i))) \
BTRes(toBT, (i)); \
else \
- BTSet(toBT, (i))
+ BTSet(toBT, (i))
#define BITS_COPY_INVERT_RANGE(i,base,limit) \
BEGIN \
Index bactI = (i); \
@@ -925,14 +925,14 @@ void BTCopyInvertRange(BT fromBT, BT toBT, Index base, Index limit)
Index wactI = (i); \
toBT[wactI] = ~fromBT[wactI]; \
END
-
- ACT_ON_RANGE(base, limit, SINGLE_COPY_INVERT_RANGE,
+
+ ACT_ON_RANGE(base, limit, SINGLE_COPY_INVERT_RANGE,
BITS_COPY_INVERT_RANGE, WORD_COPY_INVERT_RANGE);
}
/* BTCopyRange -- copy a range of bits from one BT to another
- *
+ *
* See design.mps.bt.if.copy-range
*/
@@ -947,7 +947,7 @@ void BTCopyRange(BT fromBT, BT toBT, Index base, Index limit)
if (BTGet(fromBT, (i))) \
BTSet(toBT, (i)); \
else \
- BTRes(toBT, (i))
+ BTRes(toBT, (i))
#define BITS_COPY_RANGE(i,base,limit) \
BEGIN \
Index bactI = (i); \
@@ -960,8 +960,8 @@ void BTCopyRange(BT fromBT, BT toBT, Index base, Index limit)
Index wactI = (i); \
toBT[wactI] = fromBT[wactI]; \
END
-
- ACT_ON_RANGE(base, limit, SINGLE_COPY_RANGE,
+
+ ACT_ON_RANGE(base, limit, SINGLE_COPY_RANGE,
BITS_COPY_RANGE, WORD_COPY_RANGE);
}
@@ -970,14 +970,14 @@ void BTCopyRange(BT fromBT, BT toBT, Index base, Index limit)
* offset range in another BT
*
* .slow: Can't always use ACT_ON_RANGE because word alignment
- * may differ for each range. We could try to be smart about
+ * may differ for each range. We could try to be smart about
* detecting similar alignment - but we don't.
- *
+ *
* See design.mps.bt.if.copy-offset-range
*/
-void BTCopyOffsetRange(BT fromBT, BT toBT,
- Index fromBase, Index fromLimit,
+void BTCopyOffsetRange(BT fromBT, BT toBT,
+ Index fromBase, Index fromLimit,
Index toBase, Index toLimit)
{
Index fromBit, toBit;
diff --git a/mps/code/btcv.c b/mps/code/btcv.c
index c117c8b983a..66163c6b5ca 100644
--- a/mps/code/btcv.c
+++ b/mps/code/btcv.c
@@ -6,8 +6,8 @@
* .readership: MPS developers
*
* .coverage: Direct coverage of BTFind*ResRange*, BTRangesSame,
- * BTISResRange, BTIsSetRange, BTCopyRange, BTCopyOffsetRange.
- * Reasonable coverage of BTCopyInvertRange, BTResRange,
+ * BTISResRange, BTIsSetRange, BTCopyRange, BTCopyOffsetRange.
+ * Reasonable coverage of BTCopyInvertRange, BTResRange,
* BTSetRange, BTRes, BTSet, BTCreate, BTDestroy.
*/
@@ -24,8 +24,8 @@ SRCID(btcv, "$Id$");
/* bt*Symmetric -- Symmetric operations on bit tables
*
- * The operations take 2 bit tables, btlo & bthi.
- * They perform the equivalent BT* operation on btlo, and
+ * The operations take 2 bit tables, btlo & bthi.
+ * They perform the equivalent BT* operation on btlo, and
* a reflected operation on the bits of bthi from the opposite
* direction.
*/
@@ -78,10 +78,10 @@ static void btTestSingleRange(BTFinderFn finder, BT bt,
Index foundBase, foundLimit;
found = finder(&foundBase, &foundLimit, bt, base, limit, length);
- cdie(found == expect, "FindResRange result");
+ cdie(found == expect, "FindResRange result");
if (expect) {
- cdie(foundBase == expectBase, "FindResRange base");
- cdie(foundLimit == expectLimit, "FindResRange limit");
+ cdie(foundBase == expectBase, "FindResRange base");
+ cdie(foundLimit == expectLimit, "FindResRange limit");
}
}
@@ -99,15 +99,15 @@ static void btTestResRange(BT btlo, BT bthi, Count btSize,
Index expectBase, Index expectLimit)
{
btTestSingleRange(BTFindShortResRange, btlo,
- base, limit,
- length, expect,
+ base, limit,
+ length, expect,
expectBase, expectLimit);
btTestSingleRange(BTFindShortResRangeHigh, bthi,
- btReflectLimit(btSize, limit),
- btReflectLimit(btSize, base),
- length, expect,
- btReflectLimit(btSize, expectLimit),
+ btReflectLimit(btSize, limit),
+ btReflectLimit(btSize, base),
+ length, expect,
+ btReflectLimit(btSize, expectLimit),
btReflectLimit(btSize, expectBase));
}
@@ -125,15 +125,15 @@ static void btTestLongResRange(BT btlo, BT bthi, Count btSize,
Index expectBase, Index expectLimit)
{
btTestSingleRange(BTFindLongResRange, btlo,
- base, limit,
- length, expect,
+ base, limit,
+ length, expect,
expectBase, expectLimit);
btTestSingleRange(BTFindLongResRangeHigh, bthi,
- btReflectLimit(btSize, limit),
- btReflectLimit(btSize, base),
- length, expect,
- btReflectLimit(btSize, expectLimit),
+ btReflectLimit(btSize, limit),
+ btReflectLimit(btSize, base),
+ length, expect,
+ btReflectLimit(btSize, expectLimit),
btReflectLimit(btSize, expectBase));
}
@@ -143,14 +143,14 @@ static void btTestLongResRange(BT btlo, BT bthi, Count btSize,
* Test finding reset ranges in an all-reset table.
*/
-static void btAllResTest(BT btlo, BT bthi, Count btSize,
+static void btAllResTest(BT btlo, BT bthi, Count btSize,
Index base, Index limit,
unsigned long length)
{
btResRangeSymmetric(btlo, bthi, btSize, 0, btSize);
- btTestResRange(btlo, bthi, btSize, base, limit, length,
+ btTestResRange(btlo, bthi, btSize, base, limit, length,
TRUE, base, base + length);
- btTestLongResRange(btlo, bthi, btSize, base, limit, length,
+ btTestLongResRange(btlo, bthi, btSize, base, limit, length,
TRUE, base, limit);
}
@@ -162,15 +162,15 @@ static void btAllResTest(BT btlo, BT bthi, Count btSize,
* by mistake.
*/
-static void btNoResTest(BT btlo, BT bthi, Count btSize,
+static void btNoResTest(BT btlo, BT bthi, Count btSize,
Index base, Index limit,
unsigned long length)
{
btResRangeSymmetric(btlo, bthi, btSize, 0, btSize);
btSetRangeSymmetric(btlo, bthi, btSize, base, limit);
- btTestResRange(btlo, bthi, btSize, base, limit, length,
+ btTestResRange(btlo, bthi, btSize, base, limit, length,
FALSE, 0, 0);
- btTestLongResRange(btlo, bthi, btSize, base, limit, length,
+ btTestLongResRange(btlo, bthi, btSize, base, limit, length,
FALSE, 0, 0);
}
@@ -182,21 +182,21 @@ static void btNoResTest(BT btlo, BT bthi, Count btSize,
* Expect to find the range if it's long enough,
*/
-static void btResAndFindTest(BT btlo, BT bthi, Count btSize,
+static void btResAndFindTest(BT btlo, BT bthi, Count btSize,
Index base, Index limit,
Index resBase, Index resLimit,
unsigned long length)
{
btResRangeSymmetric(btlo, bthi, btSize, resBase, resLimit);
if ((resLimit - resBase) < length) {
- btTestResRange(btlo, bthi, btSize, base, limit, length,
+ btTestResRange(btlo, bthi, btSize, base, limit, length,
FALSE, 0, 0);
- btTestLongResRange(btlo, bthi, btSize, base, limit, length,
+ btTestLongResRange(btlo, bthi, btSize, base, limit, length,
FALSE, 0, 0);
} else {
- btTestResRange(btlo, bthi, btSize, base, limit, length,
+ btTestResRange(btlo, bthi, btSize, base, limit, length,
TRUE, resBase, resBase + length);
- btTestLongResRange(btlo, bthi, btSize, base, limit, length,
+ btTestLongResRange(btlo, bthi, btSize, base, limit, length,
TRUE, resBase, resLimit);
}
}
@@ -208,7 +208,7 @@ static void btResAndFindTest(BT btlo, BT bthi, Count btSize,
* Test finding single ranges of various sizes
*/
-static void btSingleResTest(BT btlo, BT bthi, Count btSize,
+static void btSingleResTest(BT btlo, BT bthi, Count btSize,
Index base, Index limit,
unsigned long length)
{
@@ -222,13 +222,13 @@ static void btSingleResTest(BT btlo, BT bthi, Count btSize,
for (resBase = base; resBase <= base +2; resBase++) {
btResRangeSymmetric(btlo, bthi, btSize, 0, btSize);
btSetRangeSymmetric(btlo, bthi, btSize, base, limit);
- btResAndFindTest(btlo, bthi, btSize, base, limit,
+ btResAndFindTest(btlo, bthi, btSize, base, limit,
resBase, resBase + resLen, length);
}
for (resLimit = limit; resLimit >= limit -2; resLimit--) {
btResRangeSymmetric(btlo, bthi, btSize, 0, btSize);
btSetRangeSymmetric(btlo, bthi, btSize, base, limit);
- btResAndFindTest(btlo, bthi, btSize, base, limit,
+ btResAndFindTest(btlo, bthi, btSize, base, limit,
resLimit - resLen, resLimit, length);
}
}
@@ -255,8 +255,8 @@ enum {
typedef unsigned Arrangement;
/* Choose a limit for reset range 1 */
-static Index btArrangeRes1(Arrangement arrange,
- Index base, Index res2Base,
+static Index btArrangeRes1(Arrangement arrange,
+ Index base, Index res2Base,
unsigned long length)
{
switch (arrange) {
@@ -276,7 +276,7 @@ static Index btArrangeRes1(Arrangement arrange,
return base + length;
}
- default:
+ default:
NOTREACHED;
return 0; /* keep the compiler happy */
}
@@ -293,7 +293,7 @@ enum {
typedef unsigned Pattern;
/* Choose a limit for reset range 1 */
-static void btResetFirstRange(BT btlo, BT bthi, Count btSize,
+static void btResetFirstRange(BT btlo, BT bthi, Count btSize,
Index res1Limit,
unsigned long length,
Pattern pattern)
@@ -307,7 +307,7 @@ static void btResetFirstRange(BT btlo, BT bthi, Count btSize,
}
case PatternSETMID: {
- /* Actually make 2 ranges here by setting a bit in the middle */
+ /* Actually make 2 ranges here by setting a bit in the middle */
Index mid = res1Limit - length + (length / 2);
btResRangeSymmetric(btlo, bthi, btSize, res1Limit-length, res1Limit);
btSetSymmetric(btlo, bthi, btSize, mid);
@@ -316,18 +316,18 @@ static void btResetFirstRange(BT btlo, BT bthi, Count btSize,
case PatternJUSTSMALL: {
/* Range of (length - 1) */
- btResRangeSymmetric(btlo, bthi, btSize,
+ btResRangeSymmetric(btlo, bthi, btSize,
1 + res1Limit - length, res1Limit);
return;
}
- default:
+ default:
NOTREACHED;
}
}
-static void btDoubleResTest(BT btlo, BT bthi, Count btSize,
+static void btDoubleResTest(BT btlo, BT bthi, Count btSize,
Index base, Index limit,
unsigned long length)
{
@@ -354,7 +354,7 @@ static void btDoubleResTest(BT btlo, BT bthi, Count btSize,
btSetRangeSymmetric(btlo, bthi, btSize, base, limit);
btResetFirstRange(btlo, bthi, btSize, res1Limit, length, pat);
/* Set up range 2 and expect to find it when searching */
- btResAndFindTest(btlo, bthi, btSize, base, limit,
+ btResAndFindTest(btlo, bthi, btSize, base, limit,
res2Base, res2Limit, length);
}
}
@@ -364,12 +364,12 @@ static void btDoubleResTest(BT btlo, BT bthi, Count btSize,
}
-/* btFindRangeTests -- Test BTFind*ResRange*
+/* btFindRangeTests -- Test BTFind*ResRange*
*
* Run a variety of FindResRange tests with different table patterns.
*/
-static void btFindRangeTests(BT btlo, BT bthi, Count btSize,
+static void btFindRangeTests(BT btlo, BT bthi, Count btSize,
Index base, Index limit,
unsigned long length)
{
@@ -393,7 +393,7 @@ static void btFindRangeTests(BT btlo, BT bthi, Count btSize,
* These tests also test BTCopyInvertRange
*/
-static void btIsRangeTests(BT bt1, BT bt2, Count btSize,
+static void btIsRangeTests(BT bt1, BT bt2, Count btSize,
Index base, Index limit)
{
Index minBase, maxLimit, b, l;
@@ -416,7 +416,7 @@ static void btIsRangeTests(BT bt1, BT bt2, Count btSize,
/* near each of the base and limit of the range in question */
Bool outside; /* true if set bits are both outside test range */
- outside = (b < base) && (l > limit);
+ outside = (b < base) && (l > limit);
BTResRange(bt1, 0, btSize);
BTSet(bt1, b);
BTSet(bt1, l - 1);
@@ -449,7 +449,7 @@ static void btIsRangeTests(BT bt1, BT bt2, Count btSize,
*
*/
-static void btCopyTests(BT bt1, BT bt2, Count btSize,
+static void btCopyTests(BT bt1, BT bt2, Count btSize,
Index base, Index limit)
{
Index minBase, maxLimit, b, l;
@@ -472,7 +472,7 @@ static void btCopyTests(BT bt1, BT bt2, Count btSize,
/* near each of the base and limit of the range in question */
Bool outside; /* true if set bits are both outside test range */
- outside = (b < base) && (l > limit);
+ outside = (b < base) && (l > limit);
BTResRange(bt1, 0, btSize);
BTSet(bt1, b);
BTSet(bt1, l - 1);
@@ -482,9 +482,9 @@ static void btCopyTests(BT bt1, BT bt2, Count btSize,
cdie(BTIsResRange(bt2, 0, limit - base) == outside, "BTIsResRange");
/* check copying the region to the top of the other table */
- BTCopyOffsetRange(bt1, bt2,
+ BTCopyOffsetRange(bt1, bt2,
base, limit, btSize + base - limit, btSize);
- cdie(BTIsResRange(bt2, btSize + base - limit, btSize) == outside,
+ cdie(BTIsResRange(bt2, btSize + base - limit, btSize) == outside,
"BTIsResRange");
/* check copying the region to the same place in the other table */
@@ -505,7 +505,7 @@ static void btCopyTests(BT bt1, BT bt2, Count btSize,
-/* btTests -- Do all the tests
+/* btTests -- Do all the tests
*/
static void btTests(BT btlo, BT bthi, Count btSize)
@@ -548,17 +548,17 @@ int main(int argc, char *argv[])
/* tests need 4 whole words plus a few extra bits */
btSize = MPS_WORD_WIDTH * 4 + 10;
- testlib_unused(argc);
+ testlib_unused(argc);
testlib_unused(argv);
die(mps_arena_create(&mpsArena, mps_arena_class_vm(), testArenaSIZE),
"mps_arena_create");
arena = (Arena)mpsArena; /* avoid pun */
- die((mps_res_t)BTCreate(&btlo, arena, btSize),
+ die((mps_res_t)BTCreate(&btlo, arena, btSize),
"failed to create low bit table");
- die((mps_res_t)BTCreate(&bthi, arena, btSize),
+ die((mps_res_t)BTCreate(&bthi, arena, btSize),
"failed to create high bit table");
btTests(btlo, bthi, btSize);
diff --git a/mps/code/bttest.c b/mps/code/bttest.c
index 7d2f4632c58..8d02b7ce6c7 100644
--- a/mps/code/bttest.c
+++ b/mps/code/bttest.c
@@ -72,7 +72,7 @@ static Bool checkDefaultRange(Index arg)
return FALSE;
}
return TRUE; /* explicit valid range */
-}
+}
static void quit(void)
@@ -357,7 +357,7 @@ static void showBT(void) {
/* disable "conversion from int to char" */
#pragma warning(default: 4244)
#endif
-
+
#define testArenaSIZE (((size_t)64)<<20)
diff --git a/mps/code/buffer.c b/mps/code/buffer.c
index 388196350a4..2cecc8111a5 100644
--- a/mps/code/buffer.c
+++ b/mps/code/buffer.c
@@ -80,7 +80,7 @@ Bool BufferCheck(Buffer buffer)
/* Nothing reliable to check for lightweight frame state */
CHECKL(buffer->poolLimit == (Addr)0);
} else {
- Addr aplimit;
+ Addr aplimit;
/* The buffer is attached to a region of memory. */
/* Check consistency. */
@@ -154,7 +154,7 @@ Res BufferDescribe(Buffer buffer, mps_lib_FILE *stream)
res = WriteF(stream,
"Buffer $P ($U) {\n",
(WriteFP)buffer, (WriteFU)buffer->serial,
- " class $P (\"$S\")\n",
+ " class $P (\"$S\")\n",
(WriteFP)buffer->class, buffer->class->name,
" Arena $P\n", (WriteFP)buffer->arena,
" Pool $P\n", (WriteFP)buffer->pool,
@@ -196,7 +196,7 @@ static Res BufferInitV(Buffer buffer, BufferClass class,
AVERT(Pool, pool);
/* The PoolClass should support buffer protocols */
AVER((pool->class->attr & AttrBUF)); /* .trans.mod */
-
+
arena = PoolArena(pool);
/* Initialize the buffer. See impl.h.mpmst for a definition of */
/* the structure. sig and serial comes later .init.sig-serial */
@@ -254,7 +254,7 @@ failInit:
*
* See design.mps.buffer.method.create. */
-Res BufferCreate(Buffer *bufferReturn, BufferClass class,
+Res BufferCreate(Buffer *bufferReturn, BufferClass class,
Pool pool, Bool isMutator, ...)
{
Res res;
@@ -271,7 +271,7 @@ Res BufferCreate(Buffer *bufferReturn, BufferClass class,
*
* See design.mps.buffer.method.create. */
-Res BufferCreateV(Buffer *bufferReturn, BufferClass class,
+Res BufferCreateV(Buffer *bufferReturn, BufferClass class,
Pool pool, Bool isMutator, va_list args)
{
Res res;
@@ -286,7 +286,7 @@ Res BufferCreateV(Buffer *bufferReturn, BufferClass class,
arena = PoolArena(pool);
/* Allocate memory for the buffer descriptor structure. */
- res = ControlAlloc(&p, arena, class->size,
+ res = ControlAlloc(&p, arena, class->size,
/* withReservoirPermit */ FALSE);
if (res != ResOK)
goto failAlloc;
@@ -404,7 +404,7 @@ void BufferFinish(Buffer buffer)
/* Detach the buffer from its owning pool and unsig it. */
RingRemove(&buffer->poolRing);
buffer->sig = SigInvalid;
-
+
/* Finish off the generic buffer fields. */
RingFinish(&buffer->poolRing);
@@ -568,7 +568,7 @@ Res BufferFramePush(AllocFrame *frameReturn, Buffer buffer)
if (buffer->mode & BufferModeFLIPPED) {
BufferSetUnflipped(buffer);
}
-
+
/* check for PopPending */
if (BufferIsTrappedByMutator(buffer)) {
BufferFrameNotifyPopPending(buffer);
@@ -590,7 +590,7 @@ Res BufferFramePop(Buffer buffer, AllocFrame frame)
/* frame is of an abstract type & can't be checked */
pool = BufferPool(buffer);
return (*pool->class->framePop)(pool, buffer, frame);
-
+
}
@@ -631,7 +631,7 @@ Res BufferReserve(Addr *pReturn, Buffer buffer, Size size,
* BufferAttach is entered because of a BufferFill, or because of a Pop
* operation on a lightweight frame. */
-void BufferAttach(Buffer buffer, Addr base, Addr limit,
+void BufferAttach(Buffer buffer, Addr base, Addr limit,
Addr init, Size size)
{
Size filled;
@@ -1099,7 +1099,7 @@ static void bufferTrivFinish (Buffer buffer)
/* bufferTrivAttach -- basic buffer attach method */
-static void bufferTrivAttach(Buffer buffer, Addr base, Addr limit,
+static void bufferTrivAttach(Buffer buffer, Addr base, Addr limit,
Addr init, Size size)
{
/* No special attach method for simple buffers */
@@ -1123,7 +1123,7 @@ static void bufferTrivDetach(Buffer buffer)
}
-/* bufferNoSeg -- basic buffer BufferSeg accessor method
+/* bufferNoSeg -- basic buffer BufferSeg accessor method
*
* .noseg: basic buffers don't support segments, so this method should
* not be called. */
@@ -1147,7 +1147,7 @@ static RankSet bufferTrivRankSet (Buffer buffer)
}
-/* bufferNoSetRankSet -- basic BufferSetRankSet setter method
+/* bufferNoSetRankSet -- basic BufferSetRankSet setter method
*
* .norank: basic buffers don't support ranksets, so this method should
* not be called. */
@@ -1160,7 +1160,7 @@ static void bufferNoSetRankSet (Buffer buffer, RankSet rankset)
}
-/* bufferNoReassignSeg -- basic BufferReassignSeg method
+/* bufferNoReassignSeg -- basic BufferReassignSeg method
*
* .noseg: basic buffers don't support attachment to sements, so this
* method should not be called. */
@@ -1202,10 +1202,10 @@ Bool BufferClassCheck(BufferClass class)
CHECKL(FUNCHECK(class->describe));
CHECKS(BufferClass, class);
return TRUE;
-}
+}
-/* BufferClass -- the vanilla buffer class definition
+/* BufferClass -- the vanilla buffer class definition
*
* See design.mps.buffer.class.hierarchy.buffer. */
@@ -1257,7 +1257,7 @@ Bool SegBufCheck(SegBuf segbuf)
CHECKL(SegCheck(segbuf->seg));
/* To avoid recursive checking, leave it to SegCheck to make */
/* sure the buffer and segment fields tally. */
-
+
if (buffer->mode & BufferModeFLIPPED) {
/* Only buffers that allocate pointers get flipped. */
CHECKL(segbuf->rankSet != RankSetEMPTY);
@@ -1318,7 +1318,7 @@ static void segBufFinish (Buffer buffer)
/* segBufAttach -- SegBuf attach method */
-static void segBufAttach(Buffer buffer, Addr base, Addr limit,
+static void segBufAttach(Buffer buffer, Addr base, Addr limit,
Addr init, Size size)
{
SegBuf segbuf;
@@ -1455,11 +1455,11 @@ static Res segBufDescribe(Buffer buffer, mps_lib_FILE *stream)
}
-/* SegBufClass -- SegBuf class definition
+/* SegBufClass -- SegBuf class definition
*
* Supports an association with a single segment when attached. See
* design.mps.buffer.class.hierarchy.segbuf. */
-
+
typedef BufferClassStruct SegBufClassStruct;
DEFINE_CLASS(SegBufClass, class)
@@ -1510,12 +1510,12 @@ static Res rankBufInit (Buffer buffer, Pool pool, va_list args)
}
-/* RankBufClass -- RankBufClass class definition
+/* RankBufClass -- RankBufClass class definition
*
* A subclass of SegBufClass, sharing structure for instances.
*
* Supports initialization to a rank supplied at creation time. */
-
+
typedef BufferClassStruct RankBufClassStruct;
DEFINE_CLASS(RankBufClass, class)
diff --git a/mps/code/cbs.c b/mps/code/cbs.c
index 6327cd16fe8..5d07d9bc233 100644
--- a/mps/code/cbs.c
+++ b/mps/code/cbs.c
@@ -6,7 +6,7 @@
* .intro: This is a portable implementation of coalescing block
* structures.
*
- * .purpose: CBSs are used to manage potentially unbounded
+ * .purpose: CBSs are used to manage potentially unbounded
* collections of memory blocks.
*
* .sources: design.mps.cbs.
@@ -81,7 +81,7 @@ static CBSEmergencyGrain CBSEmergencyGrainInit(CBS cbs, Addr base, Addr limit)
}
-/* CBSEnter, CBSLeave -- Avoid re-entrance
+/* CBSEnter, CBSLeave -- Avoid re-entrance
*
* .enter-leave: The callbacks are restricted in what they may call.
* These functions enforce this.
@@ -126,7 +126,7 @@ Bool CBSCheck(CBS cbs)
CHECKL(cbs->mayUseInline || cbs->emergencyBlockList == NULL);
CHECKL(cbs->mayUseInline || cbs->emergencyGrainList == NULL);
/* See design.mps.cbs.align */
- CHECKL(!cbs->mayUseInline ||
+ CHECKL(!cbs->mayUseInline ||
AlignIsAligned(cbs->alignment, cbsMinimumAlignment));
/* can't check emergencyBlockList or emergencyGrainList more */
/* Checking eblSize and eglSize is too laborious without a List ADT */
@@ -162,7 +162,7 @@ Size (CBSBlockSize)(CBSBlock block)
}
-/* cbsSplayCompare -- Compare key to [base,limit)
+/* cbsSplayCompare -- Compare key to [base,limit)
*
* See design.mps.splay.type.splay.compare.method
*/
@@ -183,7 +183,7 @@ static Compare cbsSplayCompare(void *key, SplayNode node)
base2 = cbsBlock->base;
limit2 = cbsBlock->limit;
- if (base1 < base2)
+ if (base1 < base2)
return CompareLESS;
else if (base1 >= limit2)
return CompareGREATER;
@@ -194,7 +194,7 @@ static Compare cbsSplayCompare(void *key, SplayNode node)
/* cbsTestNode, cbsTestTree -- test for nodes larger than the S parameter */
-static Bool cbsTestNode(SplayTree tree, SplayNode node,
+static Bool cbsTestNode(SplayTree tree, SplayNode node,
void *closureP, unsigned long closureS)
{
Size size;
@@ -213,7 +213,7 @@ static Bool cbsTestNode(SplayTree tree, SplayNode node,
}
static Bool cbsTestTree(SplayTree tree, SplayNode node,
- void *closureP, unsigned long closureS)
+ void *closureP, unsigned long closureS)
{
Size size;
CBSBlock block;
@@ -289,7 +289,7 @@ Res CBSInit(Arena arena, CBS cbs, void *owner,
return ResPARAM;
}
- SplayTreeInit(splayTreeOfCBS(cbs), &cbsSplayCompare,
+ SplayTreeInit(splayTreeOfCBS(cbs), &cbsSplayCompare,
fastFind ? &cbsUpdateNode : NULL);
res = PoolCreate(&(cbs->blockPool), arena, PoolClassMFS(),
sizeof(CBSBlockStruct) * 64, sizeof(CBSBlockStruct));
@@ -367,7 +367,7 @@ static void cbsBlockDelete(CBS cbs, CBSBlock block)
oldSize = CBSBlockSize(block);
METER_ACC(cbs->splaySearch, cbs->splayTreeSize);
- res = SplayTreeDelete(splayTreeOfCBS(cbs), splayNodeOfCBSBlock(block),
+ res = SplayTreeDelete(splayTreeOfCBS(cbs), splayNodeOfCBSBlock(block),
keyOfCBSBlock(block));
AVER(res == ResOK); /* Must be possible to delete node */
STATISTIC(--cbs->splayTreeSize);
@@ -545,7 +545,7 @@ static Res cbsInsertIntoTree(Addr *baseReturn, Addr *limitReturn,
cbsBlockGrow(cbs, rightCBS, oldSize);
} else { /* !leftMerge, !rightMerge */
res = cbsBlockNew(cbs, base, limit);
- if (res != ResOK)
+ if (res != ResOK)
goto fail;
}
}
@@ -628,7 +628,7 @@ static Res cbsCoalesceWithEmergencyLists(Addr *baseIO, Addr *limitIO, CBS cbs)
}
/* block's next is still valid, even if it's been coalesced */
}
-
+
if (cbs->emergencyGrainList != NULL) {
CBSEmergencyGrain prev, grain, next;
Addr grainBase, grainLimit;
@@ -677,7 +677,7 @@ static Res cbsCoalesceWithEmergencyLists(Addr *baseIO, Addr *limitIO, CBS cbs)
/* Because the lists are known to have isolated ranges, there can */
/* be no more than 2 coalescences. */
- AVER(nCoalescences <= 2);
+ AVER(nCoalescences <= 2);
*baseIO = base;
*limitIO = limit;
@@ -690,7 +690,7 @@ static Res cbsCoalesceWithEmergencyLists(Addr *baseIO, Addr *limitIO, CBS cbs)
* The range must be unadjacent to any items on the emergency lists.
*/
-static Res cbsAddToEmergencyLists(CBS cbs, Addr base, Addr limit)
+static Res cbsAddToEmergencyLists(CBS cbs, Addr base, Addr limit)
{
Res res = ResOK;
Size size;
@@ -715,7 +715,7 @@ static Res cbsAddToEmergencyLists(CBS cbs, Addr base, Addr limit)
if (prev != NULL && block != NULL)
AVER(CBSEmergencyBlockLimit(prev) < CBSEmergencyBlockBase(block));
-
+
/* check ordering: prev ... new ... block */
if (prev != NULL && CBSEmergencyBlockLimit(prev) >= base)
return ResFAIL; /* range intersects with existing block */
@@ -723,7 +723,7 @@ static Res cbsAddToEmergencyLists(CBS cbs, Addr base, Addr limit)
if (block != NULL && limit >= CBSEmergencyBlockBase(block))
return ResFAIL; /* range intersects with existing block */
- if (prev == NULL)
+ if (prev == NULL)
cbs->emergencyBlockList = new;
else
CBSEmergencyBlockSetNext(prev, new);
@@ -737,13 +737,13 @@ static Res cbsAddToEmergencyLists(CBS cbs, Addr base, Addr limit)
grain != NULL && CBSEmergencyGrainBase(grain) < base;
prev = grain, grain = CBSEmergencyGrainNext(grain)) {
if (prev != NULL)
- AVER(CBSEmergencyGrainLimit(cbs, prev) <
+ AVER(CBSEmergencyGrainLimit(cbs, prev) <
CBSEmergencyGrainBase(grain));
}
if (prev != NULL && grain != NULL)
AVER(CBSEmergencyGrainLimit(cbs, prev) < CBSEmergencyGrainBase(grain));
-
+
/* check ordering: prev ... new ... grain */
if (prev != NULL && CBSEmergencyGrainLimit(cbs, prev) >= base)
return ResFAIL; /* range intersects with existing grain */
@@ -751,7 +751,7 @@ static Res cbsAddToEmergencyLists(CBS cbs, Addr base, Addr limit)
if (grain != NULL && limit >= CBSEmergencyGrainBase(grain))
return ResFAIL; /* range intersects with existing grain */
- if (prev == NULL)
+ if (prev == NULL)
cbs->emergencyGrainList = new;
else
CBSEmergencyGrainSetNext(prev, new);
@@ -768,7 +768,7 @@ static Res cbsAddToEmergencyLists(CBS cbs, Addr base, Addr limit)
/* cbsFlushEmergencyLists -- Attempt to move ranges to CBS proper */
-static void cbsFlushEmergencyLists(CBS cbs)
+static void cbsFlushEmergencyLists(CBS cbs)
{
Res res = ResOK;
Addr base, limit;
@@ -1021,7 +1021,7 @@ static Res cbsDeleteFromEmergencyBlockList(CBS cbs, Addr base, Addr limit)
if (blockBase <= base && limit <= blockLimit) {
/* remove from list */
- if (prev == NULL)
+ if (prev == NULL)
cbs->emergencyBlockList = CBSEmergencyBlockNext(block);
else
CBSEmergencyBlockSetNext(prev, CBSEmergencyBlockNext(block));
@@ -1041,7 +1041,7 @@ static Res cbsDeleteFromEmergencyBlockList(CBS cbs, Addr base, Addr limit)
} else {
return ResFAIL; /* partly in list */
}
- }
+ }
return ResFAIL; /* not in list at all */
}
@@ -1062,7 +1062,7 @@ static Res cbsDeleteFromEmergencyGrainList(CBS cbs, Addr base, Addr limit)
prev = grain, grain = CBSEmergencyGrainNext(grain)) {
if (prev != NULL)
AVER(CBSEmergencyGrainLimit(cbs, prev) < CBSEmergencyGrainBase(grain));
- }
+ }
if (grain != NULL) {
grainBase = CBSEmergencyGrainBase(grain);
@@ -1083,7 +1083,7 @@ static Res cbsDeleteFromEmergencyGrainList(CBS cbs, Addr base, Addr limit)
} else {
return ResFAIL; /* range is partly in list */
}
- }
+ }
return ResFAIL; /* range is not in list at all */
}
@@ -1134,8 +1134,8 @@ Res CBSBlockDescribe(CBSBlock block, mps_lib_FILE *stream)
if (stream == NULL) return ResFAIL;
res = WriteF(stream,
- "[$P,$P) {$U}",
- (WriteFP)block->base,
+ "[$P,$P) {$U}",
+ (WriteFP)block->base,
(WriteFP)block->limit,
(WriteFU)block->maxSize,
NULL);
@@ -1258,7 +1258,7 @@ static Bool cbsSetMinSizeGrow(CBS cbs, CBSBlock block, void *p)
{
CBSSetMinSizeClosure closure;
Size size;
-
+
closure = (CBSSetMinSizeClosure)p;
AVER(closure->old > closure->new);
size = CBSBlockSize(block);
@@ -1272,7 +1272,7 @@ static Bool cbsSetMinSizeShrink(CBS cbs, CBSBlock block, void *p)
{
CBSSetMinSizeClosure closure;
Size size;
-
+
closure = (CBSSetMinSizeClosure)p;
AVER(closure->old < closure->new);
size = CBSBlockSize(block);
@@ -1305,7 +1305,7 @@ void CBSSetMinSize(CBS cbs, Size minSize)
/* CBSFindDeleteCheck -- check method for a CBSFindDelete value */
-static Bool CBSFindDeleteCheck(CBSFindDelete findDelete)
+static Bool CBSFindDeleteCheck(CBSFindDelete findDelete)
{
CHECKL(findDelete == CBSFindDeleteNONE || findDelete == CBSFindDeleteLOW
|| findDelete == CBSFindDeleteHIGH
@@ -1320,9 +1320,9 @@ static Bool CBSFindDeleteCheck(CBSFindDelete findDelete)
typedef Res (*cbsDeleteMethod)(CBS cbs, Addr base, Addr limit);
-static void cbsFindDeleteRange(Addr *baseReturn, Addr *limitReturn,
- CBS cbs, Addr base, Addr limit, Size size,
- cbsDeleteMethod delete,
+static void cbsFindDeleteRange(Addr *baseReturn, Addr *limitReturn,
+ CBS cbs, Addr base, Addr limit, Size size,
+ cbsDeleteMethod delete,
CBSFindDelete findDelete)
{
Bool callDelete = TRUE;
@@ -1373,7 +1373,7 @@ static void cbsFindDeleteRange(Addr *baseReturn, Addr *limitReturn,
/* CBSFindFirst -- find the first block of at least the given size */
Bool CBSFindFirst(Addr *baseReturn, Addr *limitReturn,
- CBS cbs, Size size, CBSFindDelete findDelete)
+ CBS cbs, Size size, CBSFindDelete findDelete)
{
Bool found;
Addr base = (Addr)0, limit = (Addr)0; /* only defined when found is TRUE */
@@ -1425,11 +1425,11 @@ Bool CBSFindFirst(Addr *baseReturn, Addr *limitReturn,
deleteMethod = &cbsDeleteFromEmergencyBlockList;
/* @@@@ Could remove in place more efficiently. */
break;
- }
+ }
}
}
- if (cbs->emergencyGrainList != NULL &&
+ if (cbs->emergencyGrainList != NULL &&
size <= CBSEmergencyGrainSize(cbs)) {
/* Take first grain */
CBSEmergencyGrain grain = cbs->emergencyGrainList;
@@ -1456,7 +1456,7 @@ Bool CBSFindFirst(Addr *baseReturn, Addr *limitReturn,
/* CBSFindLast -- find the last block of at least the given size */
Bool CBSFindLast(Addr *baseReturn, Addr *limitReturn,
- CBS cbs, Size size, CBSFindDelete findDelete)
+ CBS cbs, Size size, CBSFindDelete findDelete)
{
Bool found;
Addr base = (Addr)0, limit = (Addr)0; /* only defined in found is TRUE */
@@ -1506,11 +1506,11 @@ Bool CBSFindLast(Addr *baseReturn, Addr *limitReturn,
limit = CBSEmergencyBlockLimit(block);
deleteMethod = &cbsDeleteFromEmergencyBlockList;
/* @@@@ Could remove in place more efficiently. */
- }
+ }
}
}
- if (cbs->emergencyGrainList != NULL &&
+ if (cbs->emergencyGrainList != NULL &&
size <= CBSEmergencyGrainSize(cbs)) {
CBSEmergencyGrain grain;
@@ -1600,7 +1600,7 @@ Bool CBSFindLargest(Addr *baseReturn, Addr *limitReturn,
limit = CBSEmergencyBlockLimit(block);
deleteMethod = &cbsDeleteFromEmergencyBlockList;
/* @@@@ Could remove in place more efficiently. */
- }
+ }
}
}
diff --git a/mps/code/cbs.h b/mps/code/cbs.h
index 0ab38db4333..d8daf61685a 100644
--- a/mps/code/cbs.h
+++ b/mps/code/cbs.h
@@ -73,7 +73,7 @@ extern Res CBSInit(Arena arena, CBS cbs, void *owner,
Size minSize,
Align alignment,
Bool mayUseInline,
- Bool fastFind);
+ Bool fastFind);
extern void CBSFinish(CBS cbs);
extern Res CBSInsert(CBS cbs, Addr base, Addr limit);
diff --git a/mps/code/cbstest.c b/mps/code/cbstest.c
index 393a75a325a..a58f6a7661c 100644
--- a/mps/code/cbstest.c
+++ b/mps/code/cbstest.c
@@ -92,7 +92,7 @@ static void testCallback(CBS cbs, CBSBlock cbsBlock,
}
-static void cbsNewCallback(CBS cbs, CBSBlock cbsBlock,
+static void cbsNewCallback(CBS cbs, CBSBlock cbsBlock,
Size oldSize, Size newSize)
{
testCallback(cbs, cbsBlock, oldSize, newSize, &CallbackNew);
@@ -103,7 +103,7 @@ static void cbsNewCallback(CBS cbs, CBSBlock cbsBlock,
}
-static void cbsDeleteCallback(CBS cbs, CBSBlock cbsBlock,
+static void cbsDeleteCallback(CBS cbs, CBSBlock cbsBlock,
Size oldSize, Size newSize)
{
testCallback(cbs, cbsBlock, oldSize, newSize, &CallbackDelete);
@@ -114,7 +114,7 @@ static void cbsDeleteCallback(CBS cbs, CBSBlock cbsBlock,
}
-static void cbsGrowCallback(CBS cbs, CBSBlock cbsBlock,
+static void cbsGrowCallback(CBS cbs, CBSBlock cbsBlock,
Size oldSize, Size newSize)
{
testCallback(cbs, cbsBlock, oldSize, newSize, &CallbackGrow);
@@ -126,7 +126,7 @@ static void cbsGrowCallback(CBS cbs, CBSBlock cbsBlock,
}
-static void cbsShrinkCallback(CBS cbs, CBSBlock cbsBlock,
+static void cbsShrinkCallback(CBS cbs, CBSBlock cbsBlock,
Size oldSize, Size newSize)
{
testCallback(cbs, cbsBlock, oldSize, newSize, &CallbackShrink);
@@ -151,16 +151,16 @@ static Bool checkCBSAction(CBS cbs, CBSBlock cbsBlock, void *p)
limit = CBSBlockLimit(cbsBlock);
if (base > closure->oldLimit) {
- Insist(BTIsSetRange(closure->allocTable,
- indexOfAddr(closure->base, closure->oldLimit),
+ Insist(BTIsSetRange(closure->allocTable,
+ indexOfAddr(closure->base, closure->oldLimit),
indexOfAddr(closure->base, base)));
} else { /* must be at start of table */
Insist(base == closure->oldLimit);
Insist(closure->oldLimit == closure->base);
}
-
- Insist(BTIsResRange(closure->allocTable,
- indexOfAddr(closure->base, base),
+
+ Insist(BTIsResRange(closure->allocTable,
+ indexOfAddr(closure->base, base),
indexOfAddr(closure->base, limit)));
@@ -182,10 +182,10 @@ static void checkCBS(CBS cbs, BT allocTable, Addr dummyBlock)
CBSIterate(cbs, checkCBSAction, (void *)&closure);
if (closure.oldLimit == closure.base)
- Insist(BTIsSetRange(allocTable, 0,
+ Insist(BTIsSetRange(allocTable, 0,
indexOfAddr(dummyBlock, closure.limit)));
else if (closure.limit > closure.oldLimit)
- Insist(BTIsSetRange(allocTable,
+ Insist(BTIsSetRange(allocTable,
indexOfAddr(dummyBlock, closure.oldLimit),
indexOfAddr(dummyBlock, closure.limit)));
else
@@ -200,7 +200,7 @@ static Word cbsRnd(Word limit)
}
-/* nextEdge -- Finds the next transition in the bit table
+/* nextEdge -- Finds the next transition in the bit table
*
* Returns the index greater than such that the
* range [, ) has the same value in the bit table,
@@ -217,14 +217,14 @@ static Index nextEdge(BT bt, Size size, Index base)
baseValue = BTGet(bt, base);
- for(end = base + 1; end < size && BTGet(bt, end) == baseValue; end++)
+ for(end = base + 1; end < size && BTGet(bt, end) == baseValue; end++)
NOOP;
return end;
}
-/* lastEdge -- Finds the previous transition in the bit table
+/* lastEdge -- Finds the previous transition in the bit table
*
* Returns the index less than such that the range
* [, ] has the same value in the bit table,
@@ -241,7 +241,7 @@ static Index lastEdge(BT bt, Size size, Index base)
baseValue = BTGet(bt, base);
- for(end = base; end > (Index)0 && BTGet(bt, end - 1) == baseValue; end--)
+ for(end = base; end > (Index)0 && BTGet(bt, end - 1) == baseValue; end--)
NOOP;
return end;
@@ -253,8 +253,8 @@ static Index lastEdge(BT bt, Size size, Index base)
* The function first picks a uniformly distributed within the table.
*
* It then scans forward a binary exponentially distributed
- * number of "edges" in the table (that is, transitions between set and
- * reset) to get . Note that there is a 50% chance that will
+ * number of "edges" in the table (that is, transitions between set and
+ * reset) to get . Note that there is a 50% chance that will
* be the next edge, a 25% chance it will be the edge after, etc., until
* the end of the table.
*
@@ -264,7 +264,7 @@ static Index lastEdge(BT bt, Size size, Index base)
* Hence there is a somewhat better than 50% chance that the range will be
* all either set or reset.
*/
-
+
static void randomRange(Addr *baseReturn, Addr *limitReturn,
BT allocTable, Addr block)
{
@@ -319,10 +319,10 @@ static void checkExpectations(void)
Insist(!CallbackDelete.shouldBeCalled);
Insist(!CallbackGrow.shouldBeCalled);
Insist(!CallbackShrink.shouldBeCalled);
-}
+}
-static void allocate(CBS cbs, Addr block, BT allocTable,
+static void allocate(CBS cbs, Addr block, BT allocTable,
Addr base, Addr limit)
{
Res res;
@@ -333,9 +333,9 @@ static void allocate(CBS cbs, Addr block, BT allocTable,
il = indexOfAddr(block, limit);
isFree = BTIsResRange(allocTable, ib, il);
-
+
/*
- printf("allocate: [%p, %p) -- %s\n",
+ printf("allocate: [%p, %p) -- %s\n",
base, limit, isFree ? "succeed" : "fail");
*/
@@ -345,9 +345,9 @@ static void allocate(CBS cbs, Addr block, BT allocTable,
Addr outerBase, outerLimit; /* interval containing [ib, il) */
Size left, right, total; /* Sizes of block and two fragments */
- outerBase =
+ outerBase =
addrOfIndex(block, lastEdge(allocTable, ArraySize, ib));
- outerLimit =
+ outerLimit =
addrOfIndex(block, nextEdge(allocTable, ArraySize, il - 1));
left = AddrOffset(outerBase, base);
@@ -388,10 +388,10 @@ static void allocate(CBS cbs, Addr block, BT allocTable,
res = CBSDelete(cbs, base, limit);
if (!isFree) {
- die_expect((mps_res_t)res, MPS_RES_FAIL,
+ die_expect((mps_res_t)res, MPS_RES_FAIL,
"Succeeded in deleting allocated block");
} else { /* isFree */
- die_expect((mps_res_t)res, MPS_RES_OK,
+ die_expect((mps_res_t)res, MPS_RES_OK,
"failed to delete free block");
NAllocateSucceeded++;
BTSetRange(allocTable, ib, il);
@@ -415,7 +415,7 @@ static void deallocate(CBS cbs, Addr block, BT allocTable,
isAllocated = BTIsSetRange(allocTable, ib, il);
/*
- printf("deallocate: [%p, %p) -- %s\n",
+ printf("deallocate: [%p, %p) -- %s\n",
base, limit, isAllocated ? "succeed" : "fail");
*/
@@ -426,14 +426,14 @@ static void deallocate(CBS cbs, Addr block, BT allocTable,
/* Find the free blocks adjacent to the allocated block */
if (ib > 0 && !BTGet(allocTable, ib - 1)) {
- outerBase =
+ outerBase =
addrOfIndex(block, lastEdge(allocTable, ArraySize, ib - 1));
} else {
outerBase = base;
}
if (il < ArraySize && !BTGet(allocTable, il)) {
- outerLimit =
+ outerLimit =
addrOfIndex(block, nextEdge(allocTable, ArraySize, il));
} else {
outerLimit = limit;
@@ -446,7 +446,7 @@ static void deallocate(CBS cbs, Addr block, BT allocTable,
/* based on detailed knowledge of CBS behaviour */
checkExpectations();
if (total >= MinSize && left < MinSize && right < MinSize) {
- if (left >= right)
+ if (left >= right)
expectCallback(&CallbackNew, left, outerBase, outerLimit);
else
expectCallback(&CallbackNew, right, outerBase, outerLimit);
@@ -473,7 +473,7 @@ static void deallocate(CBS cbs, Addr block, BT allocTable,
res = CBSInsertReturningRange(&freeBase, &freeLimit, cbs, base, limit);
- if (!isAllocated) {
+ if (!isAllocated) {
die_expect((mps_res_t)res, MPS_RES_FAIL,
"succeeded in inserting non-allocated block");
} else { /* isAllocated */
@@ -490,7 +490,7 @@ static void deallocate(CBS cbs, Addr block, BT allocTable,
static void find(CBS cbs, void *block, BT alloc, Size size, Bool high,
- CBSFindDelete findDelete)
+ CBSFindDelete findDelete)
{
Bool expected, found;
Index expectedBase, expectedLimit;
@@ -500,7 +500,7 @@ static void find(CBS cbs, void *block, BT alloc, Size size, Bool high,
checkExpectations();
expected = (high ? BTFindLongResRangeHigh : BTFindLongResRange)
- (&expectedBase, &expectedLimit, alloc,
+ (&expectedBase, &expectedLimit, alloc,
(Index)0, (Index)ArraySize, (unsigned long)size);
if (expected) {
@@ -532,9 +532,9 @@ static void find(CBS cbs, void *block, BT alloc, Size size, Bool high,
if (newSize == 0)
expectCallback(&CallbackDelete, oldSize, (Addr)0, (Addr)0);
else if (newSize < MinSize)
- expectCallback(&CallbackDelete, oldSize,
+ expectCallback(&CallbackDelete, oldSize,
remainderBase, remainderLimit);
- else
+ else
expectCallback(&CallbackShrink, oldSize,
remainderBase, remainderLimit);
}
@@ -578,8 +578,8 @@ extern int main(int argc, char *argv[])
randomize(argc, argv);
- NAllocateTried = NAllocateSucceeded = NDeallocateTried =
- NDeallocateSucceeded = NNewBlocks = NDeleteBlocks =
+ NAllocateTried = NAllocateSucceeded = NDeallocateTried =
+ NDeallocateSucceeded = NNewBlocks = NDeleteBlocks =
NGrowBlocks = NShrinkBlocks = 0;
clearExpectations();
@@ -588,12 +588,12 @@ extern int main(int argc, char *argv[])
"mps_arena_create");
arena = (Arena)mpsArena; /* avoid pun */
- die((mps_res_t)BTCreate(&allocTable, arena, ArraySize),
+ die((mps_res_t)BTCreate(&allocTable, arena, ArraySize),
"failed to create alloc table");
- die((mps_res_t)CBSInit(arena, &cbsStruct, NULL, &cbsNewCallback,
+ die((mps_res_t)CBSInit(arena, &cbsStruct, NULL, &cbsNewCallback,
&cbsDeleteCallback, &cbsGrowCallback,
- &cbsShrinkCallback, MinSize,
+ &cbsShrinkCallback, MinSize,
Alignment, TRUE, TRUE),
"failed to initialise CBS");
cbs = &cbsStruct;
@@ -602,12 +602,12 @@ extern int main(int argc, char *argv[])
/* We're not going to use this block, but I feel unhappy just */
/* inventing addresses. */
- die((mps_res_t)ControlAlloc(&p, arena, ArraySize * Alignment,
- /* withReservoirPermit */ FALSE),
+ die((mps_res_t)ControlAlloc(&p, arena, ArraySize * Alignment,
+ /* withReservoirPermit */ FALSE),
"failed to allocate block");
dummyBlock = (Addr)p; /* avoid pun */
- printf("Allocated block [%p, %p)\n", dummyBlock,
+ printf("Allocated block [%p, %p)\n", dummyBlock,
(char *)dummyBlock + ArraySize);
checkCBS(cbs, allocTable, dummyBlock);
diff --git a/mps/code/check.h b/mps/code/check.h
index 3a44b54f855..3a541164213 100644
--- a/mps/code/check.h
+++ b/mps/code/check.h
@@ -50,7 +50,7 @@ enum {
#define AVER_CRITICAL(cond) DISCARD(cond)
#define AVERT_CRITICAL(type, val) DISCARD(type ## Check(val))
-#elif defined(MPS_HOT_RED)
+#elif defined(MPS_HOT_RED)
#define AVER(cond) ASSERT(cond, #cond)
#define AVERT(type, val) ASSERT(type ## Check(val), \
diff --git a/mps/code/comm.gmk b/mps/code/comm.gmk
index 01a58c7b3bc..aafb2134680 100644
--- a/mps/code/comm.gmk
+++ b/mps/code/comm.gmk
@@ -44,7 +44,7 @@
# have to change the makefiles for all the platforms which use this
# makefile to define the source list for that part, and the GNUmakefile
# to include a recursive call to the name of that part.
-#
+#
# CHECK PARAMETERS
#
# GNU make doesn't really have an "error" directive, but these lines
@@ -132,7 +132,7 @@ CFLAGSCOMMON = $(PFMDEFS) $(CFLAGSTARGET) $(CFLAGSCOMPILER)
# These flags are added to compilations for the indicated variety.
CFWE = -DCONFIG_VAR_WE -DNDEBUG $(CFLAGSOPTNODEBUG)
CFWI = -DCONFIG_VAR_WI -DNDEBUG $(CFLAGSOPT)
-CFHE = -DCONFIG_VAR_HE -DNDEBUG $(CFLAGSOPTNODEBUG)
+CFHE = -DCONFIG_VAR_HE -DNDEBUG $(CFLAGSOPTNODEBUG)
CFHI = -DCONFIG_VAR_HI -DNDEBUG $(CFLAGSOPT)
CFII = -DCONFIG_VAR_II -DNDEBUG $(CFLAGSOPT)
CFCE = -DCONFIG_VAR_CE $(CFLAGSOPTNODEBUG)
@@ -215,7 +215,7 @@ SW = $(SWCOMMON) $(SWPF)
# These map the source file lists onto object files and dependency files
# in the platform/variety directory.
-#
+#
# %%PART: Add a new macro which expands to the files included in the
# part.
@@ -325,7 +325,7 @@ clean: phony
rm -rf "$(PFM)"
# "target" builds some varieties of the target named in the TARGET macro.
-# %%VARIETY: Optionally, add a recursive make call for the new variety,
+# %%VARIETY: Optionally, add a recursive make call for the new variety,
# if it should be built by default.
ifdef TARGET
diff --git a/mps/code/commpost.nmk b/mps/code/commpost.nmk
index 0e54f3fd113..3c6389e86b8 100644
--- a/mps/code/commpost.nmk
+++ b/mps/code/commpost.nmk
@@ -52,7 +52,7 @@ clean:
-deltree /Y $(PFM)
# target target
-# %%VARIETY: Optionally, add a recursive make call for the new variety,
+# %%VARIETY: Optionally, add a recursive make call for the new variety,
# if it should be built by default.
# Only the varieties needed for development and internal customers are made.
# Depends on there being no file called "target".
@@ -129,7 +129,7 @@ $(PFM)\$(VARIETY)\lockutw3.exe: $(PFM)\$(VARIETY)\lockutw3.obj \
$(MPMOBJ) $(PLINTHOBJ) $(TESTLIBOBJ)
$(PFM)\$(VARIETY)\protcv.exe: $(PFM)\$(VARIETY)\protcv.obj \
- $(MPMOBJ) $(PLINTHOBJ)
+ $(MPMOBJ) $(PLINTHOBJ)
$(PFM)\$(VARIETY)\mpsicv.exe: $(PFM)\$(VARIETY)\mpsicv.obj \
$(MPMOBJ) $(AMCOBJ) $(PLINTHOBJ) $(DWOBJ) $(DWTESTOBJ) \
diff --git a/mps/code/commpre.nmk b/mps/code/commpre.nmk
index f18274ec4e4..9747ce50865 100644
--- a/mps/code/commpre.nmk
+++ b/mps/code/commpre.nmk
@@ -93,7 +93,7 @@ ECHO = echo
!ifdef TARGET
!if "$(TARGET)" == "mmsw.lib" || "$(TARGET)" == "epvmss.exe" || "$(TARGET)" == "replaysw.exe"
CFLAGSTARGETPRE = /DCONFIG_PROD_EPCORE
-CFLAGSTARGETPOST =
+CFLAGSTARGETPOST =
CRTFLAGSW = /MD
CRTFLAGSH = /MDd
CRTFLAGSC = /MDd
@@ -103,7 +103,7 @@ LINKFLAGSCOOL = msvcrtd.lib
!elseif "$(TARGET)" == "mmdw.lib"
# /Oy- is actually 86-specific, but Dylan is only built for that platform
-CFLAGSTARGETPRE = /DCONFIG_PROD_DYLAN
+CFLAGSTARGETPRE = /DCONFIG_PROD_DYLAN
CFLAGSTARGETPOST = /Oy-
CRTFLAGSW = /MT
CRTFLAGSH = /MT
@@ -179,10 +179,10 @@ LIBMAN = lib # can't call this LIB - it screws the environment
LIBFLAGSCOMMON = /nologo
LIBFLAGSWE =
LIBFLAGSWI =
-LIBFLAGSHE =
-LIBFLAGSHI =
-LIBFLAGSCE =
-LIBFLAGSCI =
+LIBFLAGSHE =
+LIBFLAGSHI =
+LIBFLAGSCE =
+LIBFLAGSCI =
LIBFLAGSTI =
#LIBFLAGSCV =
diff --git a/mps/code/dbgpool.c b/mps/code/dbgpool.c
index 81827d61ebf..bbd4bd23e56 100644
--- a/mps/code/dbgpool.c
+++ b/mps/code/dbgpool.c
@@ -150,7 +150,7 @@ static Res DebugPoolInit(Pool pool, va_list args)
}
debug->fenceTemplate = options->fenceTemplate;
}
-
+
/* tag init */
debug->tagInit = tagInit;
if (debug->tagInit != NULL) {
@@ -422,7 +422,7 @@ static void TagWalk(Pool pool, ObjectsStepMethod step, void *p)
debug = DebugPoolDebugMixin(pool);
AVER(debug != NULL);
- AVERT(PoolDebugMixin, debug);
+ AVERT(PoolDebugMixin, debug);
node = SplayTreeFirst(&debug->index, (void *)&dummy);
while (node != NULL) {
@@ -480,7 +480,7 @@ void mps_pool_check_fenceposts(mps_pool_t mps_pool)
{
Pool pool = (Pool)mps_pool;
Arena arena;
-
+
/* CHECKT not AVERT, see design.mps.interface.c.check.space */
AVER(CHECKT(Pool, pool));
arena = PoolArena(pool);
diff --git a/mps/code/dumper.c b/mps/code/dumper.c
index c170efa2414..f5e9443cd03 100644
--- a/mps/code/dumper.c
+++ b/mps/code/dumper.c
@@ -29,7 +29,7 @@ typedef struct AddrStruct *Addr;
#define RELATION(type, code, always, kind, format) \
case Event ## type: \
readEvent(#type, #format, header[0], header[1], header[2]); \
- break;
+ break;
#define AVER(test) \
@@ -84,10 +84,10 @@ static void readEvent(char *type, char *format, Word code, Word length,
v = malloc(length * sizeof(Word));
if(v == NULL)
error("Can't allocate string space %u", (unsigned)length);
- n = fread((void *)v, sizeof(Word), length, progin);
- if(n < 1)
- error("Can't read data for string");
- printf("%s ", v);
+ n = fread((void *)v, sizeof(Word), length, progin);
+ if(n < 1)
+ error("Can't read data for string");
+ printf("%s ", v);
length = 0;
} break;
case '0': break;
@@ -101,7 +101,7 @@ static void readEvent(char *type, char *format, Word code, Word length,
AVER(length == 0);
}
-
+
int main(int argc, char *argv[]) {
Word header[3];
size_t arg = 1;
@@ -131,7 +131,7 @@ int main(int argc, char *argv[]) {
continue;
error("Can't read from input");
}
-
+
switch(header[0]) {
#include "eventdef.h"
default:
@@ -142,5 +142,5 @@ int main(int argc, char *argv[]) {
return(0);
}
-
-
+
+
diff --git a/mps/code/event.c b/mps/code/event.c
index f8b91ebe1fe..69f0e2bbe03 100644
--- a/mps/code/event.c
+++ b/mps/code/event.c
@@ -7,10 +7,10 @@
*
* TRANSGRESSIONS (rule.impl.trans)
*
- * .trans.ref: The reference counting used to destroy the mps_io object
+ * .trans.ref: The reference counting used to destroy the mps_io object
* isn't right.
*
- * .trans.log: The log file will be re-created if the lifetimes of
+ * .trans.log: The log file will be re-created if the lifetimes of
* arenas don't overlap, but shared if they do. mps_io_create cannot
* be called twice, but EventInit avoids this anyway.
*
@@ -47,7 +47,7 @@ Word EventKindControl; /* Bit set used to control output. */
Res EventFlush(void)
{
Res res;
-
+
AVER(eventInited);
res = (Res)mps_io_write(eventIO, (void *)eventBuffer,
@@ -122,15 +122,15 @@ void EventFinish(void)
* Flip(M) EventControl(0,M)
* Read() EventControl(0,0)
*/
-
+
Word EventControl(Word resetMask, Word flipMask)
{
Word oldValue = EventKindControl;
-
+
/* EventKindControl = (EventKindControl & ~resetMask) ^ flipMask */
EventKindControl =
BS_SYM_DIFF(BS_DIFF(EventKindControl, resetMask), flipMask);
-
+
return oldValue;
}
@@ -180,13 +180,13 @@ void EventLabelAddr(Addr addr, Word id)
Res (EventSync)(void)
{
- return(ResOK);
+ return(ResOK);
}
Res (EventInit)(void)
{
- return(ResOK);
+ return(ResOK);
}
diff --git a/mps/code/event.h b/mps/code/event.h
index 7d8fc194b5b..d5591327b5d 100644
--- a/mps/code/event.h
+++ b/mps/code/event.h
@@ -64,8 +64,8 @@ extern Res EventFlush(void);
Event##type##Always = always, \
Event##type##Kind = EventKind##kind, \
Event##type##Format = EventFormat##format \
- };
-
+ };
+
#include "eventdef.h"
#undef RELATION
diff --git a/mps/code/eventcom.h b/mps/code/eventcom.h
index d67614f95e0..4ff637ba2f9 100644
--- a/mps/code/eventcom.h
+++ b/mps/code/eventcom.h
@@ -47,8 +47,8 @@ typedef EventUnion *Event;
/* Event types -- see design.mps.telemetry
*
- * These names are intended to be mnemonic. They are derived from
- * selected letters as indicated, using the transliteration in
+ * These names are intended to be mnemonic. They are derived from
+ * selected letters as indicated, using the transliteration in
* guide.hex.trans.
*
* These definitions will be unnecessary when the event codes are
diff --git a/mps/code/eventdef.h b/mps/code/eventdef.h
index 416c7285a8c..e3c7652ae54 100644
--- a/mps/code/eventdef.h
+++ b/mps/code/eventdef.h
@@ -5,9 +5,9 @@
*
* .source: design.mps.telemetry
*
- * .desc: This file declares relationships that define the various
- * event types. It is intended to be used with clever definitions
- * of the RELATION macro.
+ * .desc: This file declares relationships that define the various
+ * event types. It is intended to be used with clever definitions
+ * of the RELATION macro.
*
* TRANSGRESSIONS
*
@@ -44,17 +44,17 @@
/* Relations -- Generic definitions of events
- *
+ *
* These specify:
* - Type: The name of the event type, without the leading "Event";
* - Code: The unique 16-bit code associated with this event type,
* not currently used (see impl.h.eventcom);
- * - Always: Whether this event type should appear in optimised
+ * - Always: Whether this event type should appear in optimised
* varieties, not currently used;
* - Kind: Category into which this event falls, without the
* leading "EventKind";
* - Format: Character sequence indicating the format of the event
- * parameters, similar to writef (Pointer, Addr, Word, Unsigned,
+ * parameters, similar to writef (Pointer, Addr, Word, Unsigned,
* String, Double).
*/
diff --git a/mps/code/eventgen.pl b/mps/code/eventgen.pl
index 28354aebd37..bb330227a3f 100644
--- a/mps/code/eventgen.pl
+++ b/mps/code/eventgen.pl
@@ -30,7 +30,7 @@ $ID = substr(q$Id$, 4, -1);
open(C, ") {
- if(/RELATION\([^,]*,[^,]*,[^,]*,[^,]*, ([A-Z]+)\)/) {
+ if(/RELATION\([^,]*,[^,]*,[^,]*,[^,]*, ([A-Z]+)\)/) {
$Formats{$1} = 1 if(!defined($Formats{$1}));
}
}
diff --git a/mps/code/fmtdy.c b/mps/code/fmtdy.c
index 25ebb1eb52e..dae0dc64e70 100644
--- a/mps/code/fmtdy.c
+++ b/mps/code/fmtdy.c
@@ -182,7 +182,7 @@ int dylan_wrapper_check(mps_word_t *w)
/* size. This assumes that DylanWorks is only going to use byte */
/* vectors in the non-word case. */
- /* Variable part format 6 is reserved. */
+ /* Variable part format 6 is reserved. */
assert(vf != 6);
/* There should be no shift in word vector formats. */
@@ -802,7 +802,7 @@ mps_fmt_B_s *dylan_fmt_B_weak(void)
}
-/* Now we have format variety-independent version that pick the right
+/* Now we have format variety-independent version that pick the right
* format variety and create it.
*/
diff --git a/mps/code/fmtdytst.c b/mps/code/fmtdytst.c
index 54db892f781..cffd3d1c8bd 100644
--- a/mps/code/fmtdytst.c
+++ b/mps/code/fmtdytst.c
@@ -34,7 +34,7 @@ static mps_word_t *tvw;
static mps_word_t dylan_make_WV(mps_word_t version, mps_word_t vb,
- mps_word_t es, mps_word_t vf)
+ mps_word_t es, mps_word_t vf)
{
assert((version & ((1 << 8) - 1)) == version);
assert((vb & ((1 << 8) - 1)) == vb);
diff --git a/mps/code/fmthe.c b/mps/code/fmthe.c
index 9b97cfc3c61..9657c056a80 100644
--- a/mps/code/fmthe.c
+++ b/mps/code/fmthe.c
@@ -159,7 +159,7 @@ static int dylan_wrapper_check(mps_word_t *w)
/* size. This assumes that DylanWorks is only going to use byte */
/* vectors in the non-word case. */
- /* Variable part format 6 is reserved. */
+ /* Variable part format 6 is reserved. */
assert(vf != 6);
/* There should be no shift in word vector formats. */
diff --git a/mps/code/format.c b/mps/code/format.c
index a0951c0b3a4..3fbf0972a9b 100644
--- a/mps/code/format.c
+++ b/mps/code/format.c
@@ -38,7 +38,7 @@ Bool FormatCheck(Format format)
}
-static Addr FormatDefaultClass(Addr object)
+static Addr FormatDefaultClass(Addr object)
{
AVER(object != NULL);
@@ -66,7 +66,7 @@ Res FormatCreate(Format *formatReturn, Arena arena,
AVER(formatReturn != NULL);
- res = ControlAlloc(&p, arena, sizeof(FormatStruct),
+ res = ControlAlloc(&p, arena, sizeof(FormatStruct),
/* withReservoirPermit */ FALSE);
if(res != ResOK)
return res;
@@ -85,7 +85,7 @@ Res FormatCreate(Format *formatReturn, Arena arena,
if(class == NULL) {
format->class = &FormatDefaultClass;
} else {
- AVER(variety == FormatVarietyB);
+ AVER(variety == FormatVarietyB);
format->class = class;
}
if(headerSize != 0) {
@@ -100,7 +100,7 @@ Res FormatCreate(Format *formatReturn, Arena arena,
++arena->formatSerial;
AVERT(Format, format);
-
+
RingAppend(&arena->formatRing, &format->arenaRing);
*formatReturn = format;
@@ -115,7 +115,7 @@ void FormatDestroy(Format format)
RingRemove(&format->arenaRing);
format->sig = SigInvalid;
-
+
RingFinish(&format->arenaRing);
ControlFree(format->arena, format, sizeof(FormatStruct));
@@ -134,10 +134,10 @@ Arena FormatArena(Format format)
Res FormatDescribe(Format format, mps_lib_FILE *stream)
{
Res res;
-
+
res = WriteF(stream,
"Format $P ($U) {\n", (WriteFP)format, (WriteFU)format->serial,
- " arena $P ($U)\n",
+ " arena $P ($U)\n",
(WriteFP)format->arena, (WriteFU)format->arena->serial,
" alignment $W\n", (WriteFW)format->alignment,
" scan $F\n", (WriteFF)format->scan,
diff --git a/mps/code/global.c b/mps/code/global.c
index 8757a150b04..6cc309c1162 100644
--- a/mps/code/global.c
+++ b/mps/code/global.c
@@ -33,7 +33,7 @@ SRCID(global, "$Id$");
/* All static data objects are declared here. See .static */
/* design.mps.arena.static.ring.init */
-static Bool arenaRingInit = FALSE;
+static Bool arenaRingInit = FALSE;
static RingStruct arenaRing; /* design.mps.arena.static.ring */
@@ -274,7 +274,7 @@ Res GlobalsInit(Globals arenaGlobals)
for (i=0; i < TraceLIMIT; i++) {
/* design.mps.arena.trace.invalid */
- arena->trace[i].sig = SigInvalid;
+ arena->trace[i].sig = SigInvalid;
}
for(rank = 0; rank < RankLIMIT; ++rank)
RingInit(&arena->greyRing[rank]);
@@ -386,7 +386,7 @@ void GlobalsPrepareToDestroy(Globals arenaGlobals)
/* throw away the BT used by messages */
if (arena->enabledMessageTypes != NULL) {
- ControlFree(arena, (void *)arena->enabledMessageTypes,
+ ControlFree(arena, (void *)arena->enabledMessageTypes,
BTSize(MessageTypeLIMIT));
arena->enabledMessageTypes = NULL;
}
@@ -519,7 +519,7 @@ Bool ArenaAccess(Addr addr, AccessSet mode, MutatorFaultContext context)
* ArenaPoll does nothing if the amount of committed memory is less than
* the arena poll threshold. This means that actions are taken as the
* memory demands increase.
- *
+ *
* @@@@ This is where time is "stolen" from the mutator in addition
* to doing what it asks and servicing accesses. This is where the
* amount of time should be controlled, perhaps by passing time
@@ -678,7 +678,7 @@ Ref ArenaRead(Arena arena, Addr addr)
Seg seg;
AVERT(Arena, arena);
-
+
b = SegOfAddr(&seg, arena, addr);
AVER(b == TRUE);
@@ -748,7 +748,7 @@ Res GlobalsDescribe(Globals arenaGlobals, mps_lib_FILE *stream)
NULL);
if (res != ResOK) return res;
}
-
+
res = WriteF(stream,
" [note: indices are raw, not rotated]\n"
" prehistory = $B\n", (WriteFB)arena->prehistory,
diff --git a/mps/code/gp.gmk b/mps/code/gp.gmk
index b4c63b4e351..8877dcec310 100644
--- a/mps/code/gp.gmk
+++ b/mps/code/gp.gmk
@@ -4,7 +4,7 @@
# Copyright (c) 2001 Ravenbrook Limited.
#
# This file is included by platform makefiles that use the GNU CC
-# compiler with gprof. It defines the compiler specific variables
+# compiler with gprof. It defines the compiler specific variables
# that the common makefile fragment (impl.gmk.comm) requires.
diff --git a/mps/code/idlench.awk b/mps/code/idlench.awk
index b212e4c213f..6618ab57c38 100644
--- a/mps/code/idlench.awk
+++ b/mps/code/idlench.awk
@@ -27,7 +27,7 @@
# in the 3rd column: FUNCTION) whose names completely fill the column.
#
# A typical invocation might be:
-#
+#
# ./idlench.awk sos8cx/ci/*.o
#
# Not all awks are UNIX98 compliant; you need to find one that is.
diff --git a/mps/code/ld.c b/mps/code/ld.c
index af2cc966879..fcc0a052386 100644
--- a/mps/code/ld.c
+++ b/mps/code/ld.c
@@ -152,7 +152,7 @@ Bool LDIsStale(LD ld, Arena arena, Addr addr)
*
* This stores the fact that a set of references has changed in
* the history in the arena structure, and increments the epoch.
- *
+ *
* This is only called during a 'flip', because it must be atomic
* w.r.t. the mutator (and therefore w.r.t. LdIsStale). This is
* because it updates the notion of the 'current' and 'oldest' history
diff --git a/mps/code/lock.h b/mps/code/lock.h
index c7e5c788c95..6111c2c075a 100644
--- a/mps/code/lock.h
+++ b/mps/code/lock.h
@@ -71,7 +71,7 @@
*
* LockClaimGlobalRecursive & LockReleaseGlobalRecursive are
* similar to LockClaimRecursive & LockReleaseRecursive
- * except that they lock an implicit global lock. This may be
+ * except that they lock an implicit global lock. This may be
* used for locking access to data structures which are global,
* such as class objects.
*/
@@ -82,7 +82,7 @@
#include "mpm.h"
-#define LockSig ((Sig)0x51970CC9) /* SIGnature LOCK */
+#define LockSig ((Sig)0x51970CC9) /* SIGnature LOCK */
#if defined(THREAD_MULTI)
@@ -158,8 +158,8 @@ extern Bool LockCheck(Lock lock);
/* LockClaimGlobalRecursive
*
- * This is called to increase the number of claims on the recursive
- * global lock. LockClaimRecursive will wait until the lock is not
+ * This is called to increase the number of claims on the recursive
+ * global lock. LockClaimRecursive will wait until the lock is not
* owned by another thread and return with the lock owned.
* This can be called recursively.
*/
@@ -169,9 +169,9 @@ extern void LockClaimGlobalRecursive(void);
/* LockReleaseGlobalRecursive
*
- * This is called to reduce the number of claims on the recursive
- * global lock. If the number of claims drops to zero, ownership
- * is relinquished. This must not be called without possession of
+ * This is called to reduce the number of claims on the recursive
+ * global lock. If the number of claims drops to zero, ownership
+ * is relinquished. This must not be called without possession of
* the lock.
*/
diff --git a/mps/code/lockan.c b/mps/code/lockan.c
index 786136ea1f9..67f319c225f 100644
--- a/mps/code/lockan.c
+++ b/mps/code/lockan.c
@@ -80,8 +80,8 @@ void (LockReleaseRecursive)(Lock lock)
}
-/* Global locking is performed by normal locks.
- * A separate lock structure is used for recursive and
+/* Global locking is performed by normal locks.
+ * A separate lock structure is used for recursive and
* non-recursive locks so that each may be differently ordered
* with respect to client-allocated locks.
*/
diff --git a/mps/code/lockli.c b/mps/code/lockli.c
index d11d61abbe4..e89353dc857 100644
--- a/mps/code/lockli.c
+++ b/mps/code/lockli.c
@@ -3,23 +3,23 @@
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
*
- * .linux: This implementation currently just supports LinuxThreads
+ * .linux: This implementation currently just supports LinuxThreads
* (platform MPS_OS_LI), Single Unix i/f.
*
* .posix: In fact, the implementation should be reusable for most POSIX
- * implementations, but may need some customization for each.
+ * implementations, but may need some customization for each.
*
* .design: These locks are implemented using mutexes.
*
- * .recursive: Mutexes support both non-recursive and recursive locking, but
- * only at initialization time. This doesn't match the API of MPS Lock module,
+ * .recursive: Mutexes support both non-recursive and recursive locking, but
+ * only at initialization time. This doesn't match the API of MPS Lock module,
* which chooses at locking time, so all locks are made (non-recursive)
* errorchecking. Recursive locks are implemented by checking the error
* code.
*
* .claims: During use the claims field is updated to remember the number of
* claims acquired on a lock. This field must only be modified
- * while we hold the mutex.
+ * while we hold the mutex.
*/
#define _XOPEN_SOURCE 500
@@ -50,7 +50,7 @@ SRCID(lockli, "$Id$");
#define LockAttrSetRecursive(attrptr) \
pthread_mutexattr_setkind_np(attrptr, PTHREAD_MUTEX_ERRORCHECK_NP)
-
+
#else
#define LockAttrSetRecursive(attrptr) \
@@ -173,7 +173,7 @@ void LockClaimRecursive(Lock lock)
/* pthread_mutex_lock will return: */
/* 0 if we have just claimed the lock */
/* EDEADLK if we own the lock already. */
- AVER((res == 0 && lock->claims == 0) ||
+ AVER((res == 0 && lock->claims == 0) ||
(res == EDEADLK && lock->claims > 0));
++lock->claims;
diff --git a/mps/code/lockw3.c b/mps/code/lockw3.c
index 2d9d1af0fb5..7d70b1fe5f9 100644
--- a/mps/code/lockw3.c
+++ b/mps/code/lockw3.c
@@ -105,8 +105,8 @@ void LockReleaseRecursive(Lock lock)
-/* Global locking is performed by normal locks.
- * A separate lock structure is used for recursive and
+/* Global locking is performed by normal locks.
+ * A separate lock structure is used for recursive and
* non-recursive locks so that each may be differently ordered
* with respect to client-allocated locks.
*/
diff --git a/mps/code/locus.c b/mps/code/locus.c
index cb6c90f67f9..fe8a97b4a31 100644
--- a/mps/code/locus.c
+++ b/mps/code/locus.c
@@ -334,7 +334,7 @@ Res ChainCondemnAll(Chain chain, Trace trace)
haveWhiteSegs = TRUE;
}
}
-
+
return ResOK;
failBegin:
diff --git a/mps/code/message.c b/mps/code/message.c
index d30d306db41..8390a124b79 100644
--- a/mps/code/message.c
+++ b/mps/code/message.c
@@ -270,7 +270,7 @@ static Bool MessageTypeEnabled(Arena arena, MessageType type)
return BTGet(arena->enabledMessageTypes, type);
}
-
+
void MessageTypeEnable(Arena arena, MessageType type)
{
@@ -312,7 +312,7 @@ static void MessageDelete(Message message)
/* type specific dispatch methods */
-void MessageFinalizationRef(Ref *refReturn, Arena arena,
+void MessageFinalizationRef(Ref *refReturn, Arena arena,
Message message)
{
AVER(refReturn != NULL);
@@ -355,7 +355,7 @@ Size MessageGCNotCondemnedSize(Message message)
/* type-specific stub methods */
-void MessageNoFinalizationRef(Ref *refReturn, Arena arena,
+void MessageNoFinalizationRef(Ref *refReturn, Arena arena,
Message message)
{
AVER(refReturn != NULL);
diff --git a/mps/code/messtest.c b/mps/code/messtest.c
index a34a4f82df8..fe303d0c5ff 100644
--- a/mps/code/messtest.c
+++ b/mps/code/messtest.c
@@ -5,7 +5,7 @@
*/
#include "mpm.h"
-#include "mpsavm.h"
+#include "mpsavm.h"
#include "mps.h"
#include "testlib.h"
@@ -32,7 +32,7 @@ static MessageClassStruct DFMessageClassStruct = {
"DummyFinal", /* name */
dfMessageDelete, /* Delete */
MessageNoFinalizationRef, /* FinalizationRef */
- MessageNoGCLiveSize, /* GCLiveSize */
+ MessageNoGCLiveSize, /* GCLiveSize */
MessageNoGCCondemnedSize, /* GCCondemnedSize */
MessageNoGCNotCondemnedSize, /* GCNoteCondemnedSize */
MessageClassSig /* design.mps.message.class.sig.double */
@@ -46,7 +46,7 @@ static MessageClassStruct DGCMessageClassStruct = {
"DummyGC", /* name */
dfMessageDelete, /* Delete */
MessageNoFinalizationRef, /* FinalizationRef */
- MessageNoGCLiveSize, /* GCLiveSize */
+ MessageNoGCLiveSize, /* GCLiveSize */
MessageNoGCCondemnedSize, /* GCCondemnedSize */
MessageNoGCNotCondemnedSize, /* GCNoteCondemnedSize */
MessageClassSig /* design.mps.message.class.sig.double */
@@ -121,7 +121,7 @@ static void eatMessageOfType(Arena arena, MessageType type)
}
-/* eatHiddenMessage -- get a message which isn't at top of queue
+/* eatHiddenMessage -- get a message which isn't at top of queue
*
* Assumes there is at least 1 message of each of Finalization
* and GC types.
@@ -141,7 +141,7 @@ static void eatHiddenMessage(Arena arena)
}
-/* eatTopMessageOfType -- get a message which is at top of queue
+/* eatTopMessageOfType -- get a message which is at top of queue
*
* The message must be of the specified type.
* Assumes there is at least 1 message on the queue.
@@ -157,7 +157,7 @@ static void eatTopMessageOfType(Arena arena, MessageType type)
}
-/* eatTopMessage -- get a message which is at top of queue
+/* eatTopMessage -- get a message which is at top of queue
*
* Assumes there is at least 1 message on the queue.
*/
@@ -175,7 +175,7 @@ static void eatTopMessage(Arena arena)
/* testInterleaving -- test interleaving messages of different types
*
* See request.dylan.160204
- * must be able to retrieve a message even if a message of
+ * must be able to retrieve a message even if a message of
* another type is at the head of the queue.
*/
@@ -253,7 +253,7 @@ extern int main(int argc, char *argv[])
mps_arena_t mpsArena;
Arena arena;
- testlib_unused(argc);
+ testlib_unused(argc);
testlib_unused(argv);
die(mps_arena_create(&mpsArena, mps_arena_class_vm(), testArenaSIZE),
diff --git a/mps/code/meter.c b/mps/code/meter.c
index ff2d365bccc..660eae8cf73 100644
--- a/mps/code/meter.c
+++ b/mps/code/meter.c
@@ -77,7 +77,7 @@ Res MeterWrite(Meter meter, mps_lib_FILE *stream)
return res;
if (meter->count > 0) {
double mean = meter->total / (double)meter->count;
-
+
res = WriteF(stream,
", total: $D", meter->total,
", max: $U", meter->max,
diff --git a/mps/code/meter.h b/mps/code/meter.h
index b40150f9dbd..56fbb41849a 100644
--- a/mps/code/meter.h
+++ b/mps/code/meter.h
@@ -22,7 +22,7 @@
typedef struct MeterStruct *Meter;
-typedef struct MeterStruct
+typedef struct MeterStruct
{
char *name;
Count count;
diff --git a/mps/code/misc.h b/mps/code/misc.h
index c8b4d579a14..d8ecff44f36 100644
--- a/mps/code/misc.h
+++ b/mps/code/misc.h
@@ -115,7 +115,7 @@ typedef const struct SrcIdStruct {
* is syntactically a statement (to avoid it being used in computation).
*
* .discard: DISCARD uses sizeof so that the expression is not evaluated
- * and yet the compiler will check that it is a valid expression. The
+ * and yet the compiler will check that it is a valid expression. The
* conditional is compared with zero so it can designate a bitfield object.
*/
@@ -140,12 +140,12 @@ typedef const struct SrcIdStruct {
/* UNUSED -- declare parameter unused
*
* This macro supresses warnings about unused parameters. It should be
- * applied to the parameter at the beginning of the body of the
+ * applied to the parameter at the beginning of the body of the
* procedure.
*
* The cast to void appears to work for GCC, MSVC, and CodeWarrior.
* It's a shame there's no way to ensure that the parameter won't be
- * used. We could scramble it, but that's undesirable in release
+ * used. We could scramble it, but that's undesirable in release
* versions.
*/
diff --git a/mps/code/mpm.c b/mps/code/mpm.c
index 1335c6c6e36..bfdc8dad29e 100644
--- a/mps/code/mpm.c
+++ b/mps/code/mpm.c
@@ -64,7 +64,7 @@ Bool MPMCheck(void)
CHECKL(-(DBL_MIN_10_EXP) <= DBL_MAX_10_EXP);
}
- return TRUE;
+ return TRUE;
}
@@ -194,7 +194,7 @@ Addr (AddrAlignDown)(Addr addr, Align alignment)
}
-/* ResIsAllocFailure
+/* ResIsAllocFailure
*
* Test whether a result code is in the set of allocation failure codes. */
@@ -209,7 +209,7 @@ Bool ResIsAllocFailure(Res res)
* Output as an unsigned value in the given base (2-16), padded to the
* given width. */
-static Res WriteWord(mps_lib_FILE *stream, Word w, unsigned base,
+static Res WriteWord(mps_lib_FILE *stream, Word w, unsigned base,
unsigned width)
{
static const char digit[16] = "0123456789ABCDEF";
@@ -222,7 +222,7 @@ static Res WriteWord(mps_lib_FILE *stream, Word w, unsigned base,
AVER(stream != NULL);
AVER(2 <= base && base <= 16);
AVER(width <= MPS_WORD_WIDTH);
-
+
/* Add digits to the buffer starting at the right-hand end, so that */
/* the buffer forms a string representing the number. A do...while */
/* loop is used to ensure that at least one digit (zero) is written */
@@ -262,7 +262,7 @@ static Res WriteWord(mps_lib_FILE *stream, Word w, unsigned base,
* .write.double.check: There being no DBL_EXP_DIG, we assume that it is
* less than DBL_DIG. */
-static Res WriteDouble(mps_lib_FILE *stream, double d)
+static Res WriteDouble(mps_lib_FILE *stream, double d)
{
double F = d;
int E = 0, i, x = 0;
@@ -281,7 +281,7 @@ static Res WriteDouble(mps_lib_FILE *stream, double d)
/* terminator. See .write.double.check. */
char buf[1+DBL_DIG+2+1+1+DBL_DIG+1];
int j = 0;
-
+
if (F == 0.0) {
if (mps_lib_fputs("0", stream) == mps_lib_EOF)
return ResIO;
@@ -293,7 +293,7 @@ static Res WriteDouble(mps_lib_FILE *stream, double d)
j++;
F = - F;
}
-
+
/* This scaling operation could introduce rounding errors. */
for ( ; F >= 1.0 ; F /= 10.0) {
E++;
@@ -305,7 +305,7 @@ static Res WriteDouble(mps_lib_FILE *stream, double d)
}
for ( ; F < 0.1; F *= 10)
E--;
-
+
/* See if %e notation is required */
if (E > expmax || E <= expmin) {
x = E - 1;
@@ -330,7 +330,7 @@ static Res WriteDouble(mps_lib_FILE *stream, double d)
/* the exponent. This is Steele and White's FP3 algorithm. */
do {
int U;
-
+
if (E == 0) {
buf[j] = '.';
j++;
@@ -351,7 +351,7 @@ static Res WriteDouble(mps_lib_FILE *stream, double d)
buf[j] = digits[U];
j++;
} while (1);
-
+
/* Insert trailing 0's */
for (i = E; i > 0; i--) {
buf[j] = '0';
@@ -384,7 +384,7 @@ static Res WriteDouble(mps_lib_FILE *stream, double d)
} while (i > 0);
}
buf[j] = '\0'; /* arnold */
-
+
if (mps_lib_fputs(buf, stream) == mps_lib_EOF)
return ResIO;
return ResOK;
@@ -413,7 +413,7 @@ Res WriteF(mps_lib_FILE *stream, ...)
va_list args;
AVER(stream != NULL);
-
+
va_start(args, stream);
for(;;) {
@@ -432,14 +432,14 @@ Res WriteF(mps_lib_FILE *stream, ...)
switch(*format) {
case 'A': { /* address */
WriteFA addr = va_arg(args, WriteFA);
- res = WriteWord(stream, (Word)addr, 16,
+ res = WriteWord(stream, (Word)addr, 16,
(sizeof(WriteFA) * CHAR_BIT + 3) / 4);
if (res != ResOK) return res;
} break;
case 'P': { /* pointer, see .writef.p */
WriteFP p = va_arg(args, WriteFP);
- res = WriteWord(stream, (Word)p, 16,
+ res = WriteWord(stream, (Word)p, 16,
(sizeof(WriteFP) * CHAR_BIT + 3)/ 4);
if (res != ResOK) return res;
} break;
@@ -448,24 +448,24 @@ Res WriteF(mps_lib_FILE *stream, ...)
WriteFF f = va_arg(args, WriteFF);
Byte *b = (Byte *)&f;
for(i=0; i < sizeof(WriteFF); i++) {
- res = WriteWord(stream, (Word)(b[i]), 16,
+ res = WriteWord(stream, (Word)(b[i]), 16,
(CHAR_BIT + 3) / 4);
if (res != ResOK) return res;
}
} break;
-
+
case 'S': { /* string */
WriteFS s = va_arg(args, WriteFS);
r = mps_lib_fputs((const char *)s, stream);
if (r == mps_lib_EOF) return ResIO;
} break;
-
+
case 'C': { /* character */
WriteFC c = va_arg(args, WriteFC); /* promoted */
r = mps_lib_fputc((int)c, stream);
if (r == mps_lib_EOF) return ResIO;
} break;
-
+
case 'W': { /* word */
WriteFW w = va_arg(args, WriteFW);
res = WriteWord(stream, (Word)w, 16,
@@ -484,7 +484,7 @@ Res WriteF(mps_lib_FILE *stream, ...)
res = WriteWord(stream, (Word)b, 2, sizeof(WriteFB) * CHAR_BIT);
if (res != ResOK) return res;
} break;
-
+
case '$': { /* dollar char */
r = mps_lib_fputc('$', stream);
if (r == mps_lib_EOF) return ResIO;
@@ -495,7 +495,7 @@ Res WriteF(mps_lib_FILE *stream, ...)
res = WriteDouble(stream, d);
if (res != ResOK) return res;
} break;
-
+
default:
NOTREACHED;
}
@@ -504,9 +504,9 @@ Res WriteF(mps_lib_FILE *stream, ...)
++format;
}
}
-
+
va_end(args);
-
+
return ResOK;
}
@@ -519,7 +519,7 @@ size_t StringLength(const char *s)
AVER(s != NULL);
- for(i = 0; s[i] != '\0'; i++)
+ for(i = 0; s[i] != '\0'; i++)
NOOP;
return(i);
}
diff --git a/mps/code/mpm.h b/mps/code/mpm.h
index a52a2fe9717..990e51607d8 100644
--- a/mps/code/mpm.h
+++ b/mps/code/mpm.h
@@ -124,13 +124,13 @@ extern Bool ResIsAllocFailure(Res res);
/* Logs and Powers
- *
+ *
* SizeIsP2 returns TRUE if and only if size is a non-negative integer
* power of 2, and FALSE otherwise.
- *
+ *
* SizeLog2 returns the logarithm in base 2 of size. size must be a
* power of 2.
- *
+ *
* SizeFloorLog2 returns the floor of the logarithm in base 2 of size.
* size can be any positive non-zero value. */
@@ -236,7 +236,7 @@ extern Res PoolCreateV(Pool *poolReturn, Arena arena, PoolClass class,
va_list arg);
extern void PoolDestroy(Pool pool);
extern BufferClass PoolDefaultBufferClass(Pool pool);
-extern Res PoolAlloc(Addr *pReturn, Pool pool, Size size,
+extern Res PoolAlloc(Addr *pReturn, Pool pool, Size size,
Bool withReservoirPermit);
extern void PoolFree(Pool pool, Addr old, Size size);
extern Res PoolTraceBegin(Pool pool, Trace trace);
@@ -267,9 +267,9 @@ extern Res PoolNoBufferFill(Addr *baseReturn, Addr *limitReturn,
extern Res PoolTrivBufferFill(Addr *baseReturn, Addr *limitReturn,
Pool pool, Buffer buffer, Size size,
Bool withReservoirPermit);
-extern void PoolNoBufferEmpty(Pool pool, Buffer buffer,
+extern void PoolNoBufferEmpty(Pool pool, Buffer buffer,
Addr init, Addr limit);
-extern void PoolTrivBufferEmpty(Pool pool, Buffer buffer,
+extern void PoolTrivBufferEmpty(Pool pool, Buffer buffer,
Addr init, Addr limit);
extern Res PoolTrivDescribe(Pool pool, mps_lib_FILE *stream);
extern Res PoolNoTraceBegin(Pool pool, Trace trace);
@@ -459,7 +459,7 @@ extern Res TraceScanArea(ScanState ss, Addr *base, Addr *limit);
extern Res TraceScanAreaTagged(ScanState ss, Addr *base, Addr *limit);
extern Res TraceScanAreaMasked(ScanState ss,
Addr *base, Addr *limit, Word mask);
-extern void TraceScanSingleRef(TraceSet ts, Rank rank, Arena arena,
+extern void TraceScanSingleRef(TraceSet ts, Rank rank, Arena arena,
Seg seg, Ref *refIO);
@@ -531,7 +531,7 @@ extern Bool ArenaHasAddr(Arena arena, Addr addr);
extern Res ControlInit(Arena arena);
extern void ControlFinish(Arena arena);
-extern Res ControlAlloc(void **baseReturn, Arena arena, size_t size,
+extern Res ControlAlloc(void **baseReturn, Arena arena, size_t size,
Bool withReservoirPermit);
extern void ControlFree(Arena arena, void *base, size_t size);
@@ -684,9 +684,9 @@ extern Addr (SegLimit)(Seg seg);
/* Buffer Interface -- see impl.c.buffer */
-extern Res BufferCreate(Buffer *bufferReturn, BufferClass class,
+extern Res BufferCreate(Buffer *bufferReturn, BufferClass class,
Pool pool, Bool isMutator, ...);
-extern Res BufferCreateV(Buffer *bufferReturn, BufferClass class,
+extern Res BufferCreateV(Buffer *bufferReturn, BufferClass class,
Pool pool, Bool isMutator, va_list args);
extern void BufferDestroy(Buffer buffer);
extern Bool BufferCheck(Buffer buffer);
@@ -718,7 +718,7 @@ extern Bool BufferIsReset(Buffer buffer);
extern Bool BufferIsReady(Buffer buffer);
extern Bool BufferIsMutator(Buffer buffer);
extern void BufferSetAllocAddr(Buffer buffer, Addr addr);
-extern void BufferAttach(Buffer buffer,
+extern void BufferAttach(Buffer buffer,
Addr base, Addr limit, Addr init, Size size);
extern void BufferDetach(Buffer buffer, Pool pool);
extern void BufferFlip(Buffer buffer);
@@ -778,7 +778,7 @@ extern AllocPattern AllocPatternRampCollectAll(void);
/* Format Interface -- see impl.c.format */
extern Bool FormatCheck(Format format);
-extern Res FormatCreate(Format *formatReturn, Arena arena,
+extern Res FormatCreate(Format *formatReturn, Arena arena,
Align alignment,
FormatVariety variety,
FormatScanMethod scan,
@@ -852,13 +852,13 @@ extern void (ShieldFlush)(Arena arena);
#define ShieldRaise(arena, seg, mode) \
BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
#define ShieldLower(arena, seg, mode) \
- BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
+ BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
#define ShieldEnter(arena) BEGIN UNUSED(arena); END
#define ShieldLeave(arena) BEGIN UNUSED(arena); END
#define ShieldExpose(arena, seg) \
- BEGIN UNUSED(arena); UNUSED(seg); END
+ BEGIN UNUSED(arena); UNUSED(seg); END
#define ShieldCover(arena, seg) \
- BEGIN UNUSED(arena); UNUSED(seg); END
+ BEGIN UNUSED(arena); UNUSED(seg); END
#define ShieldSuspend(arena) BEGIN UNUSED(arena); END
#define ShieldResume(arena) BEGIN UNUSED(arena); END
#define ShieldFlush(arena) BEGIN UNUSED(arena); END
@@ -907,7 +907,7 @@ extern Res RootCreateReg(Root *rootReturn, Arena arena,
RootScanRegMethod scan,
void *p, size_t s);
extern Res RootCreateFmt(Root *rootReturn, Arena arena,
- Rank rank, RootMode mode,
+ Rank rank, RootMode mode,
FormatScanMethod scan,
Addr base, Addr limit);
extern Res RootCreateFun(Root *rootReturn, Arena arena,
diff --git a/mps/code/mpmss.c b/mps/code/mpmss.c
index 2a2793dc48e..3394e283f70 100644
--- a/mps/code/mpmss.c
+++ b/mps/code/mpmss.c
@@ -58,7 +58,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
int j = rnd()%(testSetSIZE-i);
void *tp;
size_t ts;
-
+
tp = ps[j]; ts = ss[j];
ps[j] = ps[i]; ss[j] = ss[i];
ps[i] = tp; ss[i] = ts;
@@ -78,7 +78,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
if (res != MPS_RES_OK) return res;
}
}
-
+
mps_pool_destroy(pool);
return MPS_RES_OK;
diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h
index 5f94f375a90..6c7a50642e5 100644
--- a/mps/code/mpmst.h
+++ b/mps/code/mpmst.h
@@ -120,7 +120,7 @@ typedef struct PoolStruct { /* generic structure */
* The signature is placed at the end, see
* design.mps.pool.outer-structure.sig. */
-#define MFSSig ((Sig)0x5193F599) /* SIGnature MFS */
+#define MFSSig ((Sig)0x5193F599) /* SIGnature MFS */
typedef struct MFSStruct { /* MFS outer structure */
PoolStruct poolStruct; /* generic structure */
@@ -168,7 +168,7 @@ typedef struct MVStruct { /* MV pool outer structure */
* conventions because it's not intended for general use and the use of
* a pool is an incidental detail. */
-#define ReservoirSig ((Sig)0x5196e599) /* SIGnature REServoir */
+#define ReservoirSig ((Sig)0x5196e599) /* SIGnature REServoir */
typedef struct ReservoirStruct { /* Reservoir structure */
PoolStruct poolStruct; /* generic pool structure */
@@ -179,7 +179,7 @@ typedef struct ReservoirStruct { /* Reservoir structure */
} ReservoirStruct;
-/* MessageClassStruct -- Message Class structure
+/* MessageClassStruct -- Message Class structure
*
* See design.mps.message.class.struct (and design.mps.message.message,
* and design.mps.message.class). */
@@ -194,7 +194,7 @@ typedef struct MessageClassStruct {
MessageDeleteMethod delete; /* terminates a message */
/* methods specific to MessageTypeFinalization */
- MessageFinalizationRefMethod finalizationRef;
+ MessageFinalizationRefMethod finalizationRef;
/* methods specific to MessageTypeGC */
MessageGCLiveSizeMethod gcLiveSize;
@@ -254,7 +254,7 @@ typedef struct SegClassStruct {
* .seg: Segments are the basic units of protection and tracer activity
* for allocated memory. See design.mps.seg. */
-#define SegSig ((Sig)0x5195E999) /* SIGnature SEG */
+#define SegSig ((Sig)0x5195E999) /* SIGnature SEG */
typedef struct SegStruct { /* segment structure */
Sig sig; /* impl.h.misc.sig */
@@ -277,7 +277,7 @@ typedef struct SegStruct { /* segment structure */
* .seggc: GCSeg is a subclass of Seg with support for buffered
* allocation and GC. See design.mps.seg. */
-#define GCSegSig ((Sig)0x5199C5E9) /* SIGnature GC SEG */
+#define GCSegSig ((Sig)0x5199C5E9) /* SIGnature GC SEG */
typedef struct GCSegStruct { /* GC segment structure */
SegStruct segStruct; /* superclass fields must come first */
@@ -289,14 +289,14 @@ typedef struct GCSegStruct { /* GC segment structure */
/* SegPrefStruct -- segment preference structure
- *
- * .seg-pref: arena memory users (pool class code) need a way of
+ *
+ * .seg-pref: arena memory users (pool class code) need a way of
* expressing preferences about the segments they allocate.
- *
+ *
* .seg-pref.misleading: The name is historical and misleading. SegPref
* objects need have nothing to do with segments. @@@@ */
-#define SegPrefSig ((Sig)0x5195E9B6) /* SIGnature SEG PRef */
+#define SegPrefSig ((Sig)0x5195E9B6) /* SIGnature SEG PRef */
typedef struct SegPrefStruct { /* segment placement preferences */
Sig sig; /* impl.h.misc.sig */
@@ -390,7 +390,7 @@ typedef struct BufferStruct {
* .segbuf: SegBuf is a subclass of Buffer with support for attachment
* to segments. */
-#define SegBufSig ((Sig)0x51959B0F) /* SIGnature SeG BUFfer */
+#define SegBufSig ((Sig)0x51959B0F) /* SIGnature SeG BUFfer */
typedef struct SegBufStruct {
BufferStruct bufferStruct; /* superclass fields must come first */
@@ -657,7 +657,7 @@ typedef struct ArenaStruct {
/* thread fields (impl.c.thread) */
RingStruct threadRing; /* ring of attached threads */
Serial threadSerial; /* serial of next thread */
-
+
/* shield fields (impl.c.shield) */
Bool insideShield; /* TRUE if and only if inside shield */
Seg shCache[ShieldCacheSIZE]; /* Cache of unsynced segs */
diff --git a/mps/code/mpmtypes.h b/mps/code/mpmtypes.h
index e439bb18020..5263163688e 100644
--- a/mps/code/mpmtypes.h
+++ b/mps/code/mpmtypes.h
@@ -51,7 +51,7 @@ typedef unsigned TraceSet; /* design.mps.trace */
typedef unsigned TraceState; /* design.mps.trace */
typedef unsigned AccessSet; /* design.mps.type.access-set */
typedef unsigned Attr; /* design.mps.type.attr */
-typedef unsigned FormatVariety;
+typedef unsigned FormatVariety;
typedef int RootVar; /* design.mps.type.rootvar */
typedef Word *BT; /* design.mps.bt */
@@ -148,16 +148,16 @@ typedef void (*SegFinishMethod)(Seg seg);
typedef void (*SegSetGreyMethod)(Seg seg, TraceSet grey);
typedef void (*SegSetWhiteMethod)(Seg seg, TraceSet white);
typedef void (*SegSetRankSetMethod)(Seg seg, RankSet rankSet);
-typedef void (*SegSetRankSummaryMethod)(Seg seg, RankSet rankSet,
+typedef void (*SegSetRankSummaryMethod)(Seg seg, RankSet rankSet,
RefSet summary);
typedef void (*SegSetSummaryMethod)(Seg seg, RefSet summary);
typedef Buffer (*SegBufferMethod)(Seg seg);
typedef void (*SegSetBufferMethod)(Seg seg, Buffer buffer);
typedef Res (*SegDescribeMethod)(Seg seg, mps_lib_FILE *stream);
-typedef Res (*SegMergeMethod)(Seg seg, Seg segHi,
+typedef Res (*SegMergeMethod)(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args);
-typedef Res (*SegSplitMethod)(Seg seg, Seg segHi,
+typedef Res (*SegSplitMethod)(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args);
@@ -165,7 +165,7 @@ typedef Res (*SegSplitMethod)(Seg seg, Seg segHi,
typedef Res (*BufferInitMethod)(Buffer buffer, Pool pool, va_list args);
typedef void (*BufferFinishMethod)(Buffer buffer);
-typedef void (*BufferAttachMethod)(Buffer buffer, Addr base, Addr limit,
+typedef void (*BufferAttachMethod)(Buffer buffer, Addr base, Addr limit,
Addr init, Size size);
typedef void (*BufferDetachMethod)(Buffer buffer);
typedef Seg (*BufferSegMethod)(Buffer buffer);
@@ -187,7 +187,7 @@ typedef void (*PoolFreeMethod)(Pool pool, Addr old, Size size);
typedef Res (*PoolBufferFillMethod)(Addr *baseReturn, Addr *limitReturn,
Pool pool, Buffer buffer, Size size,
Bool withReservoirPermit);
-typedef void (*PoolBufferEmptyMethod)(Pool pool, Buffer buffer,
+typedef void (*PoolBufferEmptyMethod)(Pool pool, Buffer buffer,
Addr init, Addr limit);
typedef Res (*PoolTraceBeginMethod)(Pool pool, Trace trace);
typedef Res (*PoolAccessMethod)(Pool pool, Seg seg, Addr addr,
@@ -204,7 +204,7 @@ typedef Res (*PoolFixEmergencyMethod)(Pool pool, ScanState ss,
typedef void (*PoolReclaimMethod)(Pool pool, Trace trace, Seg seg);
typedef void (*PoolRampBeginMethod)(Pool pool, Buffer buf, Bool collectAll);
typedef void (*PoolRampEndMethod)(Pool pool, Buffer buf);
-typedef Res (*PoolFramePushMethod)(AllocFrame *frameReturn,
+typedef Res (*PoolFramePushMethod)(AllocFrame *frameReturn,
Pool pool, Buffer buf);
typedef Res (*PoolFramePopMethod)(Pool pool, Buffer buf,
AllocFrame frame);
@@ -258,7 +258,7 @@ typedef Res (*RootScanRegMethod)(ScanState ss, Thread thread, void *p, size_t s)
/* design.mps.sig SIGnature IS BAD */
-#define SigInvalid ((Sig)0x51915BAD)
+#define SigInvalid ((Sig)0x51915BAD)
#define SizeMAX ((Size)-1)
#define AccessSetEMPTY ((AccessSet)0) /* design.mps.type.access-set */
@@ -304,7 +304,7 @@ enum {
/* Segment preferences */
enum {
SegPrefHigh = 1,
- SegPrefLow,
+ SegPrefLow,
SegPrefZoneSet,
SegPrefGen,
SegPrefCollected,
diff --git a/mps/code/mps.h b/mps/code/mps.h
index b3613b7f789..6adb0f6d84e 100644
--- a/mps/code/mps.h
+++ b/mps/code/mps.h
@@ -258,7 +258,7 @@ extern size_t mps_arena_commit_limit(mps_arena_t);
extern mps_res_t mps_arena_commit_limit_set(mps_arena_t, size_t);
extern void mps_arena_spare_commit_limit_set(mps_arena_t, size_t);
extern size_t mps_arena_spare_commit_limit(mps_arena_t);
-
+
extern size_t mps_space_reserved(mps_space_t);
extern size_t mps_space_committed(mps_space_t);
@@ -313,8 +313,8 @@ extern mps_res_t (mps_reserve)(mps_addr_t *, mps_ap_t, size_t);
extern mps_bool_t (mps_commit)(mps_ap_t, mps_addr_t, size_t);
extern mps_res_t mps_ap_fill(mps_addr_t *, mps_ap_t, size_t);
-extern mps_res_t mps_ap_fill_with_reservoir_permit(mps_addr_t *,
- mps_ap_t,
+extern mps_res_t mps_ap_fill_with_reservoir_permit(mps_addr_t *,
+ mps_ap_t,
size_t);
extern mps_res_t (mps_ap_frame_push)(mps_frame_t *, mps_ap_t);
@@ -400,8 +400,8 @@ extern void mps_sac_empty(mps_sac_t, mps_addr_t, size_t);
extern void mps_reservoir_limit_set(mps_arena_t, size_t);
extern size_t mps_reservoir_limit(mps_arena_t);
extern size_t mps_reservoir_available(mps_arena_t);
-extern mps_res_t mps_reserve_with_reservoir_permit(mps_addr_t *,
- mps_ap_t,
+extern mps_res_t mps_reserve_with_reservoir_permit(mps_addr_t *,
+ mps_ap_t,
size_t);
@@ -534,7 +534,7 @@ extern size_t mps_message_gc_live_size(mps_arena_t, mps_message_t);
extern size_t mps_message_gc_condemned_size(mps_arena_t, mps_message_t);
-extern size_t mps_message_gc_not_condemned_size(mps_arena_t,
+extern size_t mps_message_gc_not_condemned_size(mps_arena_t,
mps_message_t);
@@ -564,7 +564,7 @@ extern void mps_arena_formatted_objects_walk(mps_arena_t,
/* Root Walking */
-typedef void (*mps_roots_stepper_t)(mps_addr_t *,
+typedef void (*mps_roots_stepper_t)(mps_addr_t *,
mps_root_t,
void *, size_t);
extern void mps_arena_roots_walk(mps_arena_t,
diff --git a/mps/code/mpsi.c b/mps/code/mpsi.c
index ec0b79da0b0..362dd0e0f9a 100644
--- a/mps/code/mpsi.c
+++ b/mps/code/mpsi.c
@@ -63,7 +63,7 @@ SRCID(mpsi, "$Id$");
* in this implementation.
*
* .check.empty: Note that mpsi_check compiles away to almost nothing.
- *
+ *
* .check.enum.cast: enum comparisons have to be cast to avoid a warning
* from the SunPro C compiler. See builder.sc.warn.enum. */
@@ -98,7 +98,7 @@ static Bool mpsi_check(void)
/* The external idea of an address and the internal one */
/* had better match. */
CHECKL(CHECKTYPE(mps_addr_t, Addr));
-
+
/* The external idea of size and the internal one had */
/* better match. See design.mps.interface.c.cons.size */
/* and design.mps.interface.c.pun.size. */
@@ -163,10 +163,10 @@ static Bool mpsi_check(void)
/* Ranks
- *
+ *
* Here a rank returning function is defined for all client visible
* ranks.
- *
+ *
* .rank.final.not: RankFINAL does not have a corresponding function as
* it is only used internally. */
@@ -271,7 +271,7 @@ size_t mps_arena_commit_limit(mps_arena_t mps_arena)
return size;
}
-
+
mps_res_t mps_arena_commit_limit_set(mps_arena_t mps_arena, size_t limit)
{
Res res;
@@ -320,8 +320,8 @@ void mps_space_clamp(mps_space_t mps_space)
{
mps_arena_clamp(mps_space);
}
-
-
+
+
void mps_arena_release(mps_arena_t mps_arena)
{
Arena arena = (Arena)mps_arena;
@@ -335,7 +335,7 @@ void mps_space_release(mps_space_t mps_space)
{
mps_arena_release(mps_space);
}
-
+
void mps_arena_park(mps_space_t mps_space)
{
@@ -350,7 +350,7 @@ void mps_space_park(mps_space_t mps_space)
{
mps_arena_park(mps_space);
}
-
+
mps_res_t mps_arena_collect(mps_space_t mps_space)
{
@@ -367,7 +367,7 @@ mps_res_t mps_space_collect(mps_space_t mps_space)
{
return mps_arena_collect(mps_space);
}
-
+
/* mps_arena_create -- create an arena object */
@@ -487,7 +487,7 @@ mps_res_t mps_fmt_create_A(mps_fmt_t *mps_fmt_o,
}
-/* mps_fmt_create_B -- create an object format of variant B */
+/* mps_fmt_create_B -- create an object format of variant B */
mps_res_t mps_fmt_create_B(mps_fmt_t *mps_fmt_o,
mps_arena_t mps_arena,
@@ -522,7 +522,7 @@ mps_res_t mps_fmt_create_B(mps_fmt_t *mps_fmt_o,
}
-/* mps_fmt_create_auto_header -- create a format of variant auto_header */
+/* mps_fmt_create_auto_header -- create a format of variant auto_header */
mps_res_t mps_fmt_create_auto_header(mps_fmt_t *mps_fmt_o,
mps_arena_t mps_arena,
@@ -563,7 +563,7 @@ void mps_fmt_destroy(mps_fmt_t mps_fmt)
{
Format format = (Format)mps_fmt;
Arena arena;
-
+
AVER(CHECKT(Format, format));
arena = FormatArena(format);
@@ -603,7 +603,7 @@ mps_res_t mps_pool_create_v(mps_pool_t *mps_pool_o, mps_arena_t mps_arena,
res = PoolCreateV(&pool, arena, class, args);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*mps_pool_o = (mps_pool_t)pool;
return res;
@@ -631,7 +631,7 @@ mps_res_t mps_alloc(mps_addr_t *p_o, mps_pool_t mps_pool, size_t size, ...)
Arena arena;
Addr p;
Res res;
-
+
AVER(CHECKT(Pool, pool));
arena = PoolArena(pool);
@@ -673,7 +673,7 @@ void mps_free(mps_pool_t mps_pool, mps_addr_t p, size_t size)
{
Pool pool = (Pool)mps_pool;
Arena arena;
-
+
AVER(CHECKT(Pool, pool));
arena = PoolArena(pool);
@@ -700,7 +700,7 @@ mps_res_t mps_ap_create(mps_ap_t *mps_ap_o, mps_pool_t mps_pool, ...)
BufferClass bufclass;
Res res;
va_list args;
-
+
AVER(mps_ap_o != NULL);
AVER(CHECKT(Pool, pool));
arena = PoolArena(pool);
@@ -733,7 +733,7 @@ mps_res_t mps_ap_create_v(mps_ap_t *mps_ap_o, mps_pool_t mps_pool,
Buffer buf;
BufferClass bufclass;
Res res;
-
+
AVER(mps_ap_o != NULL);
AVER(CHECKT(Pool, pool));
arena = PoolArena(pool);
@@ -758,7 +758,7 @@ void mps_ap_destroy(mps_ap_t mps_ap)
Buffer buf = BufferOfAP((AP)mps_ap);
Arena arena;
- AVER(mps_ap != NULL);
+ AVER(mps_ap != NULL);
AVER(CHECKT(Buffer, buf));
arena = BufferArena(buf);
@@ -794,7 +794,7 @@ mps_res_t (mps_reserve)(mps_addr_t *p_o, mps_ap_t mps_ap, size_t size)
-mps_res_t mps_reserve_with_reservoir_permit(mps_addr_t *p_o,
+mps_res_t mps_reserve_with_reservoir_permit(mps_addr_t *p_o,
mps_ap_t mps_ap, size_t size)
{
mps_res_t res;
@@ -846,8 +846,8 @@ mps_bool_t (mps_commit)(mps_ap_t mps_ap, mps_addr_t p, size_t size)
mps_res_t (mps_ap_frame_push)(mps_frame_t *frame_o, mps_ap_t mps_ap)
{
- AVER(frame_o != NULL);
- AVER(mps_ap != NULL);
+ AVER(frame_o != NULL);
+ AVER(mps_ap != NULL);
/* Fail if between reserve & commit */
if ((char *)mps_ap->alloc != (char *)mps_ap->init) {
@@ -867,10 +867,10 @@ mps_res_t (mps_ap_frame_push)(mps_frame_t *frame_o, mps_ap_t mps_ap)
AVER(CHECKT(Buffer, buf));
arena = BufferArena(buf);
-
+
ArenaEnter(arena);
AVERT(Buffer, buf);
-
+
res = BufferFramePush(&frame, buf);
if (res == ResOK) {
@@ -887,7 +887,7 @@ mps_res_t (mps_ap_frame_push)(mps_frame_t *frame_o, mps_ap_t mps_ap)
mps_res_t (mps_ap_frame_pop)(mps_ap_t mps_ap, mps_frame_t frame)
{
- AVER(mps_ap != NULL);
+ AVER(mps_ap != NULL);
/* Can't check frame because it's an arbitrary value */
/* Fail if between reserve & commit */
@@ -910,10 +910,10 @@ mps_res_t (mps_ap_frame_pop)(mps_ap_t mps_ap, mps_frame_t frame)
AVER(CHECKT(Buffer, buf));
arena = BufferArena(buf);
-
+
ArenaEnter(arena);
AVERT(Buffer, buf);
-
+
res = BufferFramePop(buf, (AllocFrame)frame);
ArenaLeave(arena);
@@ -934,7 +934,7 @@ mps_res_t mps_ap_fill(mps_addr_t *p_o, mps_ap_t mps_ap, size_t size)
Addr p;
Res res;
- AVER(mps_ap != NULL);
+ AVER(mps_ap != NULL);
AVER(CHECKT(Buffer, buf));
arena = BufferArena(buf);
@@ -950,14 +950,14 @@ mps_res_t mps_ap_fill(mps_addr_t *p_o, mps_ap_t mps_ap, size_t size)
res = BufferFill(&p, buf, size, FALSE);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*p_o = (mps_addr_t)p;
return MPS_RES_OK;
}
-mps_res_t mps_ap_fill_with_reservoir_permit(mps_addr_t *p_o, mps_ap_t mps_ap,
+mps_res_t mps_ap_fill_with_reservoir_permit(mps_addr_t *p_o, mps_ap_t mps_ap,
size_t size)
{
Buffer buf = BufferOfAP((AP)mps_ap);
@@ -965,7 +965,7 @@ mps_res_t mps_ap_fill_with_reservoir_permit(mps_addr_t *p_o, mps_ap_t mps_ap,
Addr p;
Res res;
- AVER(mps_ap != NULL);
+ AVER(mps_ap != NULL);
AVER(CHECKT(Buffer, buf));
arena = BufferArena(buf);
@@ -981,7 +981,7 @@ mps_res_t mps_ap_fill_with_reservoir_permit(mps_addr_t *p_o, mps_ap_t mps_ap,
res = BufferFill(&p, buf, size, TRUE);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*p_o = (mps_addr_t)p;
return MPS_RES_OK;
@@ -999,7 +999,7 @@ mps_bool_t mps_ap_trip(mps_ap_t mps_ap, mps_addr_t p, size_t size)
Arena arena;
Bool b;
- AVER(mps_ap != NULL);
+ AVER(mps_ap != NULL);
AVER(CHECKT(Buffer, buf));
arena = BufferArena(buf);
@@ -1038,7 +1038,7 @@ mps_res_t mps_sac_create(mps_sac_t *mps_sac_o, mps_pool_t mps_pool,
res = SACCreate(&sac, pool, (Count)classes_count, classes);
ArenaLeave(arena);
-
+
if (res != ResOK) return (mps_res_t)res;
*mps_sac_o = (mps_sac_t)ExternalSACOfSAC(sac);
return (mps_res_t)res;
@@ -1175,7 +1175,7 @@ mps_res_t mps_root_create(mps_root_t *mps_root_o, mps_arena_t mps_arena,
(RootScanMethod)mps_root_scan, p, s);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*mps_root_o = (mps_root_t)root;
return MPS_RES_OK;
@@ -1205,7 +1205,7 @@ mps_res_t mps_root_create_table(mps_root_t *mps_root_o, mps_arena_t mps_arena,
(Addr *)base, (Addr *)base + size);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*mps_root_o = (mps_root_t)root;
return MPS_RES_OK;
@@ -1237,7 +1237,7 @@ mps_res_t mps_root_create_table_masked(mps_root_t *mps_root_o,
mask);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*mps_root_o = (mps_root_t)root;
return MPS_RES_OK;
@@ -1254,7 +1254,7 @@ mps_res_t mps_root_create_fmt(mps_root_t *mps_root_o, mps_arena_t mps_arena,
Root root;
RootMode mode = (RootMode)mps_rm;
Res res;
-
+
ArenaEnter(arena);
AVER(mps_root_o != NULL);
@@ -1293,7 +1293,7 @@ mps_res_t mps_root_create_reg(mps_root_t *mps_root_o, mps_arena_t mps_arena,
reg_scan_p, mps_size);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*mps_root_o = (mps_root_t)root;
return MPS_RES_OK;
@@ -1319,7 +1319,7 @@ void mps_root_destroy(mps_root_t mps_root)
{
Root root = (Root)mps_root;
Arena arena;
-
+
arena = RootArena(root);
ArenaEnter(arena);
@@ -1356,7 +1356,7 @@ mps_res_t mps_thread_reg(mps_thr_t *mps_thr_o, mps_arena_t mps_arena)
res = ThreadRegister(&thread, arena);
ArenaLeave(arena);
-
+
if (res != ResOK) return res;
*mps_thr_o = (mps_thr_t)thread;
return MPS_RES_OK;
@@ -1366,7 +1366,7 @@ void mps_thread_dereg(mps_thr_t mps_thr)
{
Thread thread = (Thread)mps_thr;
Arena arena;
-
+
AVER(ThreadCheckSimple(thread));
arena = ThreadArena(thread);
@@ -1601,7 +1601,7 @@ void mps_message_finalization_ref(mps_addr_t *mps_addr_return,
Ref ref;
AVER(mps_addr_return != NULL);
-
+
ArenaEnter(arena);
AVERT(Arena, arena);
@@ -1613,7 +1613,7 @@ void mps_message_finalization_ref(mps_addr_t *mps_addr_return,
/* MPS_MESSAGE_TYPE_GC */
-size_t mps_message_gc_live_size(mps_arena_t mps_arena,
+size_t mps_message_gc_live_size(mps_arena_t mps_arena,
mps_message_t mps_message)
{
Arena arena = (Arena)mps_arena;
diff --git a/mps/code/mpsicv.c b/mps/code/mpsicv.c
index f2e81fe5070..c581efdc188 100644
--- a/mps/code/mpsicv.c
+++ b/mps/code/mpsicv.c
@@ -288,7 +288,7 @@ static void *test(void *arg, size_t s)
die(mps_pool_create(&mv, arena, mps_class_mv(), 0x10000, 32, 0x10000),
"pool_create(mv)");
-
+
pool_create_v_test(arena, format, chain); /* creates amc pool */
ap_create_v_test(amcpool);
diff --git a/mps/code/mpsioan.c b/mps/code/mpsioan.c
index 893e9632647..3f061df2158 100644
--- a/mps/code/mpsioan.c
+++ b/mps/code/mpsioan.c
@@ -46,7 +46,7 @@ mps_res_t mps_io_create(mps_io_t *mps_io_r)
f = fopen("mpsio.log", "wb");
if(f == NULL)
return MPS_RES_IO;
-
+
*mps_io_r = (mps_io_t)f;
ioFile = f;
return MPS_RES_OK;
@@ -69,7 +69,7 @@ mps_res_t mps_io_write(mps_io_t mps_io, void *buf, size_t size)
n = fwrite(buf, size, 1, f);
if(n != 1)
return MPS_RES_IO;
-
+
return MPS_RES_OK;
}
@@ -78,10 +78,10 @@ mps_res_t mps_io_flush(mps_io_t mps_io)
{
FILE *f = (FILE *)mps_io; /* Should check f == ioFile */
int e;
-
+
e = fflush(f);
if(e == EOF)
return MPS_RES_IO;
-
+
return MPS_RES_OK;
}
diff --git a/mps/code/mpsliban.c b/mps/code/mpsliban.c
index b28e456e085..5b5c16fbcec 100644
--- a/mps/code/mpsliban.c
+++ b/mps/code/mpsliban.c
@@ -16,7 +16,7 @@
* TRANSGRESSIONS (rule.impl.trans)
*
* .trans.file: The ANSI standard says (in section 7.9.1) that FILE is an
- * object type, and hence the casts between FILE and mps_lib_FILE (an
+ * object type, and hence the casts between FILE and mps_lib_FILE (an
* incomplete type) are not necessarily valid. We assume that this trick
* works, however, in all current environments.
*/
diff --git a/mps/code/mv2test.c b/mps/code/mv2test.c
index 20423cae6a0..99bd5edd12d 100644
--- a/mps/code/mv2test.c
+++ b/mps/code/mv2test.c
@@ -124,15 +124,15 @@ static double nrnd(void)
double r1 = 0.27597;
double r2 = 0.27846;
double x, y, Q;
-
-reject:
+
+reject:
u = (double)rnd()/m;
v = (double)rnd()/m;
v = twor * (v - 0.5);
x = u - s;
y = fabs(v) - t;
Q = x * x + y * (a * y - b * x);
-
+
if (Q < r1)
goto accept;
if (Q > r2)
@@ -156,7 +156,7 @@ static mps_pool_t pool;
extern void DescribeIt(void);
-void DescribeIt(void)
+void DescribeIt(void)
{
PoolDescribe((Pool)pool, (mps_lib_FILE *)stderr);
}
@@ -196,7 +196,7 @@ static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size)
/* --- align */
size = ((size+7)/8)*8;
-
+
do {
MPS_RESERVE_BLOCK(res, *p, ap, size);
if(res != MPS_RES_OK)
@@ -250,13 +250,13 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
int j = rand()%(TEST_SET_SIZE-i);
void *tp;
size_t ts;
-
+
tp = ps[j]; ts = ss[j];
ps[j] = ps[i]; ss[j] = ss[i];
ps[i] = tp; ss[i] = ts;
}
/* free some of the objects */
-
+
for(i=x; i 0) {
mps_free(pool, (mps_addr_t)ps[i], ss[i]);
@@ -270,7 +270,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
if(res != MPS_RES_OK)
break;
ss[i] = s;
-
+
if (verbose) {
if(i && i%4==0) putchar('\n');
printf("%8lX %6lX ", (unsigned long)ps[i], (unsigned long)ss[i]);
@@ -279,9 +279,9 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
if (verbose)
putchar('\n');
}
-
+
PoolDescribe((Pool)pool, mps_lib_stdout);
-
+
mps_ap_destroy(ap);
mps_pool_destroy(pool);
@@ -299,7 +299,7 @@ static void stress_with_arena_class(mps_arena_class_t aclass)
min = 8;
mean = 42;
max = 8192;
-
+
die(stress(mps_class_mvt(), arena, randomSize,
min, /* min_size */
mean, /* median_size */
diff --git a/mps/code/pool.c b/mps/code/pool.c
index 2389cdcfb2b..71f32977824 100644
--- a/mps/code/pool.c
+++ b/mps/code/pool.c
@@ -175,7 +175,7 @@ failInit:
/* PoolCreate, PoolCreateV: Allocate and initialise pool */
-Res PoolCreate(Pool *poolReturn, Arena arena,
+Res PoolCreate(Pool *poolReturn, Arena arena,
PoolClass class, ...)
{
Res res;
@@ -186,7 +186,7 @@ Res PoolCreate(Pool *poolReturn, Arena arena,
return res;
}
-Res PoolCreateV(Pool *poolReturn, Arena arena,
+Res PoolCreateV(Pool *poolReturn, Arena arena,
PoolClass class, va_list args)
{
Res res;
@@ -199,8 +199,8 @@ Res PoolCreateV(Pool *poolReturn, Arena arena,
/* .space.alloc: Allocate the pool instance structure with the size */
/* requested in the pool class. See .space.free */
- res = ControlAlloc(&base, arena, class->size,
- /* withReservoirPermit */ FALSE);
+ res = ControlAlloc(&base, arena, class->size,
+ /* withReservoirPermit */ FALSE);
if (res != ResOK)
goto failControlAlloc;
@@ -209,12 +209,12 @@ Res PoolCreateV(Pool *poolReturn, Arena arena,
/* instance by using the offset information from the class. */
pool = (Pool)PointerAdd(base, class->offset);
- /* Initialize the pool. */
+ /* Initialize the pool. */
res = PoolInitV(pool, arena, class, args);
- if (res != ResOK)
+ if (res != ResOK)
goto failPoolInit;
-
- *poolReturn = pool;
+
+ *poolReturn = pool;
return ResOK;
failPoolInit:
@@ -228,19 +228,19 @@ failControlAlloc:
void PoolFinish(Pool pool)
{
- AVERT(Pool, pool);
-
+ AVERT(Pool, pool);
+
/* Do any class-specific finishing. */
(*pool->class->finish)(pool);
/* Detach the pool from the arena, and unsig it. */
RingRemove(&pool->arenaRing);
pool->sig = SigInvalid;
-
+
RingFinish(&pool->segRing);
RingFinish(&pool->bufferRing);
RingFinish(&pool->arenaRing);
-
+
EVENT_P(PoolFinish, pool);
}
@@ -253,8 +253,8 @@ void PoolDestroy(Pool pool)
Arena arena;
Addr base;
- AVERT(Pool, pool);
-
+ AVERT(Pool, pool);
+
class = pool->class; /* } In case PoolFinish changes these */
arena = pool->arena; /* } */
@@ -278,7 +278,7 @@ BufferClass PoolDefaultBufferClass(Pool pool)
/* PoolAlloc -- allocate a block of memory from a pool */
-Res PoolAlloc(Addr *pReturn, Pool pool, Size size,
+Res PoolAlloc(Addr *pReturn, Pool pool, Size size,
Bool withReservoirPermit)
{
Res res;
@@ -318,7 +318,7 @@ void PoolFree(Pool pool, Addr old, Size size)
/* The pool methods should check that old is in pool. */
AVER(size > 0);
(*pool->class->free)(pool, old, size);
-
+
EVENT_PAW(PoolFree, pool, old, size);
}
@@ -340,7 +340,7 @@ Res PoolAccess(Pool pool, Seg seg, Addr addr,
/* PoolWhiten, PoolGrey, PoolBlacken -- change color of a segment in the pool */
Res PoolWhiten(Pool pool, Trace trace, Seg seg)
-{
+{
AVERT(Pool, pool);
AVERT(Trace, trace);
AVERT(Seg, seg);
@@ -475,12 +475,12 @@ Res PoolDescribe(Pool pool, mps_lib_FILE *stream)
if (!CHECKT(Pool, pool)) return ResFAIL;
if (stream == NULL) return ResFAIL;
-
+
res = WriteF(stream,
"Pool $P ($U) {\n", (WriteFP)pool, (WriteFU)pool->serial,
- " class $P (\"$S\")\n",
+ " class $P (\"$S\")\n",
(WriteFP)pool->class, pool->class->name,
- " arena $P ($U)\n",
+ " arena $P ($U)\n",
(WriteFP)pool->arena, (WriteFU)pool->arena->serial,
" alignment $W\n", (WriteFW)pool->alignment,
NULL);
diff --git a/mps/code/poolabs.c b/mps/code/poolabs.c
index f7a10130c15..86fd76ddf31 100644
--- a/mps/code/poolabs.c
+++ b/mps/code/poolabs.c
@@ -6,22 +6,22 @@
* PURPOSE
*
* .purpose: This defines the abstract pool classes, giving
- * a single-inheritance framework which concrete classes
- * may utilize. The purpose is to reduce the fragility of class
- * definitions for pool implementations when small changes are
+ * a single-inheritance framework which concrete classes
+ * may utilize. The purpose is to reduce the fragility of class
+ * definitions for pool implementations when small changes are
* made to the pool protocol. For now, the class hierarchy for
* the abstract classes is intended to be useful, but not to
* represent any particular design for pool inheritance.
- *
+ *
* HIERARCHY
*
* .hierarchy: define the following hierarchy of abstract pool classes:
* AbstractPoolClass - implements init, finish, describe
* AbstractAllocFreePoolClass - implements alloc & free
* AbstractBufferPoolClass - implements the buffer protocol
- * AbstractSegBufPoolClass - uses SegBuf buffer class
+ * AbstractSegBufPoolClass - uses SegBuf buffer class
* AbstractScanPoolClass - implements basic scanning
- * AbstractCollectPoolClass - implements basic GC
+ * AbstractCollectPoolClass - implements basic GC
*/
#include "mpm.h"
@@ -39,8 +39,8 @@ typedef PoolClassStruct AbstractCollectPoolClassStruct;
/* Mixins:
*
- * For now (at least) we're avoiding multiple inheritance.
- * However, there is a significant use of multiple inheritance
+ * For now (at least) we're avoiding multiple inheritance.
+ * However, there is a significant use of multiple inheritance
* in practice amongst the pool classes, as there are several
* orthogonal sub-protocols included in the pool protocol.
* The following mixin functions help to provide the inheritance
@@ -68,7 +68,7 @@ void PoolClassMixInBuffer(PoolClass class)
class->bufferFill = PoolTrivBufferFill;
class->bufferEmpty = PoolTrivBufferEmpty;
/* By default, buffered pools treat frame operations as NOOPs */
- class->framePush = PoolTrivFramePush;
+ class->framePush = PoolTrivFramePush;
class->framePop = PoolTrivFramePop;
class->bufferClass = BufferClassGet;
}
@@ -179,7 +179,7 @@ DEFINE_CLASS(AbstractCollectPoolClass, class)
}
-/* PoolNo*, PoolTriv* -- Trivial and non-methods for Pool Classes
+/* PoolNo*, PoolTriv* -- Trivial and non-methods for Pool Classes
*
* See design.mps.pool.no and design.mps.pool.triv
*/
@@ -266,14 +266,14 @@ Res PoolTrivBufferFill(Addr *baseReturn, Addr *limitReturn,
res = PoolAlloc(&p, pool, size, withReservoirPermit);
if(res != ResOK) return res;
-
+
*baseReturn = p;
*limitReturn = AddrAdd(p, size);
return ResOK;
}
-void PoolNoBufferEmpty(Pool pool, Buffer buffer,
+void PoolNoBufferEmpty(Pool pool, Buffer buffer,
Addr init, Addr limit)
{
AVERT(Pool, pool);
diff --git a/mps/code/poolamc.c b/mps/code/poolamc.c
index 88d84502159..b90d90dbc56 100644
--- a/mps/code/poolamc.c
+++ b/mps/code/poolamc.c
@@ -71,10 +71,10 @@ typedef struct amcNailboardStruct {
#define amcNailboardSig ((Sig)0x519A3C4B) /* SIGnature AMC Nailboard */
-/* AMCGSegStruct -- AMC segment structure
+/* AMCGSegStruct -- AMC segment structure
*
* .segtype: AMC segs have a pointer to the type field of either
- * a nailboard or a generation. This initial value is passed
+ * a nailboard or a generation. This initial value is passed
* as an additional parameter when the segment is allocated.
* See design.mps.poolamc.fix.nail.distinguish.
*/
@@ -110,7 +110,7 @@ static Bool amcSegCheck(amcSeg amcseg)
/* AMCSegInit -- initialise an AMC segment */
-static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
int *segtype = va_arg(args, int*); /* .segtype */
@@ -341,7 +341,7 @@ static Bool amcNailboardCheck(amcNailboard board)
* This subclass of SegBuf records a link to a generation.
*/
-#define amcBufSig ((Sig)0x519A3CBF) /* SIGnature AMC BuFfer */
+#define amcBufSig ((Sig)0x519A3CBF) /* SIGnature AMC BuFfer */
typedef struct amcBufStruct *amcBuf;
@@ -1967,7 +1967,7 @@ DEFINE_POOL_CLASS(AMCZPoolClass, this)
this->grey = PoolNoGrey;
this->scan = PoolNoScan;
}
-
+
/* mps_class_amc -- return the pool class descriptor to the client */
diff --git a/mps/code/poolams.c b/mps/code/poolams.c
index 6fe8e3616af..80e1333b45d 100644
--- a/mps/code/poolams.c
+++ b/mps/code/poolams.c
@@ -2,7 +2,7 @@
*
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
- *
+ *
* .design: See design.mps.poolams.
*
*
@@ -55,7 +55,7 @@ Bool AMSSegCheck(AMSSeg amsseg)
/* amsCreateTables -- create the tables for an AMS seg */
-static Res amsCreateTables(BT *allocReturn,
+static Res amsCreateTables(BT *allocReturn,
BT *nongreyReturn, BT *nonwhiteReturn,
Arena arena, Count length)
{
@@ -94,7 +94,7 @@ failAlloc:
/* amsDestroyTables -- destroy the tables for an AMS seg */
-static void amsDestroyTables(BT allocTable,
+static void amsDestroyTables(BT allocTable,
BT nongreyTable, BT nonwhiteTable,
Arena arena, Count length)
{
@@ -112,7 +112,7 @@ static void amsDestroyTables(BT allocTable,
/* AMSSegInit -- Init method for AMS segments */
-static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
SegClass super;
@@ -142,7 +142,7 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size,
amsseg->marksChanged = FALSE; /* design.mps.poolams.marked.unused */
amsseg->ambiguousFixes = FALSE;
- res = amsCreateTables(&amsseg->allocTable,
+ res = amsCreateTables(&amsseg->allocTable,
&amsseg->nongreyTable, &amsseg->nonwhiteTable,
arena, amsseg->grains);
if (res != ResOK)
@@ -189,7 +189,7 @@ static void AMSSegFinish(Seg seg)
AVER(SegBuffer(seg) == NULL);
/* keep the destructions in step with AMSSegInit failure cases */
- amsDestroyTables(amsseg->allocTable, amsseg->nongreyTable,
+ amsDestroyTables(amsseg->allocTable, amsseg->nongreyTable,
amsseg->nonwhiteTable, arena, amsseg->grains);
RingRemove(&amsseg->segRing);
@@ -202,7 +202,7 @@ static void AMSSegFinish(Seg seg)
/* finish the superclass fields last */
super = SEG_SUPERCLASS(AMSSegClass);
super->finish(seg);
-}
+}
/* AMSSegMerge & AMSSegSplit -- AMSSeg split & merge methods
@@ -215,7 +215,7 @@ static void AMSSegFinish(Seg seg)
* where the join is aligned with the grain alignment
* See design.mps.poolams.split-merge.constrain.
*
- * .alloc-early: Allocations are performed before calling the
+ * .alloc-early: Allocations are performed before calling the
* next method to simplify the fail cases. See
* design.mps.seg.split-merge.fail
*
@@ -226,7 +226,7 @@ static void AMSSegFinish(Seg seg)
* processing of all such tables by a macro.
*/
-static Res AMSSegMerge(Seg seg, Seg segHi,
+static Res AMSSegMerge(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -266,7 +266,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
/* Merge the superclass fields via next-method call */
super = SEG_SUPERCLASS(AMSSegClass);
- res = super->merge(seg, segHi, base, mid, limit,
+ res = super->merge(seg, segHi, base, mid, limit,
withReservoirPermit, args);
if (res != ResOK)
goto failSuper;
@@ -300,7 +300,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi,
return ResOK;
failSuper:
- amsDestroyTables(allocTable, nongreyTable, nonwhiteTable,
+ amsDestroyTables(allocTable, nongreyTable, nonwhiteTable,
arena, allGrains);
failCreateTables:
AVERT(AMSSeg, amsseg);
@@ -309,7 +309,7 @@ failCreateTables:
}
-static Res AMSSegSplit(Seg seg, Seg segHi,
+static Res AMSSegSplit(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -331,8 +331,8 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
arena = PoolArena(SegPool(seg));
ams = Pool2AMS(SegPool(seg));
- loGrains = AMSGrains(ams, AddrOffset(base, mid));
- hiGrains = AMSGrains(ams, AddrOffset(mid, limit));
+ loGrains = AMSGrains(ams, AddrOffset(base, mid));
+ hiGrains = AMSGrains(ams, AddrOffset(mid, limit));
allGrains = loGrains + hiGrains;
/* checks for .grain-align */
@@ -372,7 +372,7 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
BTDestroy(amsseg->table, arena, allGrains); \
amsseg->table = table ## Lo; \
amssegHi->table = table ## Hi; \
- END
+ END
SPLIT_TABLES(nonwhiteTable, BTSetRange);
SPLIT_TABLES(nongreyTable, BTSetRange);
@@ -403,10 +403,10 @@ static Res AMSSegSplit(Seg seg, Seg segHi,
return ResOK;
failSuper:
- amsDestroyTables(allocTableHi, nongreyTableHi, nonwhiteTableHi,
+ amsDestroyTables(allocTableHi, nongreyTableHi, nonwhiteTableHi,
arena, hiGrains);
failCreateTablesHi:
- amsDestroyTables(allocTableLo, nongreyTableLo, nonwhiteTableLo,
+ amsDestroyTables(allocTableLo, nongreyTableLo, nonwhiteTableLo,
arena, loGrains);
failCreateTablesLo:
AVERT(AMSSeg, amsseg);
@@ -587,14 +587,14 @@ static Res AMSSegCreate(Seg *segReturn, Pool pool, Size size,
if (res != ResOK)
goto failSize;
- res = SegAlloc(&seg, (*ams->segClass)(), segPref, prefSize,
+ res = SegAlloc(&seg, (*ams->segClass)(), segPref, prefSize,
pool, withReservoirPermit);
if (res != ResOK) { /* try to allocate one that's just large enough */
Size minSize = SizeAlignUp(size, ArenaAlign(arena));
if (minSize == prefSize)
goto failSeg;
- res = SegAlloc(&seg, (*ams->segClass)(), segPref, minSize,
+ res = SegAlloc(&seg, (*ams->segClass)(), segPref, minSize,
pool, withReservoirPermit);
if (res != ResOK)
goto failSeg;
@@ -638,7 +638,7 @@ static Res AMSIterate(Seg seg, AMSObjectFunction f, void *closure);
/* AMSInit -- the pool class initialization method
- *
+ *
* Takes one additional argument: the format of the objects
* allocated in the pool. See design.mps.poolams.init.
*/
@@ -702,7 +702,7 @@ Res AMSInitInternal(AMS ams, Format format, Chain chain)
/* AMSFinish -- the pool class finishing method
- *
+ *
* Destroys all the segs in the pool. Can't invalidate the AMS until
* we've destroyed all the segments, as it may be checked.
*/
@@ -723,7 +723,7 @@ void AMSFinish(Pool pool)
/* amsSegAlloc -- try to allocate an area in the given segment
- *
+ *
* Tries to find an area of at least the given size. If successful,
* makes that area black, if necessary, and returns its base and limit
* grain indices.
@@ -775,7 +775,7 @@ static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn,
/* AMSBufferFill -- the pool class buffer fill method
- *
+ *
* Iterates over the segments looking for space. See
* design.mps.poolams.fill.
*/
@@ -847,7 +847,7 @@ found:
/* AMSBufferEmpty -- the pool class buffer empty method
- *
+ *
* Frees the unused part of the buffer. The colour of the area doesn't
* need to be changed. See design.mps.poolams.empty.
*/
@@ -907,7 +907,7 @@ void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
/* amsRangeCondemn -- Condemn a part of an AMS segment
- *
+ *
* I.e., alloc -> white, free -> black.
* Allow calling it with base = limit, to simplify the callers.
*/
@@ -991,7 +991,7 @@ Res AMSWhiten(Pool pool, Trace trace, Seg seg)
/* AMSIterate -- applies a function to each object in a segment
- *
+ *
* AMSIterate(seg, f, closure) applies f to all the
* objects in the segment. It skips the buffer, if any (from
* BufferScanLimit to BufferLimit).
@@ -1027,7 +1027,7 @@ static Res AMSIterate(Seg seg, AMSObjectFunction f, void *closure)
if (buffer != NULL
&& p == BufferScanLimit(buffer) && p != BufferLimit(buffer)) {
/* skip buffer */
- next = BufferLimit(buffer);
+ next = BufferLimit(buffer);
AVER(AddrIsAligned(next, alignment));
} else {
AVER((buffer == NULL)
@@ -1054,7 +1054,7 @@ static Res AMSIterate(Seg seg, AMSObjectFunction f, void *closure)
/* amsScanObject -- scan a single object
- *
+ *
* This is the object function passed to AMSIterate by AMSScan.
*/
@@ -1345,7 +1345,7 @@ void AMSReclaim(Pool pool, Trace trace, Seg seg)
trace->reclaimSize += reclaimed << ams->grainShift;
ams->pgen.totalSize -= reclaimed << ams->grainShift;
/* preservedInPlaceCount is updated on fix */
- trace->preservedInPlaceSize +=
+ trace->preservedInPlaceSize +=
(amsseg->grains - amsseg->free) << ams->grainShift;
if (amsseg->free == amsseg->grains && SegBuffer(seg) == NULL) {
@@ -1359,7 +1359,7 @@ void AMSReclaim(Pool pool, Trace trace, Seg seg)
/* AMSDescribe -- the pool class description method
- *
+ *
* Iterates over the segments, describing all of them.
*/
static Res AMSDescribe(Pool pool, mps_lib_FILE *stream)
diff --git a/mps/code/poolams.h b/mps/code/poolams.h
index e2eec458539..498cc50b04e 100644
--- a/mps/code/poolams.h
+++ b/mps/code/poolams.h
@@ -180,7 +180,7 @@ extern Res AMSBufferInit(Pool pool, Buffer buffer, va_list args);
extern Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn,
Pool pool, Buffer buffer, Size size,
Bool withReservoirPermit);
-extern void AMSBufferEmpty(Pool pool, Buffer buffer,
+extern void AMSBufferEmpty(Pool pool, Buffer buffer,
Addr init, Addr limit);
extern Res AMSWhiten(Pool pool, Trace trace, Seg seg);
diff --git a/mps/code/poolawl.c b/mps/code/poolawl.c
index 7211a29c4e3..482581d37b8 100644
--- a/mps/code/poolawl.c
+++ b/mps/code/poolawl.c
@@ -11,7 +11,7 @@
*
* ASSUMPTIONS (about when to scan single references on accesses)
*
- * .assume.purpose: The purpose of scanning refs singly is to limit the
+ * .assume.purpose: The purpose of scanning refs singly is to limit the
* amount of scanning of weak references which must be performed when
* the mutator hits a barrier. Weak references which are scanned at this
* time are not "weak splatted". Minimizing any loss of weak splats
@@ -19,23 +19,23 @@
*
* .assume.noweak: It follows (from .assume.purpose) that there is no
* benefit from scanning single refs on barrier accesses for segments
- * which don't contain any weak references. However, if a segment
- * contains either all weak refs or a mixture of weak and non-weak
+ * which don't contain any weak references. However, if a segment
+ * contains either all weak refs or a mixture of weak and non-weak
* references then there is a potential benefit.
*
- * .assume.mixedrank: If a segment contains a mixture of references
- * at different ranks (e.g. weak and strong references), there is
+ * .assume.mixedrank: If a segment contains a mixture of references
+ * at different ranks (e.g. weak and strong references), there is
* no way to determine whether or not references at a rank other than
- * the scan state rank will be scanned as a result of normal
+ * the scan state rank will be scanned as a result of normal
* (non-barrier) scanning activity. (@@@@ This is a deficiency in MPS).
- * Assume that such references will, in fact, be scanned at the
+ * Assume that such references will, in fact, be scanned at the
* incorrect rank.
*
- * .assume.samerank: The pool doesn't support segments with mixed
+ * .assume.samerank: The pool doesn't support segments with mixed
* rank segments in any case (despite .assume.mixedrank).
*
* .assume.alltraceable: The pool assumes that all objects are entirely
- * traceable. This must be documented elsewhere for the benefit of the
+ * traceable. This must be documented elsewhere for the benefit of the
* client.
*/
@@ -166,7 +166,7 @@ static void awlStatTotalInit(AWL awl)
/* AWLSegInit -- Init method for AWL segments */
-static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
SegClass super;
@@ -271,7 +271,7 @@ static void AWLSegFinish(Seg seg)
super = SEG_SUPERCLASS(AWLSegClass);
super->finish(seg);
}
-
+
/* AWLSegClass -- Class definition for AWL segments */
@@ -310,7 +310,7 @@ static Bool AWLCanTrySingleAccess(AWL awl, Seg seg, Addr addr)
awlseg = Seg2AWLSeg(seg);
AVERT(AWLSeg, awlseg);
-
+
if (AWLHaveTotalSALimit) {
if (AWLTotalSALimit < awl->succAccesses) {
STATISTIC(awl->stats.declined++);
@@ -395,7 +395,7 @@ static void AWLNoteScan(AWL awl, Seg seg, ScanState ss)
} else {
/* This is "failed" scan at improper rank. */
STATISTIC(awl->stats.badScans++);
- }
+ }
/* Reinitialize the segment statistics */
awlseg->singleAccesses = 0;
STATISTIC(awlStatSegInit(awlseg));
@@ -435,7 +435,7 @@ static Res AWLSegCreate(AWLSeg *awlsegReturn,
segPrefStruct = *SegPrefDefault();
SegPrefExpress(&segPrefStruct, SegPrefCollected, NULL);
SegPrefExpress(&segPrefStruct, SegPrefGen, &awl->gen);
- res = SegAlloc(&seg, AWLSegClassGet(), &segPrefStruct, size, pool,
+ res = SegAlloc(&seg, AWLSegClassGet(), &segPrefStruct, size, pool,
reservoirPermit, rankSet);
if (res != ResOK)
return res;
@@ -685,9 +685,9 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg)
} else {
/* Whiten everything except the buffer. */
Addr base = SegBase(seg);
- Index scanLimitIndex = awlIndexOfAddr(base, awl,
+ Index scanLimitIndex = awlIndexOfAddr(base, awl,
BufferScanLimit(buffer));
- Index limitIndex = awlIndexOfAddr(base, awl,
+ Index limitIndex = awlIndexOfAddr(base, awl,
BufferLimit(buffer));
AWLRangeWhiten(awlseg, 0, scanLimitIndex);
@@ -777,7 +777,7 @@ static void AWLBlacken(Pool pool, TraceSet traceSet, Seg seg)
AVERT(AWL, awl);
awlseg = Seg2AWLSeg(seg);
AVERT(AWLSeg, awlseg);
-
+
BTSetRange(awlseg->scanned, 0, awlseg->grains);
}
@@ -1001,7 +1001,7 @@ static Res AWLFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
ref = *refIO;
i = awlIndexOfAddr(SegBase(seg), awl, ref);
-
+
ss->wasMarked = TRUE;
switch(ss->rank) {
@@ -1106,7 +1106,7 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg)
/* AWLAccess -- handle a barrier hit */
-static Res AWLAccess(Pool pool, Seg seg, Addr addr,
+static Res AWLAccess(Pool pool, Seg seg, Addr addr,
AccessSet mode, MutatorFaultContext context)
{
AWL awl;
@@ -1133,7 +1133,7 @@ static Res AWLAccess(Pool pool, Seg seg, Addr addr,
default:
return res;
}
- }
+ }
/* Have to scan the entire seg anyway. */
res = PoolSegAccess(pool, seg, addr, mode, context);
diff --git a/mps/code/poollo.c b/mps/code/poollo.c
index 150bcd8b487..02db2634d9c 100644
--- a/mps/code/poollo.c
+++ b/mps/code/poollo.c
@@ -62,7 +62,7 @@ typedef struct LOSegStruct {
/* forward decls */
-static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args);
static void loSegFinish(Seg seg);
@@ -98,7 +98,7 @@ static Bool LOSegCheck(LOSeg loseg)
/* loSegInit -- Init method for LO segments */
-static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
SegClass super;
@@ -108,7 +108,7 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size,
Size tablebytes; /* # bytes in each control array */
Arena arena;
/* number of bits needed in each control array */
- unsigned long bits;
+ unsigned long bits;
void *p;
AVERT(Seg, seg);
@@ -306,7 +306,7 @@ static Res loSegCreate(LOSeg *loSegReturn, Pool pool, Size size,
gen = lo->gen;
SegPrefExpress(&segPrefStruct, SegPrefCollected, NULL);
SegPrefExpress(&segPrefStruct, SegPrefGen, &gen);
- res = SegAlloc(&seg, EnsureLOSegClass(), &segPrefStruct,
+ res = SegAlloc(&seg, EnsureLOSegClass(), &segPrefStruct,
asize, pool, withReservoirPermit);
if (res != ResOK)
return res;
@@ -474,12 +474,12 @@ static Res LOInit(Pool pool, va_list arg)
format = va_arg(arg, Format);
AVERT(Format, format);
-
+
lo = PoolPoolLO(pool);
-
+
pool->format = format;
lo->poolStruct.alignment = format->alignment;
- lo->alignShift =
+ lo->alignShift =
SizeLog2((unsigned long)PoolAlignment(&lo->poolStruct));
lo->gen = LOGen; /* may be modified in debugger */
res = ChainCreate(&lo->chain, arena, 1, &loGenParam);
@@ -508,7 +508,7 @@ static void LOFinish(Pool pool)
{
LO lo;
Ring node, nextNode;
-
+
AVERT(Pool, pool);
lo = PoolPoolLO(pool);
AVERT(LO, lo);
@@ -528,8 +528,8 @@ static void LOFinish(Pool pool)
}
-static Res LOBufferFill(Addr *baseReturn, Addr *limitReturn,
- Pool pool, Buffer buffer,
+static Res LOBufferFill(Addr *baseReturn, Addr *limitReturn,
+ Pool pool, Buffer buffer,
Size size, Bool withReservoirPermit)
{
Res res;
@@ -618,7 +618,7 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit)
seg = BufferSeg(buffer);
AVERT(Seg, seg);
AVER(init <= limit);
-
+
loseg = SegLOSeg(seg);
AVERT(LOSeg, loseg);
AVER(loseg->lo == lo);
@@ -657,7 +657,7 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg)
{
LO lo;
unsigned long bits;
-
+
AVERT(Pool, pool);
lo = PoolPoolLO(pool);
AVERT(LO, lo);
@@ -711,7 +711,7 @@ static Res LOFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
}
/* fall through */
- case RankEXACT:
+ case RankEXACT:
case RankFINAL:
case RankWEAK: {
Size i = AddrOffset(SegBase(seg), (Addr)ref) >> lo->alignShift;
diff --git a/mps/code/poolmfs.c b/mps/code/poolmfs.c
index 1eb1e78e993..667c41781b1 100644
--- a/mps/code/poolmfs.c
+++ b/mps/code/poolmfs.c
@@ -10,11 +10,11 @@
* .design.misplaced: This design is misplaced, it should be in a
* separate document.
*
- * MFS operates in a very simple manner: each region allocated from
- * the arena is divided into units. Free units are kept on a linked
- * list using a header stored in the unit itself. The linked list is
- * not ordered; allocation anddeallocation simply pop and push from
- * the head of the list. This is fast, but successive allocations might
+ * MFS operates in a very simple manner: each region allocated from
+ * the arena is divided into units. Free units are kept on a linked
+ * list using a header stored in the unit itself. The linked list is
+ * not ordered; allocation and deallocation simply pop and push from
+ * the head of the list. This is fast, but successive allocations might
* have poor locality if previous successive frees did.
*
* .restriction: This pool cannot allocate from the arena control
@@ -26,7 +26,7 @@
*
* .freelist.fragments: The simple freelist policy might lead to poor
* locality of allocation if the list gets fragmented.
- *
+ *
* .buffer.not: This pool doesn't support fast cache allocation, which
* is a shame.
*/
@@ -89,7 +89,7 @@ static Res MFSInit(Pool pool, va_list arg)
AVER(unitSize >= UNIT_MIN);
AVER(extendBy >= unitSize);
-
+
mfs = PoolPoolMFS(pool);
arena = PoolArena(pool);
@@ -134,7 +134,7 @@ static void MFSFinish(Pool pool)
/* == Allocate ==
*
* Allocation simply involves taking a unit from the front of the freelist
- * and returning it. If there are none, a new region is allocated from the
+ * and returning it. If there are none, a new region is allocated from the
* arena.
*/
diff --git a/mps/code/poolmrg.c b/mps/code/poolmrg.c
index e390e630707..f10933c2db8 100644
--- a/mps/code/poolmrg.c
+++ b/mps/code/poolmrg.c
@@ -1,20 +1,20 @@
/* impl.c.poolmrg: MANUAL RANK GUARDIAN POOL
- *
+ *
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
- *
+ *
* DESIGN
- *
+ *
* .design: See design.mps.poolmrg.
*
* NOTES
- *
+ *
* .improve.rank: At the moment, the pool is a guardian for the final
* rank. It could be generalized to be a guardian for an arbitrary
* rank (a guardian for RankEXACT would tell you if the object was
* ambiguously referenced, for example). The code that would need to be
* modified bears this tag.
- *
+ *
* TRANSGRESSIONS
*
* .addr.void-star: Breaks design.mps.type.addr.use all over the place,
@@ -61,7 +61,7 @@ typedef struct LinkStruct {
PARENT(LinkStruct, the.linkRing, (ring))
-/* RefPart -- Protectable part of guardian
+/* RefPart -- Protectable part of guardian
*
* This is trivial, but provides a useful abstraction
* at no performance cost.
@@ -72,7 +72,7 @@ typedef struct RefPartStruct {
} RefPartStruct;
-/* MRGRefPartRef,MRGRefPartSetRef -- Peek and poke the reference
+/* MRGRefPartRef,MRGRefPartSetRef -- Peek and poke the reference
*
* Might be more efficient to take a seg, rather than calculate it
* every time.
@@ -159,7 +159,7 @@ typedef struct MRGRefSegStruct {
#define Seg2LinkSeg(seg) ((MRGLinkSeg)(seg))
#define LinkSeg2Seg(linkseg) ((Seg)(linkseg))
-
+
#define Seg2RefSeg(seg) ((MRGRefSeg)(seg))
#define RefSeg2Seg(refseg) ((Seg)(refseg))
@@ -170,7 +170,7 @@ static SegClass MRGLinkSegClassGet(void);
static SegClass MRGRefSegClassGet(void);
-/* MRGLinkSegCheck -- check a link segment
+/* MRGLinkSegCheck -- check a link segment
*
* .link.nullref: During initialization of a link segment the refSeg
* field will be NULL. This will be initialized when the reference
@@ -208,7 +208,7 @@ static Bool MRGRefSegCheck(MRGRefSeg refseg)
/* MRGLinkSegInit -- initialise a link segment */
-static Res MRGLinkSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res MRGLinkSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
SegClass super;
@@ -237,14 +237,14 @@ static Res MRGLinkSegInit(Seg seg, Pool pool, Addr base, Size size,
}
-/* MRGRefSegInit -- initialise a ref segment
+/* MRGRefSegInit -- initialise a ref segment
*
* .ref.initarg: The paired link segment is passed as an additional
- * (vararg) parameter when creating the ref segment. Initially the
+ * (vararg) parameter when creating the ref segment. Initially the
* refSeg field of the link segment is NULL (see .link.nullref).
* It's initialized here to the newly initialized ref segment.
*/
-static Res MRGRefSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res MRGRefSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
MRGLinkSeg linkseg = va_arg(args, MRGLinkSeg); /* .ref.initarg */
@@ -269,7 +269,7 @@ static Res MRGRefSegInit(Seg seg, Pool pool, Addr base, Size size,
return res;
/* design.mps.seg.field.rankset.start, .improve.rank */
- SegSetRankSet(seg, RankSetSingle(RankFINAL));
+ SegSetRankSet(seg, RankSetSingle(RankFINAL));
RingInit(&refseg->mrgRing);
RingAppend(&mrg->refRing, &refseg->mrgRing);
@@ -343,7 +343,7 @@ static RefPart MRGRefPartOfLink(Link link, Arena arena)
AVERT(MRGLinkSeg, linkseg);
linkBase = (Link)SegBase(seg);
AVER(link >= linkBase);
- index = link - linkBase;
+ index = link - linkBase;
AVER(index < MRGGuardiansPerSeg(Pool2MRG(SegPool(seg))));
return refPartOfIndex(linkseg->refSeg, index);
@@ -369,16 +369,16 @@ static Link MRGLinkOfRefPart(RefPart refPart, Arena arena)
AVERT(MRGRefSeg, refseg);
refPartBase = (RefPart)SegBase(seg);
AVER(refPart >= refPartBase);
- index = refPart - refPartBase;
+ index = refPart - refPartBase;
AVER(index < MRGGuardiansPerSeg(Pool2MRG(SegPool(seg))));
return linkOfIndex(refseg->linkSeg, index);
}
-/* MRGGuardianInit -- Initialises both parts of a guardian */
+/* MRGGuardianInit -- Initialises both parts of a guardian */
-static void MRGGuardianInit(MRG mrg, Link link, RefPart refPart)
+static void MRGGuardianInit(MRG mrg, Link link, RefPart refPart)
{
AVERT(MRG, mrg);
AVER(link != NULL);
@@ -388,7 +388,7 @@ static void MRGGuardianInit(MRG mrg, Link link, RefPart refPart)
link->state = MRGGuardianFREE;
RingAppend(&mrg->freeRing, &link->the.linkRing);
/* design.mps.poolmrg.free.overwrite */
- MRGRefPartSetRef(PoolArena(&mrg->poolStruct), refPart, 0);
+ MRGRefPartSetRef(PoolArena(&mrg->poolStruct), refPart, 0);
}
@@ -410,7 +410,7 @@ static void MRGMessageDelete(Message message)
{ /* Calculate pool */
Bool b;
- Seg seg;
+ Seg seg;
b = SegOfAddr(&seg, arena, (Addr)message);
AVER(b);
@@ -462,7 +462,7 @@ static MessageClassStruct MRGMessageClassStruct = {
"MRGFinal", /* name */
MRGMessageDelete, /* Delete */
MRGMessageFinalizationRef, /* FinalizationRef */
- MessageNoGCLiveSize, /* GCLiveSize */
+ MessageNoGCLiveSize, /* GCLiveSize */
MessageNoGCCondemnedSize, /* GCCondemnedSize */
MessageNoGCNotCondemnedSize, /* GCNoteCondemnedSize */
MessageClassSig /* design.mps.message.class.sig.double */
@@ -509,7 +509,7 @@ static Res MRGSegPairCreate(MRGRefSeg *refSegReturn, MRG mrg,
pool = MRG2Pool(mrg);
arena = PoolArena(pool);
- nGuardians = MRGGuardiansPerSeg(mrg);
+ nGuardians = MRGGuardiansPerSeg(mrg);
linkSegSize = nGuardians * sizeof(LinkStruct);
linkSegSize = SizeAlignUp(linkSegSize, ArenaAlign(arena));
@@ -522,7 +522,7 @@ static Res MRGSegPairCreate(MRGRefSeg *refSegReturn, MRG mrg,
res = SegAlloc(&segRefPart, EnsureMRGRefSegClass(),
SegPrefDefault(), mrg->extendBy, pool,
- withReservoirPermit,
+ withReservoirPermit,
linkseg); /* .ref.initarg */
if (res != ResOK)
goto failRefPartSegAlloc;
@@ -531,7 +531,7 @@ static Res MRGSegPairCreate(MRGRefSeg *refSegReturn, MRG mrg,
linkBase = (Link)SegBase(segLink);
refPartBase = (RefPart)SegBase(segRefPart);
- for(i = 0; i < nGuardians; ++i)
+ for(i = 0; i < nGuardians; ++i)
MRGGuardianInit(mrg, linkBase + i, refPartBase + i);
AVER((Addr)(&linkBase[i]) <= SegLimit(segLink));
AVER((Addr)(&refPartBase[i]) <= SegLimit(segRefPart));
@@ -588,7 +588,7 @@ static Res MRGRefSegScan(ScanState ss, MRGRefSeg refseg, MRG mrg)
arena = PoolArena(MRG2Pool(mrg));
linkseg = refseg->linkSeg;
- nGuardians = MRGGuardiansPerSeg(mrg);
+ nGuardians = MRGGuardiansPerSeg(mrg);
AVER(nGuardians > 0);
TRACE_SCAN_BEGIN(ss) {
for(i=0; i < nGuardians; ++i) {
@@ -601,7 +601,7 @@ static Res MRGRefSegScan(ScanState ss, MRGRefSeg refseg, MRG mrg)
/* because we are in a scan and the shield is exposed. */
if (TRACE_FIX1(ss, refPart->ref)) {
res = TRACE_FIX2(ss, &(refPart->ref));
- if (res != ResOK)
+ if (res != ResOK)
return res;
if (ss->rank == RankFINAL && !ss->wasMarked) { /* .improve.rank */
@@ -621,10 +621,10 @@ static Res MRGRefSegScan(ScanState ss, MRGRefSeg refseg, MRG mrg)
static Res MRGInit(Pool pool, va_list args)
{
MRG mrg;
-
+
AVER(pool != NULL); /* Can't check more; see pool contract @@@@ */
UNUSED(args);
-
+
mrg = Pool2MRG(pool);
RingInit(&mrg->entryRing);
@@ -709,8 +709,8 @@ Res MRGRegister(Pool pool, Ref ref)
if (RingIsSingle(&mrg->freeRing)) {
/* .refseg.useless: refseg isn't used */
/* @@@@ Should the client be able to use the reservoir for this? */
- res = MRGSegPairCreate(&junk, mrg, /* withReservoirPermit */ FALSE);
- if (res != ResOK)
+ res = MRGSegPairCreate(&junk, mrg, /* withReservoirPermit */ FALSE);
+ if (res != ResOK)
return res;
}
AVER(!RingIsSingle(&mrg->freeRing));
diff --git a/mps/code/poolmv.c b/mps/code/poolmv.c
index 48866b17901..7a9115eecd0 100644
--- a/mps/code/poolmv.c
+++ b/mps/code/poolmv.c
@@ -114,7 +114,7 @@ static Bool MVBlockCheck(MVBlock block)
typedef struct MVSpanStruct *MVSpan;
typedef struct MVSpanStruct {
Sig sig; /* design.mps.sig */
- RingStruct spans; /* all the spans */
+ RingStruct spans; /* all the spans */
MV mv; /* owning MV pool */
Tract tract; /* first tract of the span */
Size size; /* size of the span */
@@ -217,15 +217,15 @@ static Res MVInit(Pool pool, va_list arg)
blockExtendBy = sizeof(MVBlockStruct);
}
- res = PoolInit(&mv->blockPoolStruct.poolStruct,
- arena, PoolClassMFS(),
+ res = PoolInit(&mv->blockPoolStruct.poolStruct,
+ arena, PoolClassMFS(),
blockExtendBy, sizeof(MVBlockStruct));
if(res != ResOK)
return res;
spanExtendBy = sizeof(MVSpanStruct) * (maxSize/extendBy);
- res = PoolInit(&mv->spanPoolStruct.poolStruct,
+ res = PoolInit(&mv->spanPoolStruct.poolStruct,
arena, PoolClassMFS(),
spanExtendBy, sizeof(MVSpanStruct));
if(res != ResOK)
@@ -235,7 +235,7 @@ static Res MVInit(Pool pool, va_list arg)
mv->avgSize = avgSize;
mv->maxSize = maxSize;
RingInit(&mv->spans);
-
+
mv->space = 0;
mv->lost = 0;
@@ -403,7 +403,7 @@ static Res MVSpanFree(MVSpan span, Addr base, Addr limit, Pool blockPool)
AVER(block->next != NULL); /* should at least be a sentinel */
freeAreaSize = AddrOffset(base, block->next->base);
block->limit = base;
- } else {
+ } else {
/* cases 2, 7, and 8: making a new fragment */
Res res;
MVBlock new;
@@ -608,7 +608,7 @@ static void MVFree(Pool pool, Addr old, Size size)
mv->lost += size;
else
mv->space += size;
-
+
/* free space should be less than total space */
AVER(span->space <= SpanInsideSentinels(span));
if(span->space == SpanSize(span)) { /* the whole span is free */
@@ -664,7 +664,7 @@ static Res MVDescribe(Pool pool, mps_lib_FILE *stream)
" maxSize $W\n", (WriteFW)mv->maxSize,
" space $P\n", (WriteFP)mv->space,
NULL);
- if(res != ResOK) return res;
+ if(res != ResOK) return res;
res = WriteF(stream, " Spans\n", NULL);
if(res != ResOK) return res;
@@ -687,7 +687,7 @@ static Res MVDescribe(Pool pool, mps_lib_FILE *stream)
res = WriteF(stream, "$W\n", (WriteFW)span->largest, NULL);
else
res = WriteF(stream, "unknown\n", NULL);
-
+
if(res != ResOK) return res;
}
@@ -779,7 +779,7 @@ DEFINE_POOL_CLASS(MVDebugPoolClass, this)
}
-/* class functions
+/* class functions
*
* Note this is an MPS interface extension
*/
diff --git a/mps/code/poolmv2.c b/mps/code/poolmv2.c
index b8254ae3040..8f92f233ac1 100644
--- a/mps/code/poolmv2.c
+++ b/mps/code/poolmv2.c
@@ -57,7 +57,7 @@ static SegPref MVTSegPref(MVT mvt);
/* Types */
-typedef struct MVTStruct
+typedef struct MVTStruct
{
PoolStruct poolStruct;
CBSStruct cbsStruct; /* The coalescing block structure */
@@ -88,7 +88,7 @@ typedef struct MVTStruct
Size allocated; /* bytes allocated to mutator */
Size available; /* bytes available for allocation */
Size unavailable; /* bytes lost to fragmentation */
-
+
/* pool meters*/
METER_DECL(segAllocs);
METER_DECL(segFrees);
@@ -127,7 +127,7 @@ typedef struct MVTStruct
METER_DECL(exceptions);
METER_DECL(exceptionSplinters);
METER_DECL(exceptionReturns);
-
+
Sig sig;
} MVTStruct;
@@ -168,7 +168,7 @@ static ABQ MVTABQ(MVT mvt)
}
-static CBS MVTCBS(MVT mvt)
+static CBS MVTCBS(MVT mvt)
{
return &mvt->cbsStruct;
}
@@ -207,7 +207,7 @@ static Res MVTInit(Pool pool, va_list arg)
/* can't AVERT mvt, yet */
arena = PoolArena(pool);
AVERT(Arena, arena);
-
+
/* --- Should there be a ResBADARG ? */
minSize = va_arg(arg, Size);
unless (minSize > 0)
@@ -240,7 +240,7 @@ static Res MVTInit(Pool pool, va_list arg)
NULL, NULL, reuseSize, MPS_PF_ALIGN, TRUE, FALSE);
if (res != ResOK)
goto failCBS;
-
+
res = ABQInit(arena, MVTABQ(mvt), (void *)mvt, abqDepth);
if (res != ResOK)
goto failABQ;
@@ -264,14 +264,14 @@ static Res MVTInit(Pool pool, va_list arg)
mvt->splinterSeg = NULL;
mvt->splinterBase = (Addr)0;
mvt->splinterLimit = (Addr)0;
-
+
/* accounting */
mvt->size = 0;
mvt->allocated = 0;
mvt->available = 0;
mvt->availLimit = 0;
mvt->unavailable = 0;
-
+
/* meters*/
METER_INIT(mvt->segAllocs, "segment allocations", (void *)mvt);
METER_INIT(mvt->segFrees, "segment frees", (void *)mvt);
@@ -370,7 +370,7 @@ static void MVTFinish(Pool pool)
Arena arena;
Ring ring;
Ring node, nextNode;
-
+
AVERT(Pool, pool);
mvt = Pool2MVT(pool);
AVERT(MVT, mvt);
@@ -457,7 +457,7 @@ static Res MVTBufferFill(Addr *baseReturn, Addr *limitReturn,
goto done;
}
}
-
+
/* Attempt to retrieve a free block from the ABQ */
ABQRefillIfNecessary(mvt, minSize);
res = ABQPeek(MVTABQ(mvt), &block);
@@ -512,7 +512,7 @@ found:
}
goto done;
}
-
+
/* Attempt to request a block from the arena */
/* see design.mps.poolmvt:impl.c.free.merge.segment */
res = MVTSegAlloc(&seg, mvt, fillSize, pool, withReservoirPermit);
@@ -521,7 +521,7 @@ found:
limit = SegLimit(seg);
goto done;
}
-
+
/* Try contingency */
METER_ACC(mvt->emergencyContingencies, minSize);
res = MVTContingencySearch(&block, MVTCBS(mvt), minSize);
@@ -531,7 +531,7 @@ found:
METER_ACC(mvt->failures, minSize);
AVER(res != ResOK);
return res;
-
+
done:
*baseReturn = base;
*limitReturn = limit;
@@ -555,7 +555,7 @@ done:
*
* See design.mps.poolmvt:impl.c.ap.empty
*/
-static void MVTBufferEmpty(Pool pool, Buffer buffer,
+static void MVTBufferEmpty(Pool pool, Buffer buffer,
Addr base, Addr limit)
{
MVT mvt;
@@ -624,7 +624,7 @@ static void MVTBufferEmpty(Pool pool, Buffer buffer,
* see design.poolmvt.impl.c.free
*/
static void MVTFree(Pool pool, Addr base, Size size)
-{
+{
MVT mvt;
Addr limit;
@@ -647,7 +647,7 @@ static void MVTFree(Pool pool, Addr base, Size size)
METER_ACC(mvt->poolAvailable, mvt->available);
METER_ACC(mvt->poolAllocated, mvt->allocated);
METER_ACC(mvt->poolSize, mvt->size);
-
+
/* design.mps.poolmvt:arch.ap.no-fit.oversize.policy */
/* Return exceptional blocks directly to arena */
if (size > mvt->fillSize) {
@@ -669,7 +669,7 @@ static void MVTFree(Pool pool, Addr base, Size size)
MVTSegFree(mvt, seg);
return;
}
-
+
{
Res res = CBSInsert(MVTCBS(mvt), base, limit);
AVER(res == ResOK);
@@ -781,7 +781,7 @@ static Res MVTDescribe(Pool pool, mps_lib_FILE *stream)
if (res != ResOK) return res;
res = METER_WRITE(mvt->exceptionReturns, stream);
if (res != ResOK) return res;
-
+
res = WriteF(stream, "}\n", NULL);
return res;
}
@@ -826,7 +826,7 @@ size_t mps_mvt_size(mps_pool_t mps_pool)
AVERT(MVT, mvt);
return (size_t)mvt->size;
-}
+}
/* mps_mvt_free_size -- number of bytes comitted to the pool that are
@@ -853,7 +853,7 @@ size_t mps_mvt_free_size(mps_pool_t mps_pool)
/* MVTSegAlloc -- encapsulates SegAlloc with associated accounting and
* metering
*/
-static Res MVTSegAlloc(Seg *segReturn, MVT mvt, Size size,
+static Res MVTSegAlloc(Seg *segReturn, MVT mvt, Size size,
Pool pool, Bool withReservoirPermit)
{
Res res = SegAlloc(segReturn, GCSegClassGet(),
@@ -861,7 +861,7 @@ static Res MVTSegAlloc(Seg *segReturn, MVT mvt, Size size,
if (res == ResOK) {
Size segSize = SegSize(*segReturn);
-
+
/* see design.mps.poolmvt:arch.fragmentation.internal */
AVER(segSize >= mvt->fillSize);
mvt->size += segSize;
@@ -872,12 +872,12 @@ static Res MVTSegAlloc(Seg *segReturn, MVT mvt, Size size,
}
return res;
}
-
+
/* MVTSegFree -- encapsulates SegFree with associated accounting and
* metering
*/
-static void MVTSegFree(MVT mvt, Seg seg)
+static void MVTSegFree(MVT mvt, Seg seg)
{
Size size = SegSize(seg);
@@ -893,18 +893,18 @@ static void MVTSegFree(MVT mvt, Seg seg)
/* MVTReturnBlockSegs -- return (interior) segments of a block to the
* arena
*/
-static Bool MVTReturnBlockSegs(MVT mvt, CBSBlock block, Arena arena)
+static Bool MVTReturnBlockSegs(MVT mvt, CBSBlock block, Arena arena)
{
Addr base, limit;
Bool success = FALSE;
-
+
base = CBSBlockBase(block);
limit = CBSBlockLimit(block);
while (base < limit) {
Seg seg;
Addr segBase, segLimit;
-
+
{
Bool b = SegOfAddr(&seg, arena, base);
AVER(b);
@@ -929,11 +929,11 @@ static Bool MVTReturnBlockSegs(MVT mvt, CBSBlock block, Arena arena)
/* MVTNoteNew -- callback invoked when a block on the CBS >= reuseSize
*/
-static void MVTNoteNew(CBS cbs, CBSBlock block, Size oldSize, Size newSize)
+static void MVTNoteNew(CBS cbs, CBSBlock block, Size oldSize, Size newSize)
{
Res res;
MVT mvt;
-
+
AVERT(CBS, cbs);
mvt = CBSMVT(cbs);
AVERT(MVT, mvt);
@@ -974,7 +974,7 @@ static void MVTNoteDelete(CBS cbs, CBSBlock block, Size oldSize, Size newSize)
AVER(CBSBlockSize(block) < CBSMVT(cbs)->reuseSize);
UNUSED(oldSize);
UNUSED(newSize);
-
+
res = ABQDelete(MVTABQ(CBSMVT(cbs)), block);
AVER(res == ResOK || CBSMVT(cbs)->abqOverflow);
UNUSED(res); /* impl.c.mpm.check.unused */
@@ -984,7 +984,7 @@ static void MVTNoteDelete(CBS cbs, CBSBlock block, Size oldSize, Size newSize)
/* ABQRefillIfNecessary -- refill the ABQ from the CBS if it had
* overflown and is now empty
*/
-static void ABQRefillIfNecessary(MVT mvt, Size size)
+static void ABQRefillIfNecessary(MVT mvt, Size size)
{
AVERT(MVT, mvt);
AVER(size > 0);
@@ -1004,7 +1004,7 @@ static Bool ABQRefillCallback(CBS cbs, CBSBlock block, void *closureP)
{
Res res;
MVT mvt;
-
+
AVERT(CBS, cbs);
mvt = CBSMVT(cbs);
AVERT(MVT, mvt);
@@ -1028,12 +1028,12 @@ static Bool ABQRefillCallback(CBS cbs, CBSBlock block, void *closureP)
return TRUE;
}
-
+
/* Closure for MVTContingencySearch */
typedef struct MVTContigencyStruct *MVTContigency;
-typedef struct MVTContigencyStruct
+typedef struct MVTContigencyStruct
{
CBSBlock blockReturn;
Arena arena;
@@ -1055,7 +1055,7 @@ static Res MVTContingencySearch(CBSBlock *blockReturn, CBS cbs, Size min)
cls.min = min;
cls.steps = 0;
cls.hardSteps = 0;
-
+
CBSIterate(cbs, &MVTContingencyCallback, (void *)&cls);
if (cls.blockReturn != NULL) {
AVER(CBSBlockSize(cls.blockReturn) >= min);
@@ -1066,7 +1066,7 @@ static Res MVTContingencySearch(CBSBlock *blockReturn, CBS cbs, Size min)
*blockReturn = cls.blockReturn;
return ResOK;
}
-
+
return ResFAIL;
}
@@ -1078,14 +1078,14 @@ static Bool MVTContingencyCallback(CBS cbs, CBSBlock block, void *closureP)
{
MVTContigency cl;
Size size;
-
+
AVERT(CBS, cbs);
AVERT(CBSBlock, block);
AVER(closureP != NULL);
cl = (MVTContigency)closureP;
size = CBSBlockSize(block);
-
+
cl->steps++;
if (size < cl->min)
return TRUE;
@@ -1095,14 +1095,14 @@ static Bool MVTContingencyCallback(CBS cbs, CBSBlock block, void *closureP)
cl->blockReturn = block;
return FALSE;
}
-
+
/* do it the hard way */
cl->hardSteps++;
if (MVTCheckFit(block, cl->min, cl->arena)) {
cl->blockReturn = block;
return FALSE;
}
-
+
/* keep looking */
return TRUE;
}
diff --git a/mps/code/poolmv2.h b/mps/code/poolmv2.h
index 757285691ff..488f410cce9 100644
--- a/mps/code/poolmv2.h
+++ b/mps/code/poolmv2.h
@@ -2,9 +2,9 @@
*
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
- *
+ *
* .purpose: The implementation of the new manual-variable pool class
- *
+ *
* .design: See design.mps.poolmv2
*/
diff --git a/mps/code/poolmvff.c b/mps/code/poolmvff.c
index 072ab5b8891..5706d2db7dd 100644
--- a/mps/code/poolmvff.c
+++ b/mps/code/poolmvff.c
@@ -1,5 +1,5 @@
/* impl.c.poolmvff: First Fit Manual Variable Pool
- *
+ *
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
*
@@ -7,7 +7,7 @@
* variable size where address-ordered first fit is an appropriate
* policy. Provision is made to allocate in reverse. This pool
* can allocate across segment boundaries.
- *
+ *
* .design: design.mps.poolmvff
*
*
@@ -75,7 +75,7 @@ typedef MVFFDebugStruct *MVFFDebug;
#define MVFFDebugPoolMVFF(mvffd) (&((mvffd)->mvffStruct))
-/* MVFFAddToFreeList -- Add given range to free list
+/* MVFFAddToFreeList -- Add given range to free list
*
* Updates MVFF counters for additional free space. Returns maximally
* coalesced range containing given range. Does not attempt to free
@@ -109,7 +109,7 @@ static void MVFFAddToFreeList(Addr *baseIO, Addr *limitIO, MVFF mvff) {
* It is not combined with MVFFAddToFreeList because the latter
* is also called when new segments are added under MVFFAlloc.
*/
-static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit)
+static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit)
{
Seg seg;
Arena arena;
@@ -143,13 +143,13 @@ static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit)
mvff->total -= AddrOffset(segBase, segLimit);
SegFree(seg);
}
-
+
/* Avoid calling SegNext if the next segment would fail */
/* the loop test, mainly because there might not be a */
/* next segment. */
if (segLimit == limit) /* segment ends at end of range */
break;
-
+
b = SegNext(&seg, arena, segBase);
AVER(b);
segBase = SegBase(seg);
@@ -166,7 +166,7 @@ static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit)
* withReservoirPermit flag) of at least the specified size. The
* specified size should be pool-aligned. Adds it to the free list.
*/
-static Res MVFFAddSeg(Seg *segReturn,
+static Res MVFFAddSeg(Seg *segReturn,
MVFF mvff, Size size, Bool withReservoirPermit)
{
Pool pool;
@@ -202,7 +202,7 @@ static Res MVFFAddSeg(Seg *segReturn,
/* try again for a seg just large enough for object */
/* see design.mps.poolmvff.design.seg-fail */
segSize = SizeAlignUp(size, align);
- res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
+ res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
withReservoirPermit);
if (res != ResOK) {
return res;
@@ -282,7 +282,7 @@ static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size,
Seg seg;
res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
- if (res != ResOK)
+ if (res != ResOK)
return res;
foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);
@@ -365,7 +365,7 @@ static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn,
}
if (!foundBlock) {
res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
- if (res != ResOK)
+ if (res != ResOK)
return res;
foundBlock = CBSFindLargest(&base, &limit, CBSOfMVFF(mvff),
CBSFindDeleteENTIRE);
@@ -375,7 +375,7 @@ static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn,
AVER(AddrOffset(base, limit) >= size);
mvff->free -= AddrOffset(base, limit);
- *baseReturn = base;
+ *baseReturn = base;
*limitReturn = limit;
return ResOK;
}
@@ -383,7 +383,7 @@ static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn,
/* MVFFBufferEmpty -- return unused portion of this buffer */
-static void MVFFBufferEmpty(Pool pool, Buffer buffer,
+static void MVFFBufferEmpty(Pool pool, Buffer buffer,
Addr base, Addr limit)
{
MVFF mvff;
@@ -421,7 +421,7 @@ static Res MVFFInit(Pool pool, va_list arg)
/* .arg: class-specific additional arguments; see */
/* design.mps.poolmvff.method.init */
- /* .arg.check: we do the same checks here and in MVFFCheck */
+ /* .arg.check: we do the same checks here and in MVFFCheck */
/* except for arenaHigh, which is stored only in the segPref. */
extendBy = va_arg(arg, Size);
avgSize = va_arg(arg, Size);
@@ -452,7 +452,7 @@ static Res MVFFInit(Pool pool, va_list arg)
res = ControlAlloc(&p, arena, sizeof(SegPrefStruct), FALSE);
if (res != ResOK)
return res;
-
+
mvff->segPref = (SegPref)p;
*mvff->segPref = *SegPrefDefault();
SegPrefExpress(mvff->segPref, arenaHigh ? SegPrefHigh : SegPrefLow, NULL);
@@ -463,7 +463,7 @@ static Res MVFFInit(Pool pool, va_list arg)
mvff->total = 0;
mvff->free = 0;
- CBSInit(arena, CBSOfMVFF(mvff), (void *)mvff, NULL, NULL, NULL, NULL,
+ CBSInit(arena, CBSOfMVFF(mvff), (void *)mvff, NULL, NULL, NULL, NULL,
mvff->extendBy, align, TRUE, TRUE);
mvff->sig = MVFFSig;
@@ -493,7 +493,7 @@ static void MVFFFinish(Pool pool)
AVER(SegPool(seg) == pool);
SegFree(seg);
}
-
+
/* Could maintain mvff->total here and check it falls to zero, */
/* but that would just make the function slow. If only we had */
/* a way to do operations only if AVERs are turned on. */
@@ -551,7 +551,7 @@ static Res MVFFDescribe(Pool pool, mps_lib_FILE *stream)
res = WriteF(stream, "}\n", NULL);
- return res;
+ return res;
}
@@ -615,7 +615,7 @@ size_t mps_mvff_free_size(mps_pool_t mps_pool)
AVERT(Pool, pool);
mvff = PoolPoolMVFF(pool);
AVERT(MVFF, mvff);
-
+
return (size_t)mvff->free;
}
@@ -632,7 +632,7 @@ size_t mps_mvff_size(mps_pool_t mps_pool)
AVERT(MVFF, mvff);
return (size_t)mvff->total;
-}
+}
/* MVFFCheck -- check the consistency of an MVFF structure */
@@ -648,7 +648,7 @@ static Bool MVFFCheck(MVFF mvff)
CHECKL(mvff->avgSize > 0); /* see .arg.check */
CHECKL(mvff->avgSize <= mvff->extendBy); /* see .arg.check */
CHECKL(mvff->total >= mvff->free);
- CHECKL(SizeIsAligned(mvff->free, PoolAlignment(MVFFPool(mvff))));
+ CHECKL(SizeIsAligned(mvff->free, PoolAlignment(MVFFPool(mvff))));
CHECKL(SizeIsAligned(mvff->total, ArenaAlign(PoolArena(MVFFPool(mvff)))));
CHECKD(CBS, CBSOfMVFF(mvff));
CHECKL(BoolCheck(mvff->slotHigh));
diff --git a/mps/code/pooln.c b/mps/code/pooln.c
index 5fbee580865..3252a5cbb9b 100644
--- a/mps/code/pooln.c
+++ b/mps/code/pooln.c
@@ -35,7 +35,7 @@ static Res NInit(Pool pool, va_list args)
PoolN poolN = PoolPoolN(pool);
UNUSED(args);
-
+
/* Initialize pool-specific structures. */
AVERT(PoolN, poolN);
@@ -119,7 +119,7 @@ static Res NBufferFill(Addr *baseReturn, Addr *limitReturn,
/* NBufferEmpty -- buffer empty method for class N */
-static void NBufferEmpty(Pool pool, Buffer buffer,
+static void NBufferEmpty(Pool pool, Buffer buffer,
Addr init, Addr limit)
{
AVERT(Pool, pool);
@@ -159,7 +159,7 @@ static Res NWhiten(Pool pool, Trace trace, Seg seg)
AVERT(Trace, trace);
AVERT(Seg, seg);
-
+
NOTREACHED; /* pool doesn't have any actions */
return ResUNIMPL;
diff --git a/mps/code/poolsnc.c b/mps/code/poolsnc.c
index 824cb28baf8..a954639f07e 100644
--- a/mps/code/poolsnc.c
+++ b/mps/code/poolsnc.c
@@ -8,9 +8,9 @@
* .design: design.mps.poolsnc
*
* LIGHTWEIGHT FRAMES
- *
+ *
* .lw-frame-state: The pool uses lightweight frames as its only
- * type of allocation frame. The lightweight frame state is set to
+ * type of allocation frame. The lightweight frame state is set to
* Valid whenever a buffer has a segment and Disabled otherwise.
* See design.mps.alloc-frame.lw-frame.states.
*
@@ -68,7 +68,7 @@ static void sncPopPartialSegChain(SNC snc, Buffer buf, Seg upTo);
* This subclass of RankBuf holds a segment chain.
*/
-#define SNCBufSig ((Sig)0x51954CBF) /* SIGnature SNC BuFfer */
+#define SNCBufSig ((Sig)0x51954CBF) /* SIGnature SNC BuFfer */
typedef struct SNCBufStruct *SNCBuf;
@@ -228,7 +228,7 @@ static Bool SNCSegCheck(SNCSeg sncseg)
/* sncSegInit -- Init method for SNC segments */
-static Res sncSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res sncSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
SegClass super;
@@ -328,7 +328,7 @@ static void sncPopPartialSegChain(SNC snc, Buffer buf, Seg upTo)
/* sncFindFreeSeg
*
- * attempts to find and detach a large enough segment from the
+ * attempts to find and detach a large enough segment from the
* freelist. returns TRUE on success.
*/
static Bool sncFindFreeSeg(Seg *segReturn, SNC snc, Size size)
@@ -437,7 +437,7 @@ static Res SNCBufferFill(Addr *baseReturn, Addr *limitReturn,
/* No free seg, so create a new one */
arena = PoolArena(pool);
asize = SizeAlignUp(size, ArenaAlign(arena));
- res = SegAlloc(&seg, SNCSegClassGet(), &snc->segPrefStruct,
+ res = SegAlloc(&seg, SNCSegClassGet(), &snc->segPrefStruct,
asize, pool, withReservoirPermit);
if (res != ResOK)
return res;
@@ -460,7 +460,7 @@ found:
}
-static void SNCBufferEmpty(Pool pool, Buffer buffer,
+static void SNCBufferEmpty(Pool pool, Buffer buffer,
Addr init, Addr limit)
{
SNC snc;
@@ -477,7 +477,7 @@ static void SNCBufferEmpty(Pool pool, Buffer buffer,
AVERT(SNC, snc);
AVER(BufferFrameState(buffer) == BufferFrameVALID);
/* .lw-frame-state */
- BufferFrameSetState(buffer, BufferFrameDISABLED);
+ BufferFrameSetState(buffer, BufferFrameDISABLED);
arena = BufferArena(buffer);
@@ -507,7 +507,7 @@ static Res SNCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
format = pool->format;
base = SegBase(seg);
-
+
/* If the segment is buffered, only walk as far as the end */
/* of the initialized objects. */
if (SegBuffer(seg) != NULL) {
@@ -515,7 +515,7 @@ static Res SNCScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
} else {
limit = SegLimit(seg);
}
-
+
if (base < limit) {
res = (*format->scan)(ss, base, limit);
if (res != ResOK) {
@@ -583,7 +583,7 @@ static void SNCFramePopPending(Pool pool, Buffer buf, AllocFrame frame)
AVERT(SNC, snc);
AVER(BufferFrameState(buf) == BufferFrameVALID);
-
+
if (frame == NULL) {
/* corresponds to a pop to bottom of stack. .lw-frame-null */
BufferDetach(buf, pool);
diff --git a/mps/code/prmci3li.c b/mps/code/prmci3li.c
index e21f9bb99aa..04e195bbe34 100644
--- a/mps/code/prmci3li.c
+++ b/mps/code/prmci3li.c
@@ -4,12 +4,12 @@
* Copyright (c) 2001 Ravenbrook Limited.
*
* .purpose: This module implements the part of the protection module
- * that decodes the MutatorFaultContext.
+ * that decodes the MutatorFaultContext.
*
*
* SOURCES
*
- * .source.i486: Intel486 Microprocessor Family Programmer's
+ * .source.i486: Intel486 Microprocessor Family Programmer's
* Reference Manual
*
* .source.linux.kernel: Linux kernel source files.
@@ -58,8 +58,8 @@ MRef Prmci3AddressHoldingReg(MutatorFaultContext context, unsigned int regnum)
/* Prmci3DecodeFaultContext -- decode fault to find faulting address and IP */
-void Prmci3DecodeFaultContext(MRef *faultmemReturn,
- Byte **insvecReturn,
+void Prmci3DecodeFaultContext(MRef *faultmemReturn,
+ Byte **insvecReturn,
MutatorFaultContext context)
{
struct sigcontext *scp;
diff --git a/mps/code/prmci3w3.c b/mps/code/prmci3w3.c
index 4d467c85107..66b512aaba5 100644
--- a/mps/code/prmci3w3.c
+++ b/mps/code/prmci3w3.c
@@ -6,11 +6,11 @@
* PURPOSE
*
* .purpose: This module implements the part of the protection module
- * that decodes the MutatorFaultContext.
+ * that decodes the MutatorFaultContext.
*
* SOURCES
*
- * .source.i486: Intel486 Microprocessor Family Programmer's
+ * .source.i486: Intel486 Microprocessor Family Programmer's
* Reference Manual (book.intel92).
*
* ASSUMPTIONS
@@ -54,7 +54,7 @@ MRef Prmci3AddressHoldingReg(MutatorFaultContext context, unsigned int regnum)
/* Prmci3DecodeFaultContext -- decode fault context */
-void Prmci3DecodeFaultContext(MRef *faultmemReturn, Byte **insvecReturn,
+void Prmci3DecodeFaultContext(MRef *faultmemReturn, Byte **insvecReturn,
MutatorFaultContext context)
{
LPEXCEPTION_RECORD er;
diff --git a/mps/code/proti3.c b/mps/code/proti3.c
index f698364260c..2a026e1514d 100644
--- a/mps/code/proti3.c
+++ b/mps/code/proti3.c
@@ -8,7 +8,7 @@
* functions.
*
* .purpose: This module implements the part of the protection module
- * that implements the MutatorFaultContext type.
+ * that implements the MutatorFaultContext type.
*
* .requirements: Current requirements are for limited support only, for
* stepping the sorts of instructions that the Dylan compiler might
@@ -19,11 +19,11 @@
*
* SOURCES
*
- * .source.i486: Intel486 Microprocessor Family Programmer's
+ * .source.i486: Intel486 Microprocessor Family Programmer's
* Reference Manual
*
- * .source.dylan: Dylan table code implementation. Especially the
- * following HOPE units:
+ * .source.dylan: Dylan table code implementation. Especially the
+ * following HOPE units:
* D-lib-dylan!table.dylan (class , slot entry-element)
* D-dfmc-harp-cg!harp-primitives.dylan (method op--repeated-slot-element)
* D-harp-pentium-harp!moves.dylan (pentium-template ld-index)
@@ -31,18 +31,18 @@
*
* ASSUMPTIONS
*
- * .assume.null: It's always safe for Prot*StepInstruction to return
- * ResUNIMPL. A null implementation of this module would be overly
+ * .assume.null: It's always safe for Prot*StepInstruction to return
+ * ResUNIMPL. A null implementation of this module would be overly
* conservative but otherwise correct.
*
* .assume.want: The Dylan implementation is likely to access a
* weak table vector using either MOV r/m32,r32 or MOV r32,r/m32
- * instructions, where the r/m32 operand will be of one of the forms
+ * instructions, where the r/m32 operand will be of one of the forms
* disp8[reg], disp8[reg1][reg2], disp8[reg1][reg2*4] (see .source.dylan
* and .source.i486)
*
* .assume.i3: Assume the following about the i386 environment:
- * Steppable instructions (.assume.want) use the CS, DS & SS
+ * Steppable instructions (.assume.want) use the CS, DS & SS
* segment registers only (see .source.i486 Table 2-3).
* The procesor runs in 32 bit mode.
* The CS, DS and SS segment registers all describe identical 32-
@@ -57,9 +57,9 @@ SRCID(proti3, "$Id$");
/* DecodeCB -- Decode an Intel x86 control byte into Hi, Medium & Low fields */
-static void DecodeCB(unsigned int *hReturn,
- unsigned int *mReturn,
- unsigned int *lReturn,
+static void DecodeCB(unsigned int *hReturn,
+ unsigned int *mReturn,
+ unsigned int *lReturn,
Byte op)
{
/* see .source.i486 Figure 26-2 */
@@ -74,9 +74,9 @@ static void DecodeCB(unsigned int *hReturn,
/* DecodeSIB -- Decode a Scale Index Base byte for an Intel x86 instruction */
-static void DecodeSIB(unsigned int *sReturn,
- unsigned int *iReturn,
- unsigned int *bReturn,
+static void DecodeSIB(unsigned int *sReturn,
+ unsigned int *iReturn,
+ unsigned int *bReturn,
Byte op)
{
DecodeCB(sReturn, iReturn, bReturn, op);
@@ -85,9 +85,9 @@ static void DecodeSIB(unsigned int *sReturn,
/* DecodeModRM -- Decode a ModR/M byte for an Intel x86 instruction */
-static void DecodeModRM(unsigned int *modReturn,
- unsigned int *rReturn,
- unsigned int *mReturn,
+static void DecodeModRM(unsigned int *modReturn,
+ unsigned int *rReturn,
+ unsigned int *mReturn,
Byte op)
{
DecodeCB(modReturn, rReturn, mReturn, op);
@@ -105,7 +105,7 @@ static Word RegValue(MutatorFaultContext context, unsigned int regnum)
}
-/* Return a byte element of an instruction vector as a
+/* Return a byte element of an instruction vector as a
* Word value, with sign extension
*/
static Word SignedInsElt(Byte insvec[], Count i)
@@ -120,14 +120,14 @@ static Word SignedInsElt(Byte insvec[], Count i)
/* If a MOV instruction is a sufficiently simple example of a
* move between a register and memory (in either direction),
* then find the register, the effective address and the size
- * of the instruction. The instruction is considered sufficiently
+ * of the instruction. The instruction is considered sufficiently
* simple if it uses a single byte displacement, a base register,
* and either no index or a (possibly scaled) register.
*/
-static Bool DecodeSimpleMov(unsigned int *regnumReturn,
- MRef *memReturn,
+static Bool DecodeSimpleMov(unsigned int *regnumReturn,
+ MRef *memReturn,
Size *inslenReturn,
- MutatorFaultContext context,
+ MutatorFaultContext context,
Byte insvec[])
{
unsigned int mod;
@@ -148,7 +148,7 @@ static Bool DecodeSimpleMov(unsigned int *regnumReturn,
unsigned int b;
DecodeSIB(&s, &i, &b, insvec[2]); /* .source.i486 Table 26-3 */
- if (4 == i)
+ if (4 == i)
return FALSE; /* degenerate SIB form - unused by Dylan compiler */
disp = SignedInsElt(insvec, 3);
base = RegValue(context, b);
@@ -170,8 +170,8 @@ static Bool DecodeSimpleMov(unsigned int *regnumReturn,
}
-static Bool IsSimpleMov(Size *inslenReturn,
- MRef *srcReturn,
+static Bool IsSimpleMov(Size *inslenReturn,
+ MRef *srcReturn,
MRef *destReturn,
MutatorFaultContext context)
{
@@ -200,7 +200,7 @@ static Bool IsSimpleMov(Size *inslenReturn,
*srcReturn = Prmci3AddressHoldingReg(context, regnum);
return TRUE;
}
- }
+ }
return FALSE;
}
diff --git a/mps/code/protlii3.c b/mps/code/protlii3.c
index 8e2bfa6dfb5..e85de34246b 100644
--- a/mps/code/protlii3.c
+++ b/mps/code/protlii3.c
@@ -5,7 +5,7 @@
*
* SOURCES
*
- * .source.i486: Intel486 Microprocessor Family Programmer's
+ * .source.i486: Intel486 Microprocessor Family Programmer's
* Reference Manual
*
* .source.linux.kernel: Linux kernel source files.
@@ -33,7 +33,7 @@ SRCID(protlii3, "$Id$");
/* Useful stuff that doesn't appear to be in any header files. */
-/* Interrupt number 14 is Page Fault. */
+/* Interrupt number 14 is Page Fault. */
#define TRAPNO_PAGE_FAULT 14
/* Bits in err field of sigcontext for interrupt 14 (page fault) */
@@ -76,7 +76,7 @@ typedef void (*__real_lii3_sighandler_t)(int, struct sigcontext);
* .sigh.addr: We assume that the OS decodes the address to something
* sensible
*/
-
+
static void sigHandle(int sig, struct sigcontext context) /* .sigh.args */
{
AVER(sig == SIGSEGV);
diff --git a/mps/code/proto1.c b/mps/code/proto1.c
index 5126ee2bafc..80470cade2d 100644
--- a/mps/code/proto1.c
+++ b/mps/code/proto1.c
@@ -90,7 +90,7 @@ static void sigHandle(int sig, siginfo_t *info, void *context)
/* @@ This is really weak.
* Need to implement rest of the contract of sigaction */
-
+
e = sigaction(SIGSEGV, &sigNext, &sa);
AVER(e == 0);
sigemptyset(&sigset);
diff --git a/mps/code/protocol.c b/mps/code/protocol.c
index 205f17c180b..ead6730c01a 100644
--- a/mps/code/protocol.c
+++ b/mps/code/protocol.c
@@ -62,7 +62,7 @@ Bool ProtocolIsSubclass(ProtocolClass sub, ProtocolClass super)
* This default method must be inherited by any subclass
* which does not perform a multiple inheritance.
*/
-static Bool ProtocolCoerceClass(ProtocolClass *coerceResult,
+static Bool ProtocolCoerceClass(ProtocolClass *coerceResult,
ProtocolClass proClass,
ProtocolClass super)
{
@@ -89,7 +89,7 @@ static Bool ProtocolCoerceClass(ProtocolClass *coerceResult,
* This default method must be inherited by any subclass
* which does not perform a multiple inheritance.
*/
-static Bool ProtocolCoerceInst(ProtocolInst *coerceResult,
+static Bool ProtocolCoerceInst(ProtocolInst *coerceResult,
ProtocolInst proInst,
ProtocolClass super)
{
@@ -113,7 +113,7 @@ static Bool ProtocolCoerceInst(ProtocolInst *coerceResult,
/* The class definition for the root of the hierarchy */
-DEFINE_CLASS(ProtocolClass, theClass)
+DEFINE_CLASS(ProtocolClass, theClass)
{
theClass->sig = ProtocolClassSig;
theClass->superclass = theClass;
diff --git a/mps/code/protocol.h b/mps/code/protocol.h
index d688cff6feb..44d3605703c 100644
--- a/mps/code/protocol.h
+++ b/mps/code/protocol.h
@@ -99,22 +99,22 @@ typedef struct ProtocolInstStruct *ProtocolInst;
/* ProtocolCoerceInstMethod -- coerce "pro" to an instance of "interface"
*
- * If "pro" is an instance of "interface", then returns TRUE
- * and sets coerceResult to point directly to the part of "pro"
- * which contains the slots for "interface"
+ * If "pro" is an instance of "interface", then returns TRUE
+ * and sets coerceResult to point directly to the part of "pro"
+ * which contains the slots for "interface"
*/
typedef Bool (*ProtocolCoerceInstMethod)(ProtocolInst *coerceResult,
- ProtocolInst pro,
+ ProtocolInst pro,
ProtocolClass interface);
/* ProtocolCoerceClassMethod -- coerce "proClass" to an "interface" class
*
* If "proClass" is a subclass of "interface", then returns TRUE
- * and sets coerceResult to point directly to the part of
+ * and sets coerceResult to point directly to the part of
* "proClass" which contains the slots for "interface".
*/
typedef Bool (*ProtocolCoerceClassMethod)(ProtocolClass *coerceResult,
- ProtocolClass proClass,
+ ProtocolClass proClass,
ProtocolClass interface);
@@ -135,7 +135,7 @@ typedef struct ProtocolInstStruct {
/* ProtocolClassGet -- Returns the root of the protocol class hierarchy
*
- * Function name conforms to standard conventions for
+ * Function name conforms to standard conventions for
* protocols.
*/
extern ProtocolClass ProtocolClassGet(void);
@@ -174,7 +174,7 @@ extern Bool ProtocolIsSubclass(ProtocolClass sub, ProtocolClass super);
/* SUPERCLASS - get the superclass object, given a class name
*
* Returns the superclass, with type ProtocolClass. Clients will
- * probably wish to cast this. See
+ * probably wish to cast this. See
* design.mps.protocol.int.static-superclass
*/
#define SUPERCLASS(className) \
diff --git a/mps/code/protso.c b/mps/code/protso.c
index 4778b9f2665..1628fde99f3 100644
--- a/mps/code/protso.c
+++ b/mps/code/protso.c
@@ -74,7 +74,7 @@ static struct sigaction sigNext;
* sensible
* .sigh.limit: We throw away the limit information.
*/
-
+
static void sigHandle(int sig, siginfo_t *info, void *context)
{
AVER(sig == SIGSEGV);
diff --git a/mps/code/protsu.c b/mps/code/protsu.c
index 41f1af0bf37..5e20b1c1dc8 100644
--- a/mps/code/protsu.c
+++ b/mps/code/protsu.c
@@ -154,7 +154,7 @@ void ProtSetup(void)
handler_t next;
/* ProtSetup is called exactly once, see design.mps.prot.if.setup */
- AVER(sigNext == NULL);
+ AVER(sigNext == NULL);
next = signal(SIGSEGV, sigHandle);
/* should always succeed as our parameters are valid */
diff --git a/mps/code/protw3.c b/mps/code/protw3.c
index cbdcea360bf..7bfe1b6784b 100644
--- a/mps/code/protw3.c
+++ b/mps/code/protw3.c
@@ -62,7 +62,7 @@ LONG ProtSEHfilter(LPEXCEPTION_POINTERS info)
if(er->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
return EXCEPTION_CONTINUE_SEARCH;
-
+
context.ep = info;
/* assert that the exception is continuable */
diff --git a/mps/code/qs.c b/mps/code/qs.c
index 053cf0462fb..f5cf4935050 100644
--- a/mps/code/qs.c
+++ b/mps/code/qs.c
@@ -330,7 +330,7 @@ static void *go(void *p, size_t s)
testlib_unused(p);
testlib_unused(s);
- die(mps_pool_create(&mpool, arena, mps_class_mv(),
+ die(mps_pool_create(&mpool, arena, mps_class_mv(),
(size_t)65536, sizeof(QSCellStruct) * 1000,
(size_t)65536),
"MVCreate");
diff --git a/mps/code/replay.c b/mps/code/replay.c
index a23fd97b2ad..4400ad684b8 100644
--- a/mps/code/replay.c
+++ b/mps/code/replay.c
@@ -81,12 +81,12 @@ static char *parseArgs(int argc, char *argv[])
{
char *name = "mpsio.log";
int i = 1;
-
+
if (argc >= 1)
prog = argv[0];
else
prog = "unknown";
-
+
while (i < argc) { /* consider argument i */
if (argv[i][0] == '-') { /* it's an option argument */
switch (argv[i][1]) {
@@ -94,9 +94,9 @@ static char *parseArgs(int argc, char *argv[])
++ i;
if (i == argc)
usageError();
- else
+ else
name = argv[i];
- break;
+ break;
case 'p': /* partial log */
partialLog = TRUE;
break;
diff --git a/mps/code/reserv.c b/mps/code/reserv.c
index 2564f1da8c1..72d52241aa9 100644
--- a/mps/code/reserv.c
+++ b/mps/code/reserv.c
@@ -23,7 +23,7 @@ SRCID(reserv, "$Id$");
*
* The reservoir maintains a linked list of tracts in arbitrary order.
* (see .improve.contiguous)
- *
+ *
* Tracts are chained using the TractP field. */
#define resTractNext(tract) ((Tract)TractP((tract)))
@@ -46,7 +46,7 @@ static Res ResPoolInit(Pool pool, va_list arg)
}
-/* ResPoolFinish -- Reservoir pool finish method
+/* ResPoolFinish -- Reservoir pool finish method
*
* .reservoir.finish: This might be called from ArenaFinish, so the
* arena cannot be checked at this time. In order to avoid the check,
@@ -136,8 +136,8 @@ static Bool reservoirIsConsistent(Reservoir reservoir)
}
-/* ReservoirEnsureFull
- *
+/* ReservoirEnsureFull
+ *
* Ensures that the reservoir is the right size, by topping it up with
* fresh memory from the arena if possible. */
@@ -155,7 +155,7 @@ Res ReservoirEnsureFull(Reservoir reservoir)
/* optimize the common case of a full reservoir */
if (reservoir->reservoirSize == limit)
- return ResOK;
+ return ResOK;
pool = &reservoir->poolStruct;
@@ -217,7 +217,7 @@ Res ReservoirWithdraw(Addr *baseReturn, Tract *baseTractReturn,
{
Pool respool;
Arena arena;
-
+
AVER(baseReturn != NULL);
AVER(baseTractReturn != NULL);
AVERT(Reservoir, reservoir);
@@ -233,7 +233,7 @@ Res ReservoirWithdraw(Addr *baseReturn, Tract *baseTractReturn,
/* See .improve.contiguous & change.dylan.jackdaw.160125 */
if (size != ArenaAlign(arena))
return ResMEMORY;
-
+
if (size <= reservoir->reservoirSize) {
/* Return the first tract */
Tract tract = reservoir->reserve;
@@ -250,7 +250,7 @@ Res ReservoirWithdraw(Addr *baseReturn, Tract *baseTractReturn,
return ResOK;
}
- AVER(reservoirIsConsistent(reservoir));
+ AVER(reservoirIsConsistent(reservoir));
return ResMEMORY; /* no suitable region in the reservoir */
}
@@ -282,7 +282,7 @@ void ReservoirDeposit(Reservoir reservoir, Addr base, Size size)
/* Reassign the tract to the reservoir pool */
TractFinish(tract);
TractInit(tract, respool, addr);
- reservoir->reservoirSize += alignment;
+ reservoir->reservoirSize += alignment;
resTractSetNext(tract, reservoir->reserve);
reservoir->reserve = tract;
} else {
@@ -303,7 +303,7 @@ static Count mutatorBufferCount(Globals arena)
{
Ring nodep, nextp;
Count count = 0;
-
+
/* Iterate over all pools, and count the mutator buffers in each */
RING_FOR(nodep, &arena->poolRing, nextp) {
Pool pool = RING_ELT(Pool, arenaRing, nodep);
@@ -352,7 +352,7 @@ void ReservoirSetLimit(Reservoir reservoir, Size size)
/* Shrink the reservoir */
reservoirShrink(reservoir, needed);
reservoir->reservoirLimit = needed;
- AVER(reservoirIsConsistent(reservoir));
+ AVER(reservoirIsConsistent(reservoir));
}
}
@@ -362,7 +362,7 @@ void ReservoirSetLimit(Reservoir reservoir, Size size)
Size ReservoirLimit(Reservoir reservoir)
{
AVERT(Reservoir, reservoir);
- AVER(reservoirIsConsistent(reservoir));
+ AVER(reservoirIsConsistent(reservoir));
return reservoir->reservoirLimit;
}
@@ -389,7 +389,7 @@ Res ReservoirInit(Reservoir reservoir, Arena arena)
reservoir->reserve = NULL;
reservoir->sig = ReservoirSig;
/* initialize the reservoir pool, design.mps.reservoir */
- res = PoolInit(&reservoir->poolStruct,
+ res = PoolInit(&reservoir->poolStruct,
arena, EnsureReservoirPoolClass());
if (res == ResOK) {
AVERT(Reservoir, reservoir);
diff --git a/mps/code/ring.c b/mps/code/ring.c
index 8cb191d3baf..4bd6e924c21 100644
--- a/mps/code/ring.c
+++ b/mps/code/ring.c
@@ -22,7 +22,7 @@ SRCID(ring, "$Id$");
/* RingCheck, RingCheckSingle -- check the validity of a ring node
*
- * RingCheck performs a consistency check on the ring node.
+ * RingCheck performs a consistency check on the ring node.
* RingCheckSingle performs the same check, but also checks that
* the ring node is a singleton (design.mps.ring.def.singleton).
*/
@@ -65,7 +65,7 @@ void (RingInit)(Ring ring)
/* RingFinish -- finish a ring node
*/
-
+
void (RingFinish)(Ring ring)
{
RingFinish(ring); /* impl.h.mpm.ring.finish */
diff --git a/mps/code/root.c b/mps/code/root.c
index 7992a44edd7..b39154fe437 100644
--- a/mps/code/root.c
+++ b/mps/code/root.c
@@ -96,7 +96,7 @@ Bool RootModeCheck(RootMode mode)
Bool RootCheck(Root root)
{
CHECKS(Root, root);
- CHECKU(Arena, root->arena);
+ CHECKU(Arena, root->arena);
CHECKL(root->serial < ArenaGlobals(root->arena)->rootSerial);
CHECKL(RingCheck(&root->arenaRing));
CHECKL(RankCheck(root->rank));
@@ -152,8 +152,8 @@ Bool RootCheck(Root root)
/* rootCreate, RootCreateTable, RootCreateReg, RootCreateFmt, RootCreateFun
*
* RootCreate* set up the appropriate union member, and call the generic
- * create function to do the actual creation
- *
+ * create function to do the actual creation
+ *
* See design.mps.root.init for initial value. */
static Res rootCreate(Root *rootReturn, Arena arena,
@@ -263,7 +263,7 @@ Res RootCreateTable(Root *rootReturn, Arena arena,
AVERT(Arena, arena);
AVER(RankCheck(rank));
AVER(base != 0);
- AVER(base < limit);
+ AVER(base < limit);
theUnion.table.base = base;
theUnion.table.limit = limit;
@@ -418,7 +418,7 @@ void RootGrey(Root root, Trace trace)
{
AVERT(Root, root);
AVERT(Trace, trace);
-
+
root->grey = TraceSetAdd(root->grey, trace);
}
@@ -589,7 +589,7 @@ Res RootDescribe(Root root, mps_lib_FILE *stream)
res = WriteF(stream,
"Root $P ($U) {\n", (WriteFP)root, (WriteFU)root->serial,
- " arena $P ($U)\n", (WriteFP)root->arena,
+ " arena $P ($U)\n", (WriteFP)root->arena,
(WriteFU)root->arena->serial,
" rank $U\n", (WriteFU)root->rank,
" grey $B\n", (WriteFB)root->grey,
@@ -639,7 +639,7 @@ Res RootDescribe(Root root, mps_lib_FILE *stream)
NULL);
if (res != ResOK) return res;
break;
-
+
default:
NOTREACHED;
}
diff --git a/mps/code/sacss.c b/mps/code/sacss.c
index a78546b1592..dda4199bd54 100644
--- a/mps/code/sacss.c
+++ b/mps/code/sacss.c
@@ -85,7 +85,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
int j = rnd()%(testSetSIZE-i);
void *tp;
size_t ts;
-
+
tp = ps[j]; ts = ss[j];
ps[j] = ps[i]; ss[j] = ss[i];
ps[i] = tp; ss[i] = ts;
@@ -114,11 +114,11 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena,
case 1: {
res = mps_sac_alloc((mps_addr_t *)&ps[i], sac, ss[i], FALSE);
} break;
- }
+ }
if (res != MPS_RES_OK) return res;
}
}
-
+
mps_sac_destroy(sac);
mps_pool_destroy(pool);
diff --git a/mps/code/seg.c b/mps/code/seg.c
index 9f3b35b3ac4..95e8110bb6d 100644
--- a/mps/code/seg.c
+++ b/mps/code/seg.c
@@ -15,7 +15,7 @@
* .purpose.class.seg-gc: Class GCSeg is a concrete class support all
* all current GC features, and providing full backwards compatibility
* with "old-style" segments. It may be subclassed by clients of the
- * module.
+ * module.
*
* TRANSGRESSIONS
*
@@ -54,7 +54,7 @@ static Res SegInit(Seg seg, Pool pool, Addr base, Size size,
/* SegAlloc -- allocate a segment from the arena */
-Res SegAlloc(Seg *segReturn, SegClass class, SegPref pref,
+Res SegAlloc(Seg *segReturn, SegClass class, SegPref pref,
Size size, Pool pool, Bool withReservoirPermit, ...)
{
Res res;
@@ -189,7 +189,7 @@ static Res SegInit(Seg seg, Pool pool, Addr base, Size size,
res = class->init(seg, pool, base, size, withReservoirPermit, args);
if (res != ResOK)
goto failInit;
-
+
AVERT(Seg, seg);
RingAppend(&pool->segRing, SegPoolRing(seg));
return ResOK;
@@ -252,7 +252,7 @@ static void SegFinish(Seg seg)
/* fund are not protected) */
AVER(seg->sm == AccessSetEMPTY);
AVER(seg->pm == AccessSetEMPTY);
-
+
}
@@ -314,7 +314,7 @@ void SegSetSummary(Seg seg, RefSet summary)
void SegSetRankAndSummary(Seg seg, RankSet rankSet, RefSet summary)
{
- AVERT(Seg, seg);
+ AVERT(Seg, seg);
AVER(RankSetCheck(rankSet));
#ifdef PROTECTION_NONE
@@ -361,7 +361,7 @@ Res SegDescribe(Seg seg, mps_lib_FILE *stream)
res = WriteF(stream,
"Segment $P [$A,$A) {\n", (WriteFP)seg,
(WriteFA)SegBase(seg), (WriteFA)SegLimit(seg),
- " class $P (\"$S\")\n",
+ " class $P (\"$S\")\n",
(WriteFP)seg->class, seg->class->name,
" pool $P ($U)\n",
(WriteFP)pool, (WriteFU)pool->serial,
@@ -377,8 +377,8 @@ Res SegDescribe(Seg seg, mps_lib_FILE *stream)
}
-/* .seg.critical: These seg functions are low-level and used
- * through-out. They are therefore on the critical path and their
+/* .seg.critical: These seg functions are low-level and used
+ * through-out. They are therefore on the critical path and their
* AVERs are so-marked.
*/
@@ -407,7 +407,7 @@ Size SegSize(Seg seg)
AVERT_CRITICAL(Seg, seg);
return AddrOffset(SegBase(seg), SegLimit(seg));
}
-
+
/* SegOfAddr -- return the seg the given address is in, if any */
@@ -441,9 +441,9 @@ Bool SegFirst(Seg *segReturn, Arena arena)
if (TRACT_SEG(&seg, tract)) {
*segReturn = seg;
return TRUE;
- }
+ }
} while (TractNext(&tract, arena, TractBase(tract)));
- }
+ }
return FALSE;
}
@@ -483,7 +483,7 @@ Bool SegNext(Seg *segReturn, Arena arena, Addr addr)
} else {
base = TractBase(tract);
}
- }
+ }
return FALSE;
}
@@ -520,7 +520,7 @@ Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi,
/* Invoke class-specific methods to do the merge */
va_start(args, withReservoirPermit);
- res = class->merge(segLo, segHi, base, mid, limit,
+ res = class->merge(segLo, segHi, base, mid, limit,
withReservoirPermit, args);
va_end(args);
if (ResOK != res)
@@ -579,7 +579,7 @@ Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at,
/* Invoke class-specific methods to do the split */
va_start(args, withReservoirPermit);
- res = class->split(seg, segNew, base, at, limit,
+ res = class->split(seg, segNew, base, at, limit,
withReservoirPermit, args);
va_end(args);
if (ResOK != res)
@@ -602,10 +602,10 @@ failControl:
/* Class Seg -- The most basic segment class
*
- * .seg.method.check: Many seg methods are lightweight and used
+ * .seg.method.check: Many seg methods are lightweight and used
* frequently. Their parameters are checked by the corresponding
- * dispatching function, and so the their parameter AVERs are
- * marked as critical.
+ * dispatching function, and so the their parameter AVERs are
+ * marked as critical.
*/
@@ -618,7 +618,7 @@ Bool SegCheck(Seg seg)
Pool pool;
Addr addr;
Size align;
-
+
CHECKS(Seg, seg);
CHECKL(TraceSetCheck(seg->white));
@@ -653,7 +653,7 @@ Bool SegCheck(Seg seg)
/* CHECKL(RingNext(&seg->poolRing) != &seg->poolRing); */
CHECKL(RingCheck(&seg->poolRing));
-
+
/* "pm", "sm", and "depth" not checked. See .check.shield. */
CHECKL(RankSetCheck(seg->rankSet));
if (seg->rankSet == RankSetEMPTY) {
@@ -680,7 +680,7 @@ Bool SegCheck(Seg seg)
/* segTrivInit -- method to initialize the base fields of a segment */
-static Res segTrivInit(Seg seg, Pool pool, Addr base, Size size,
+static Res segTrivInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
/* all the initialization happens in SegInit so checks are safe */
@@ -787,7 +787,7 @@ static void segNoSetBuffer(Seg seg, Buffer buffer)
/* segNoMerge -- merge method for segs which don't support merge */
-static Res segNoMerge(Seg seg, Seg segHi,
+static Res segNoMerge(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -804,13 +804,13 @@ static Res segNoMerge(Seg seg, Seg segHi,
}
-/* segTrivMerge -- Basic Seg merge method
+/* segTrivMerge -- Basic Seg merge method
*
- * .similar: Segments must be "sufficiently similar".
- * See design.mps.seg.merge.inv.similar
+ * .similar: Segments must be "sufficiently similar".
+ * See design.mps.seg.merge.inv.similar
*/
-static Res segTrivMerge(Seg seg, Seg segHi,
+static Res segTrivMerge(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -873,7 +873,7 @@ static Res segTrivMerge(Seg seg, Seg segHi,
/* segNoSplit -- split method for segs which don't support splitting */
-static Res segNoSplit(Seg seg, Seg segHi,
+static Res segNoSplit(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -893,7 +893,7 @@ static Res segNoSplit(Seg seg, Seg segHi,
/* segTrivSplit -- Basic Seg split method */
-static Res segTrivSplit(Seg seg, Seg segHi,
+static Res segTrivSplit(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -921,7 +921,7 @@ static Res segTrivSplit(Seg seg, Seg segHi,
/* Segment may not be exposed, or in the shield cache */
/* See design.mps.seg.split-merge.shield & impl.c.shield.def.depth */
AVER(seg->depth == 0);
-
+
/* Full initialization for segHi. Just modify seg. */
seg->limit = mid;
segHi->limit = limit;
@@ -1045,7 +1045,7 @@ Bool GCSegCheck(GCSeg gcseg)
if (seg->rankSet == RankSetEMPTY) {
/* design.mps.seg.field.rankSet.empty */
CHECKL(gcseg->summary == RefSetEMPTY);
- }
+ }
return TRUE;
}
@@ -1132,7 +1132,7 @@ static void gcSegSetGreyInternal(Seg seg, TraceSet oldGrey, TraceSet grey)
GCSeg gcseg;
Arena arena;
Rank rank;
-
+
/* Internal method. Parameters are checked by caller */
gcseg = SegGCSeg(seg);
arena = PoolArena(SegPool(seg));
@@ -1189,7 +1189,7 @@ static void gcSegSetGrey(Seg seg, TraceSet grey)
GCSeg gcseg;
TraceSet oldGrey, flippedTraces;
Arena arena;
-
+
AVERT_CRITICAL(Seg, seg); /* .seg.method.check */
AVER_CRITICAL(TraceSetCheck(grey)); /* .seg.method.check */
AVER(seg->rankSet != RankSetEMPTY);
@@ -1302,9 +1302,9 @@ static void gcSegSetRankSet(Seg seg, RankSet rankSet)
/* gcSegSetSummary -- GCSeg method to change the summary on a segment
*
* In fact, we only need to raise the write barrier if the
- * segment contains references, and its summary is strictly smaller
+ * segment contains references, and its summary is strictly smaller
* than the summary of the unprotectable data (i.e. the mutator).
- * We don't maintain such a summary, assuming that the mutator can
+ * We don't maintain such a summary, assuming that the mutator can
* access all references, so its summary is RefSetUNIV.
*/
@@ -1403,13 +1403,13 @@ static void gcSegSetBuffer(Seg seg, Buffer buffer)
}
-/* gcSegMerge -- GCSeg merge method
+/* gcSegMerge -- GCSeg merge method
*
* .buffer: Can't merge two segments both with buffers.
* See design.mps.seg.merge.inv.buffer.
*/
-static Res gcSegMerge(Seg seg, Seg segHi,
+static Res gcSegMerge(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -1441,7 +1441,7 @@ static Res gcSegMerge(Seg seg, Seg segHi,
/* Merge the superclass fields via next-method call */
super = SEG_SUPERCLASS(GCSegClass);
- res = super->merge(seg, segHi, base, mid, limit,
+ res = super->merge(seg, segHi, base, mid, limit,
withReservoirPermit, args);
if (res != ResOK)
goto failSuper;
@@ -1479,7 +1479,7 @@ failSuper:
/* gcSegSplit -- GCSeg split method */
-static Res gcSegSplit(Seg seg, Seg segHi,
+static Res gcSegSplit(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -1499,7 +1499,7 @@ static Res gcSegSplit(Seg seg, Seg segHi,
AVER(SegBase(seg) == base);
AVER(SegLimit(seg) == limit);
AVER(BoolCheck(withReservoirPermit));
-
+
grey = SegGrey(seg);
buf = gcseg->buffer; /* Look for buffer to reassign to segHi */
if (buf != NULL) {
@@ -1509,11 +1509,11 @@ static Res gcSegSplit(Seg seg, Seg segHi,
} else {
buf = NULL; /* buffer lies below split and is unaffected */
}
- }
+ }
/* Split the superclass fields via next-method call */
super = SEG_SUPERCLASS(GCSegClass);
- res = super->split(seg, segHi, base, mid, limit,
+ res = super->split(seg, segHi, base, mid, limit,
withReservoirPermit, args);
if (res != ResOK)
goto failSuper;
@@ -1589,7 +1589,7 @@ Bool SegClassCheck(SegClass class)
CHECKL(FUNCHECK(class->describe));
CHECKS(SegClass, class);
return TRUE;
-}
+}
/* SegClass -- the vanilla segment class definition */
@@ -1601,9 +1601,9 @@ DEFINE_CLASS(SegClass, class)
class->size = sizeof(SegStruct);
class->init = segTrivInit;
class->finish = segTrivFinish;
- class->setSummary = segNoSetSummary;
- class->buffer = segNoBuffer;
- class->setBuffer = segNoSetBuffer;
+ class->setSummary = segNoSetSummary;
+ class->buffer = segNoBuffer;
+ class->setBuffer = segNoSetBuffer;
class->setGrey = segNoSetGrey;
class->setWhite = segNoSetWhite;
class->setRankSet = segNoSetRankSet;
@@ -1616,7 +1616,7 @@ DEFINE_CLASS(SegClass, class)
/* GCSegClass -- GC-supporting segment class definition */
-
+
typedef SegClassStruct GCSegClassStruct;
DEFINE_CLASS(GCSegClass, class)
@@ -1626,9 +1626,9 @@ DEFINE_CLASS(GCSegClass, class)
class->size = sizeof(GCSegStruct);
class->init = gcSegInit;
class->finish = gcSegFinish;
- class->setSummary = gcSegSetSummary;
- class->buffer = gcSegBuffer;
- class->setBuffer = gcSegSetBuffer;
+ class->setSummary = gcSegSetSummary;
+ class->buffer = gcSegBuffer;
+ class->setBuffer = gcSegSetBuffer;
class->setGrey = gcSegSetGrey;
class->setWhite = gcSegSetWhite;
class->setRankSet = gcSegSetRankSet;
diff --git a/mps/code/segsmss.c b/mps/code/segsmss.c
index 04469b5aa1f..3acd229a41c 100644
--- a/mps/code/segsmss.c
+++ b/mps/code/segsmss.c
@@ -3,8 +3,8 @@
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
*
- * .design: Adapted from amsss.c (because AMS already supports
- * a protocol for subclassing AMS segments). Defines a new pool
+ * .design: Adapted from amsss.c (because AMS already supports
+ * a protocol for subclassing AMS segments). Defines a new pool
* class, AMST. Segments are split and merged during BufferFill
* operations. Buffered segments are also split and merged between
* allocation requests.
@@ -112,7 +112,7 @@ static Bool AMSTSegCheck(AMSTSeg amstseg)
/* amstSegInit -- initialise an amst segment */
-static Res amstSegInit(Seg seg, Pool pool, Addr base, Size size,
+static Res amstSegInit(Seg seg, Pool pool, Addr base, Size size,
Bool reservoirPermit, va_list args)
{
SegClass super;
@@ -163,20 +163,20 @@ static void amstSegFinish(Seg seg)
/* finish the superclass fields last */
super = SEG_SUPERCLASS(AMSTSegClass);
super->finish(seg);
-}
+}
-/* amstSegMerge -- AMSTSeg merge method
+/* amstSegMerge -- AMSTSeg merge method
*
- * .fail: Test proper handling of the most complex failure cases
- * by deliberately detecting failure sometimes after calling the
- * next method. We handle the error by calling the anti-method.
+ * .fail: Test proper handling of the most complex failure cases
+ * by deliberately detecting failure sometimes after calling the
+ * next method. We handle the error by calling the anti-method.
* This isn't strictly safe (see design.mps.poolams.split-merge.fail).
- * But we assume here that we won't run out of memory when calling the
+ * But we assume here that we won't run out of memory when calling the
* anti-method.
*/
-static Res amstSegMerge(Seg seg, Seg segHi,
+static Res amstSegMerge(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -195,7 +195,7 @@ static Res amstSegMerge(Seg seg, Seg segHi,
/* Merge the superclass fields via direct next-method call */
super = SEG_SUPERCLASS(AMSTSegClass);
- res = super->merge(seg, segHi, base, mid, limit,
+ res = super->merge(seg, segHi, base, mid, limit,
withReservoirPermit, args);
if (res != ResOK)
goto failSuper;
@@ -215,7 +215,7 @@ static Res amstSegMerge(Seg seg, Seg segHi,
failDeliberate:
/* Call the anti-method (see .fail) */
- res = super->split(seg, segHi, base, mid, limit,
+ res = super->split(seg, segHi, base, mid, limit,
withReservoirPermit, args);
AVER(res == ResOK);
res = ResFAIL;
@@ -228,7 +228,7 @@ failSuper:
/* amstSegSplit -- AMSTSeg split method */
-static Res amstSegSplit(Seg seg, Seg segHi,
+static Res amstSegSplit(Seg seg, Seg segHi,
Addr base, Addr mid, Addr limit,
Bool withReservoirPermit, va_list args)
{
@@ -246,7 +246,7 @@ static Res amstSegSplit(Seg seg, Seg segHi,
/* Split the superclass fields via direct next-method call */
super = SEG_SUPERCLASS(AMSTSegClass);
- res = super->split(seg, segHi, base, mid, limit,
+ res = super->split(seg, segHi, base, mid, limit,
withReservoirPermit, args);
if (res != ResOK)
goto failSuper;
@@ -270,7 +270,7 @@ static Res amstSegSplit(Seg seg, Seg segHi,
failDeliberate:
/* Call the anti-method. (see .fail) */
- res = super->merge(seg, segHi, base, mid, limit,
+ res = super->merge(seg, segHi, base, mid, limit,
withReservoirPermit, args);
AVER(res == ResOK);
res = ResFAIL;
@@ -413,7 +413,7 @@ static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit)
sbase = SegBase(seg);
ams = Pool2AMS(SegPool(seg));
- bgrain = AMSGrains(ams, AddrOffset(sbase, base));
+ bgrain = AMSGrains(ams, AddrOffset(sbase, base));
lgrain = AMSGrains(ams, AddrOffset(sbase, limit));
if (amsseg->allocTableInUse) {
@@ -510,17 +510,17 @@ static void AMSAllocateRange(Seg seg, Addr base, Addr limit)
/* AMSTBufferFill -- the pool class buffer fill method
- *
- * Calls next method - but possibly splits or merges the chosen
+ *
+ * Calls next method - but possibly splits or merges the chosen
* segment.
*
- * .merge: A merge is performed when the next method returns
+ * .merge: A merge is performed when the next method returns
* the entire segment, this segment had previously been split
* from the segment below, and the segment below is appropriately
* similar (i.e. not already attached to a buffer and similarly grey)
*
- * .split: If we're not merging, a split is performed if the next method
- * returns the entire segment, and yet lower half of the segment would
+ * .split: If we're not merging, a split is performed if the next method
+ * returns the entire segment, and yet lower half of the segment would
* meet the request.
*/
static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
@@ -581,7 +581,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
Seg segLo, segHi;
Res sres;
AMSUnallocateRange(seg, mid, limit);
- sres = SegSplit(&segLo, &segHi, seg, mid, withReservoirPermit);
+ sres = SegSplit(&segLo, &segHi, seg, mid, withReservoirPermit);
if (ResOK == sres) { /* successful split */
limit = mid; /* range is lower segment */
} else { /* failed to split */
@@ -607,7 +607,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
* been split and the segment above meets the constraints (i.e. empty,
* not already attached to a buffer and similar colour)
*
- * .bsplit: Whether or not a merge happpened, a split is performed if
+ * .bsplit: Whether or not a merge happpened, a split is performed if
* the limit of the buffered region is arena aligned, and yet does not
* correspond to the segment limit, provided that the part of the segment
* above the buffer is all free.
@@ -641,12 +641,12 @@ static void AMSTStressBufferedSeg(Seg seg, Buffer buffer)
printf("J");
} else {
/* deliberate fails only */
- AVER(amst->failSegs);
+ AVER(amst->failSegs);
}
}
}
- if (SegLimit(seg) != limit &&
+ if (SegLimit(seg) != limit &&
AddrIsAligned(limit, ArenaAlign(arena)) &&
AMSSegRegionIsFree(seg, limit, SegLimit(seg))) {
/* .bsplit */
@@ -658,7 +658,7 @@ static void AMSTStressBufferedSeg(Seg seg, Buffer buffer)
printf("C");
} else {
/* deliberate fails only */
- AVER(amst->failSegs);
+ AVER(amst->failSegs);
}
}
}
@@ -706,7 +706,7 @@ static mps_class_t mps_class_amst(void)
#define exactRootsCOUNT 50
#define ambigRootsCOUNT 100
#define sizeScale 4
-/* This is enough for five GCs. */
+/* This is enough for five GCs. */
#define totalSizeMAX sizeScale * 800 * (size_t)1024
#define totalSizeSTEP 200 * (size_t)1024
/* objNULL needs to be odd so that it's ignored in exactRoots. */
diff --git a/mps/code/shield.c b/mps/code/shield.c
index 17195fdfcf2..7e1fe864f00 100644
--- a/mps/code/shield.c
+++ b/mps/code/shield.c
@@ -16,20 +16,20 @@
* .def.synced: a seg is synced if the prot and shield modes are the
* same, and unsynced otherwise.
* .def.depth: the depth of a segment is defined as
- * depth == #exposes - #covers + I(in cache), where
+ * depth == #exposes - #covers + #(in cache), where
* #exposes = the total number of times the seg has been exposed
* #covers = the total number of times the seg has been covered
- * I(in cache) = 1 if the segment is in the cache
- * 0 otherwise
- * The cache is initially empty and cover should not be called
- * without a matching expose, so this figure should always be
+ * #(in cache) = the number of times the seg appears in the cache
+ * The cache is initially empty and Cover should not be called
+ * without a matching Expose, so this figure should always be
* non-negative.
* .def.total.depth: The total depth is the sum of the depth over
* all segments
* .def.outside: being outside the shield is being between calls
* to leave and enter, and similarly .def.inside: being inside the
* shield is being between calls to enter and leave.
- * .def.suspended: suspended is true iff the threads are suspended
+ * .def.suspended: suspended is true iff the mutator is suspended.
+ * .def.shielded: a segment is shielded if the shield mode is non-zero.
*
*
* Properties
@@ -46,7 +46,7 @@
*
* These invariants are maintained by the code.
*
- * .inv.outside.running: The mutator is running while outside the
+ * .inv.outside.running: The mutator is not suspended while outside the
* shield.
* .inv.unsynced.suspended: If any segment is not synced,
* the mutator is suspended.
@@ -62,7 +62,7 @@
* As the depth of a segment cannot be negative
* total depth == 0 => for all segments, depth == 0
* => all segs are synced (by .inv.unsynced.depth)
- *
+ *
* If the mutator is running then all segs must be synced
* (.inv.unsynced.suspend). Which means that the hardware protection
* (prot mode) must reflect the software protection (shield mode).
@@ -140,7 +140,7 @@ static void flush(Arena arena, Size i)
AVER(SegDepth(seg) > 0);
--arena->shDepth;
SegSetDepth(seg, SegDepth(seg) - 1);
-
+
if (SegDepth(seg) == 0)
sync(arena, seg);
diff --git a/mps/code/sos8gp.gmk b/mps/code/sos8gp.gmk
index 619243c3cbe..8d954b5473b 100644
--- a/mps/code/sos8gp.gmk
+++ b/mps/code/sos8gp.gmk
@@ -12,6 +12,6 @@ MPMPF = mpsliban.c mpsioan.c lockan.c than.c vmso.c \
MPMS = sssos8.s
SWPF = than.c vmso.c protsw.c prmcan.c ssan.c
# Need to pass the profile option to the linker as well.
-LIBS = -lm -pg
+LIBS = -lm -pg
include gp.gmk
diff --git a/mps/code/splay.c b/mps/code/splay.c
index 2fd8e632750..4d30a062de1 100644
--- a/mps/code/splay.c
+++ b/mps/code/splay.c
@@ -3,12 +3,12 @@
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
*
- * .purpose: Splay trees are used to manage potentially unbounded
+ * .purpose: Splay trees are used to manage potentially unbounded
* collections of ordered things.
*
* .source: design.mps.splay
*
- * .note.stack: It's important that the MPS have a bounded stack
+ * .note.stack: It's important that the MPS have a bounded stack
* size, and this is a problem for tree algorithms. Basically,
* we have to avoid recursion.
*/
@@ -33,7 +33,7 @@ SRCID(splay, "$Id$");
#define SplayCompare(tree, key, node) \
(((tree)->compare)((key), (node)))
-Bool SplayTreeCheck(SplayTree tree)
+Bool SplayTreeCheck(SplayTree tree)
{
UNUSED(tree);
CHECKL(tree != NULL);
@@ -42,7 +42,7 @@ Bool SplayTreeCheck(SplayTree tree)
return TRUE;
}
-Bool SplayNodeCheck(SplayNode node)
+Bool SplayNodeCheck(SplayNode node)
{
UNUSED(node);
CHECKL(node != NULL);
@@ -51,7 +51,7 @@ Bool SplayNodeCheck(SplayNode node)
void SplayTreeInit(SplayTree tree, SplayCompareMethod compare,
- SplayUpdateNodeMethod updateNode)
+ SplayUpdateNodeMethod updateNode)
{
AVER(tree != NULL);
AVER(FUNCHECK(compare));
@@ -64,7 +64,7 @@ void SplayTreeInit(SplayTree tree, SplayCompareMethod compare,
AVERT(SplayTree, tree);
}
-void SplayNodeInit(SplayNode node)
+void SplayNodeInit(SplayNode node)
{
AVER(node != NULL);
@@ -75,7 +75,7 @@ void SplayNodeInit(SplayNode node)
AVERT(SplayNode, node);
}
-void SplayNodeFinish(SplayNode node)
+void SplayNodeFinish(SplayNode node)
{
AVERT(SplayNode, node);
@@ -84,14 +84,14 @@ void SplayNodeFinish(SplayNode node)
SplayNodeSetRightChild(node, NULL);
}
-void SplayTreeFinish(SplayTree tree)
+void SplayTreeFinish(SplayTree tree)
{
AVERT(SplayTree, tree);
SplayTreeSetRoot(tree, NULL);
tree->compare = NULL;
}
-static void SplayNodeUpdate(SplayTree tree, SplayNode node)
+static void SplayNodeUpdate(SplayTree tree, SplayNode node)
{
AVERT(SplayTree, tree);
AVERT(SplayNode, node);
@@ -111,7 +111,7 @@ static void SplayNodeUpdate(SplayTree tree, SplayNode node)
* See design.mps.splay.impl.link.right.
*/
-static void SplayLinkRight(SplayNode *topIO, SplayNode *rightIO)
+static void SplayLinkRight(SplayNode *topIO, SplayNode *rightIO)
{
AVERT(SplayNode, *topIO);
AVERT(SplayNode, *rightIO);
@@ -127,7 +127,7 @@ static void SplayLinkRight(SplayNode *topIO, SplayNode *rightIO)
*topIO = SplayNodeLeftChild(*topIO);
/* The following line is only required for .link.right.first. */
- SplayNodeSetLeftChild(*rightIO, NULL);
+ SplayNodeSetLeftChild(*rightIO, NULL);
}
/* SplayLinkLeft -- Move top to right child of top
@@ -153,7 +153,7 @@ static void SplayLinkLeft(SplayNode *topIO, SplayNode *leftIO) {
*topIO = SplayNodeRightChild(*topIO);
/* The following line is only required for .link.left.first. */
- SplayNodeSetRightChild(*leftIO, NULL);
+ SplayNodeSetRightChild(*leftIO, NULL);
}
/* SplayRotateLeft -- Rotate right child edge of node
@@ -220,27 +220,27 @@ static void SplayRotateRight(SplayNode *nodeIO, SplayTree tree) {
/* SplayAssemble -- Assemble left right and top trees into one
*
- * We do this by moving the children of the top tree to the last and
+ * We do this by moving the children of the top tree to the last and
* first nodes in the left and right trees, and then moving the tops
* of the left and right trees to the children of the top tree.
- *
- * When we reach this function, the nodes between the roots of the
+ *
+ * When we reach this function, the nodes between the roots of the
* left and right trees and their last and first nodes respectively
* will have out of date client properties.
*
* See design.mps.splay.impl.assemble.
*/
-static void SplayAssemble(SplayTree tree, SplayNode top,
+static void SplayAssemble(SplayTree tree, SplayNode top,
SplayNode leftTop, SplayNode leftLast,
SplayNode rightTop, SplayNode rightFirst) {
AVERT(SplayTree, tree);
AVERT(SplayNode, top);
- AVER(leftTop == NULL ||
+ AVER(leftTop == NULL ||
(SplayNodeCheck(leftTop) && SplayNodeCheck(leftLast)));
- AVER(rightTop == NULL ||
+ AVER(rightTop == NULL ||
(SplayNodeCheck(rightTop) && SplayNodeCheck(rightFirst)));
-
+
if(leftTop != NULL) {
SplayNodeSetRightChild(leftLast, SplayNodeLeftChild(top));
SplayNodeSetLeftChild(top, leftTop);
@@ -307,7 +307,7 @@ static void SplayAssemble(SplayTree tree, SplayNode top,
}
/* otherwise leave top->right alone */
- if(tree->updateNode != NULL)
+ if(tree->updateNode != NULL)
SplayNodeUpdate(tree, top);
}
@@ -320,11 +320,11 @@ static void SplayAssemble(SplayTree tree, SplayNode top,
* See design.mps.splay.impl.splay.
*/
-static Bool SplaySplay(SplayNode *nodeReturn, SplayTree tree,
+static Bool SplaySplay(SplayNode *nodeReturn, SplayTree tree,
void *key, SplayCompareMethod compareMethod) {
/* The sides structure avoids a boundary case in SplayLink* */
SplayNodeStruct sides; /* rightTop and leftTop */
- SplayNode top, leftLast, rightFirst;
+ SplayNode top, leftLast, rightFirst;
Bool found;
Compare compareTop;
@@ -338,7 +338,7 @@ static Bool SplaySplay(SplayNode *nodeReturn, SplayTree tree,
*nodeReturn = NULL;
return FALSE;
}
-
+
/* short-circuit case where node is already top */
compareTop = (*compareMethod)(key, top);
if(compareTop == CompareEQUAL) {
@@ -439,7 +439,7 @@ static Bool SplaySplay(SplayNode *nodeReturn, SplayTree tree,
}
compareTop = (*compareMethod)(key, top);
} /* end while(TRUE) */
-
+
terminalZig:
SplayLinkRight(&top, &rightFirst);
found = FALSE;
@@ -451,7 +451,7 @@ terminalZag:
goto assemble;
assemble:
- SplayAssemble(tree, top,
+ SplayAssemble(tree, top,
SplayNodeRightChild(&sides), leftLast,
SplayNodeLeftChild(&sides), rightFirst);
@@ -579,7 +579,7 @@ Res SplayTreeSearch(SplayNode *nodeReturn, SplayTree tree, void *key) {
}
-/* SplayTreePredecessor -- Splays a tree at the root's predecessor
+/* SplayTreePredecessor -- Splays a tree at the root's predecessor
*
 * Must not be called on an empty tree. Predecessor need not exist,
* in which case NULL is returned, and the tree is unchanged.
@@ -616,7 +616,7 @@ static SplayNode SplayTreePredecessor(SplayTree tree, void *key) {
}
-/* SplayTreeSuccessor -- Splays a tree at the root's successor
+/* SplayTreeSuccessor -- Splays a tree at the root's successor
*
 * Must not be called on an empty tree. Successor need not exist,
* in which case NULL is returned, and the tree is unchanged.
@@ -699,14 +699,14 @@ Res SplayTreeNeighbours(SplayNode *leftReturn, SplayNode *rightReturn,
/* SplayTreeFirst, SplayTreeNext -- Iterators
*
- * SplayTreeFirst receives a key that must precede all
- * nodes in the tree. It returns NULL if the tree is empty.
- * Otherwise, it splays the tree to the first node, and returns the
+ * SplayTreeFirst receives a key that must precede all
+ * nodes in the tree. It returns NULL if the tree is empty.
+ * Otherwise, it splays the tree to the first node, and returns the
* new root. See design.mps.splay.function.splay.tree.first.
*
- * SplayTreeNext takes a tree and splays it to the successor of the
- * old root, and returns the new root. Returns NULL is there are
- * no successors. It takes a key for the old root. See
+ * SplayTreeNext takes a tree and splays it to the successor of the
+ * old root, and returns the new root. Returns NULL if there are
+ * no successors. It takes a key for the old root. See
* design.mps.splay.function.splay.tree.next.
*/
@@ -797,7 +797,7 @@ typedef struct {
SplayTree tree;
} SplayFindClosureStruct, *SplayFindClosure;
-static Compare SplayFindFirstCompare(void *key, SplayNode node)
+static Compare SplayFindFirstCompare(void *key, SplayNode node)
{
SplayFindClosure closure;
void *closureP;
@@ -828,7 +828,7 @@ static Compare SplayFindFirstCompare(void *key, SplayNode node)
}
}
-static Compare SplayFindLastCompare(void *key, SplayNode node)
+static Compare SplayFindLastCompare(void *key, SplayNode node)
{
SplayFindClosure closure;
void *closureP;
@@ -863,10 +863,10 @@ static Compare SplayFindLastCompare(void *key, SplayNode node)
/* SplayFindFirst -- Find first node that satisfies client property
*
* This function finds the first node (in address order) in the given
- * tree that satisfies some property defined by the client. The
+ * tree that satisfies some property defined by the client. The
* property is such that the client can detect, given a sub-tree,
* whether that sub-tree contains any nodes satisfying the property.
- *
+ *
* The given callbacks testNode and testTree detect this property in
* a single node or a sub-tree rooted at a node, and both receive the
* arbitrary closures closureP and closureS.
@@ -896,7 +896,7 @@ Bool SplayFindFirst(SplayNode *nodeReturn, SplayTree tree,
closureStruct.testTree = testTree;
closureStruct.tree = tree;
- if(SplaySplay(&node, tree, (void *)&closureStruct,
+ if(SplaySplay(&node, tree, (void *)&closureStruct,
&SplayFindFirstCompare)) {
*nodeReturn = node;
return TRUE;
@@ -932,7 +932,7 @@ Bool SplayFindLast(SplayNode *nodeReturn, SplayTree tree,
closureStruct.testTree = testTree;
closureStruct.tree = tree;
- if(SplaySplay(&node, tree, (void *)&closureStruct,
+ if(SplaySplay(&node, tree, (void *)&closureStruct,
&SplayFindLastCompare)) {
*nodeReturn = node;
return TRUE;
@@ -964,7 +964,7 @@ Bool SplayRoot(SplayNode *nodeReturn, SplayTree tree)
/* SplayNodeRefresh -- Updates the client property that has changed at a node
*
* This function undertakes to call the client updateNode callback for each
- * node affected by the change in properties at the given node (which has
+ * node affected by the change in properties at the given node (which has
* the given key) in an appropriate order.
*
 * The function fulfils its job by first splaying at the given node, and
@@ -984,7 +984,7 @@ void SplayNodeRefresh(SplayTree tree, SplayNode node, void *key)
AVER(node == node2);
(*tree->updateNode)(tree, node, SplayNodeLeftChild(node),
- SplayNodeRightChild(node));
+ SplayNodeRightChild(node));
}
@@ -993,7 +993,7 @@ void SplayNodeRefresh(SplayTree tree, SplayNode node, void *key)
* See design.mps.splay.function.splay.tree.describe.
*/
-Res SplayTreeDescribe(SplayTree tree, mps_lib_FILE *stream,
+Res SplayTreeDescribe(SplayTree tree, mps_lib_FILE *stream,
SplayNodeDescribeMethod nodeDescribe) {
Res res;
diff --git a/mps/code/splay.h b/mps/code/splay.h
index 481655a3a59..e073c262b4b 100644
--- a/mps/code/splay.h
+++ b/mps/code/splay.h
@@ -21,7 +21,7 @@ typedef Bool (*SplayTestNodeMethod)(SplayTree tree, SplayNode node,
typedef Bool (*SplayTestTreeMethod)(SplayTree tree, SplayNode node,
void *closureP, unsigned long closureS);
typedef void (*SplayUpdateNodeMethod)(SplayTree tree, SplayNode node,
- SplayNode leftChild,
+ SplayNode leftChild,
SplayNode rightChild);
typedef Res (*SplayNodeDescribeMethod)(SplayNode node, mps_lib_FILE *stream);
enum {
@@ -56,12 +56,12 @@ extern Res SplayTreeDelete(SplayTree tree, SplayNode node, void *key);
extern Res SplayTreeSearch(SplayNode *nodeReturn,
SplayTree tree, void *key );
-extern Res SplayTreeNeighbours(SplayNode *leftReturn,
+extern Res SplayTreeNeighbours(SplayNode *leftReturn,
SplayNode *rightReturn,
SplayTree tree, void *key);
extern SplayNode SplayTreeFirst(SplayTree tree, void *zeroKey);
-extern SplayNode SplayTreeNext(SplayTree tree, SplayNode oldNode,
+extern SplayNode SplayTreeNext(SplayTree tree, SplayNode oldNode,
void *oldKey);
extern Bool SplayFindFirst(SplayNode *nodeReturn, SplayTree tree,
diff --git a/mps/code/sslii3.c b/mps/code/sslii3.c
index d3a4b0dd27a..c9c12c13119 100644
--- a/mps/code/sslii3.c
+++ b/mps/code/sslii3.c
@@ -3,7 +3,7 @@
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
*
- * This scans the stack and fixes the registers which may contain
+ * This scans the stack and fixes the registers which may contain
* roots. See design.mps.thread-manager
*
* The registers edi, esi, ebx are the registers defined to be preserved
diff --git a/mps/code/ssw3i3.c b/mps/code/ssw3i3.c
index de68747cb74..ff02c6b3af4 100644
--- a/mps/code/ssw3i3.c
+++ b/mps/code/ssw3i3.c
@@ -3,7 +3,7 @@
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
*
- * This scans the stack and fixes the registers which may contain
+ * This scans the stack and fixes the registers which may contain
* roots. See design.mps.thread-manager
*
* The registers edi, esi, ebx are the registers defined to be preserved
diff --git a/mps/code/table.c b/mps/code/table.c
index f793c4d7d90..4e56f64725e 100644
--- a/mps/code/table.c
+++ b/mps/code/table.c
@@ -80,7 +80,7 @@ static TableEntry TableFind(Table table, Word key, int skip_deleted)
{
ulong hash;
size_t i, mask = table->length - 1;
-
+
hash = TableHash(key) & mask;
i = hash;
do {
@@ -122,7 +122,7 @@ static Res TableGrow(Table table)
newArray[i].value = NULL;
newArray[i].status = tableUNUSED;
}
-
+
table->length = newLength;
table->array = newArray;
table->limit *= 2;
@@ -167,7 +167,7 @@ extern Res TableCreate(Table *tableReturn, size_t length)
table->array[i].value = NULL;
table->array[i].status = tableUNUSED;
}
-
+
*tableReturn = table;
return ResOK;
@@ -237,7 +237,7 @@ extern Res TableDefine(Table table, Word key, void *value)
extern Res TableRedefine(Table table, Word key, void *value)
{
TableEntry entry = TableFind(table, key, 1 /* skip deletions */);
-
+
if (entry == NULL || entry->status != tableACTIVE)
return ResFAIL;
assert(entry->key == key);
diff --git a/mps/code/teletest.c b/mps/code/teletest.c
index a2b8089d070..3d39e9a186e 100644
--- a/mps/code/teletest.c
+++ b/mps/code/teletest.c
@@ -17,7 +17,7 @@
SRCID(teletest, "$Id$");
-static mps_arena_t arena;
+static mps_arena_t arena;
#define MAX_ARGS 3
@@ -47,7 +47,7 @@ static void callControl(mps_word_t reset, mps_word_t flip)
}
-static void doControl(void)
+static void doControl(void)
{
callControl(args[0], args[1]);
}
@@ -57,38 +57,38 @@ static void doRead(void)
{
mps_word_t old;
old = mps_telemetry_control((mps_word_t)0, (mps_word_t)0);
-
+
(void)printf(WORD_FORMAT "\n", old);
}
-static void doSet(void)
+static void doSet(void)
{
callControl(args[0], args[0]);
}
-static void doReset(void)
+static void doReset(void)
{
callControl(args[0], (mps_word_t)0);
}
-static void doFlip(void)
+static void doFlip(void)
{
callControl((mps_word_t)0, args[0]);
}
-static void doIntern(void)
+static void doIntern(void)
{
mps_word_t id;
id = mps_telemetry_intern(stringArg);
(void)printf(WORD_FORMAT "\n", id);
}
-
-static void doLabel(void)
+
+static void doLabel(void)
{
mps_telemetry_label((mps_addr_t)args[0], args[1]);
}
@@ -97,7 +97,7 @@ static void doFlush(void)
{
mps_telemetry_flush();
}
-
+
static void doQuit(void)
{
mps_arena_destroy(arena);
@@ -194,13 +194,13 @@ static void obeyCommand(char *command)
printf("command not understood\n> %s\n", command);
doHelp();
}
-
+
#define testArenaSIZE (((size_t)64)<<20)
extern int main(int argc, char *argv[])
{
- testlib_unused(argc);
+ testlib_unused(argc);
testlib_unused(argv);
die(mps_arena_create((mps_arena_t*)&arena, mps_arena_class_vm(),
diff --git a/mps/code/testlib.h b/mps/code/testlib.h
index a843ef8f302..6245ef46a2c 100644
--- a/mps/code/testlib.h
+++ b/mps/code/testlib.h
@@ -96,7 +96,7 @@ extern void die(mps_res_t res, const char *s);
/* die_expect -- get expected result or die
*
 * If the first argument is not the same as the second argument,
- * prints the third argument on stderr and exits the program.
+ * prints the third argument on stderr and exits the program.
* Otherwise does nothing.
*
* Typical use:
diff --git a/mps/code/th.h b/mps/code/th.h
index 505808ab605..82ba8c250f5 100644
--- a/mps/code/th.h
+++ b/mps/code/th.h
@@ -56,7 +56,7 @@ extern void ThreadRingSuspend(Ring threadRing);
extern void ThreadRingResume(Ring threadRing);
-/* ThreadRingThread
+/* ThreadRingThread
*
* Return the thread from an element of the Arena's
* thread ring.
diff --git a/mps/code/than.c b/mps/code/than.c
index 2e12568520f..67f57844b42 100644
--- a/mps/code/than.c
+++ b/mps/code/than.c
@@ -51,7 +51,7 @@ Res ThreadRegister(Thread *threadReturn, Arena arena)
AVER(threadReturn != NULL);
- res = ControlAlloc(&p, arena, sizeof(ThreadStruct),
+ res = ControlAlloc(&p, arena, sizeof(ThreadStruct),
/* withReservoirPermit */ FALSE);
if(res != ResOK) return res;
thread = (Thread)p;
@@ -131,10 +131,10 @@ Res ThreadScan(ScanState ss, Thread thread, void *stackBot)
Res ThreadDescribe(Thread thread, mps_lib_FILE *stream)
{
Res res;
-
+
res = WriteF(stream,
"Thread $P ($U) {\n", (WriteFP)thread, (WriteFU)thread->serial,
- " arena $P ($U)\n",
+ " arena $P ($U)\n",
(WriteFP)thread->arena, (WriteFU)thread->arena->serial,
"} Thread $P ($U)\n", (WriteFP)thread, (WriteFU)thread->serial,
NULL);
diff --git a/mps/code/thw3i3.c b/mps/code/thw3i3.c
index 0b1affc1827..60251ca0c1f 100644
--- a/mps/code/thw3i3.c
+++ b/mps/code/thw3i3.c
@@ -34,14 +34,14 @@
* i.e. stack pointer points to the last allocated location;
* stack grows downwards.
*
- * .stack.below-bottom: it's legal for the stack pointer to be at a
- * higher address than the registered bottom of stack. This might
- * happen if the stack of another thread doesn't contain any frames
+ * .stack.below-bottom: it's legal for the stack pointer to be at a
+ * higher address than the registered bottom of stack. This might
+ * happen if the stack of another thread doesn't contain any frames
* belonging to the client language. In this case, the stack should
* not be scanned.
*
* .stack.align: assume roots on the stack are always word-aligned,
- * but don't assume that the stack pointer is necessarily
+ * but don't assume that the stack pointer is necessarily
* word-aligned at the time of reading the context of another thread.
*
* .i3: assumes MPS_ARCH_I3
@@ -122,7 +122,7 @@ Res ThreadRegister(Thread *threadReturn, Arena arena)
AVER(threadReturn != NULL);
AVERT(Arena, arena);
- res = ControlAlloc(&p, arena, sizeof(ThreadStruct),
+ res = ControlAlloc(&p, arena, sizeof(ThreadStruct),
/* withReservoirPermit */ FALSE);
if(res != ResOK)
return res;
@@ -314,10 +314,10 @@ Arena ThreadArena(Thread thread)
Res ThreadDescribe(Thread thread, mps_lib_FILE *stream)
{
Res res;
-
+
res = WriteF(stream,
"Thread $P ($U) {\n", (WriteFP)thread, (WriteFU)thread->serial,
- " arena $P ($U)\n",
+ " arena $P ($U)\n",
(WriteFP)thread->arena, (WriteFU)thread->arena->serial,
" handle $W\n", (WriteFW)thread->handle,
" id $U\n", (WriteFU)thread->id,
diff --git a/mps/code/trace.c b/mps/code/trace.c
index 7ff3de4e2a1..5b118e87bb6 100644
--- a/mps/code/trace.c
+++ b/mps/code/trace.c
@@ -35,11 +35,11 @@ typedef struct TraceMessageStruct {
#define MessageTraceMessage(message) \
(PARENT(TraceMessageStruct, messageStruct, message))
-static Bool TraceMessageCheck(TraceMessage message)
+static Bool TraceMessageCheck(TraceMessage message)
{
CHECKS(TraceMessage, message);
CHECKD(Message, TraceMessageMessage(message));
- CHECKL(MessageGetType(TraceMessageMessage(message)) ==
+ CHECKL(MessageGetType(TraceMessageMessage(message)) ==
MessageTypeGC);
/* We can't check anything about the statistics. In particular, */
/* liveSize may exceed condemnedSize because they are only estimates. */
@@ -60,7 +60,7 @@ static void TraceMessageDelete(Message message)
ControlFree(arena, (void *)tMessage, sizeof(TraceMessageStruct));
}
-static Size TraceMessageLiveSize(Message message)
+static Size TraceMessageLiveSize(Message message)
{
TraceMessage tMessage;
@@ -71,7 +71,7 @@ static Size TraceMessageLiveSize(Message message)
return tMessage->liveSize;
}
-static Size TraceMessageCondemnedSize(Message message)
+static Size TraceMessageCondemnedSize(Message message)
{
TraceMessage tMessage;
@@ -82,7 +82,7 @@ static Size TraceMessageCondemnedSize(Message message)
return tMessage->condemnedSize;
}
-static Size TraceMessageNotCondemnedSize(Message message)
+static Size TraceMessageNotCondemnedSize(Message message)
{
TraceMessage tMessage;
@@ -381,7 +381,7 @@ Res TraceAddWhite(Trace trace, Seg seg)
trace->white = ZoneSetUnion(trace->white, ZoneSetOfSeg(trace->arena, seg));
/* if the pool is a moving GC, then condemned objects may move */
if (pool->class->attr & AttrMOVINGGC) {
- trace->mayMove = ZoneSetUnion(trace->mayMove,
+ trace->mayMove = ZoneSetUnion(trace->mayMove,
ZoneSetOfSeg(trace->arena, seg));
}
}
@@ -650,7 +650,7 @@ found:
trace->emergency = FALSE;
trace->chain = NULL;
trace->condemned = (Size)0; /* nothing condemned yet */
- trace->notCondemned = (Size)0;
+ trace->notCondemned = (Size)0;
trace->foundation = (Size)0; /* nothing grey yet */
trace->rate = (Size)0; /* no scanning to be done yet */
STATISTIC(trace->greySegCount = (Count)0);
@@ -712,7 +712,7 @@ void TraceDestroy(Trace trace)
/* Notify all the chains. */
RING_FOR(chainNode, &trace->arena->chainRing, nextChainNode) {
Chain chain = RING_ELT(Chain, chainRing, chainNode);
-
+
ChainEndGC(chain, trace);
}
} else {
@@ -1083,7 +1083,7 @@ Res TraceFix(ScanState ss, Ref *refIO)
if (res != ResOK)
return res;
}
- } else {
+ } else {
/* Tract isn't white. Don't compute seg for non-statistical */
/* variety. See design.mps.trace.fix.tractofaddr */
STATISTIC_STAT
@@ -1136,7 +1136,7 @@ Res TraceFixEmergency(ScanState ss, Ref *refIO)
pool = TractPool(tract);
PoolFixEmergency(pool, ss, seg, refIO);
}
- } else {
+ } else {
/* Tract isn't white. Don't compute seg for non-statistical */
/* variety. See design.mps.trace.fix.tractofaddr */
STATISTIC_STAT
@@ -1163,7 +1163,7 @@ Res TraceFixEmergency(ScanState ss, Ref *refIO)
/* traceScanSingleRefRes -- scan a single reference, with result code */
-static Res traceScanSingleRefRes(TraceSet ts, Rank rank, Arena arena,
+static Res traceScanSingleRefRes(TraceSet ts, Rank rank, Arena arena,
Seg seg, Ref *refIO)
{
RefSet summary;
@@ -1422,7 +1422,7 @@ void TraceStart(Trace trace, double mortality, double finishingTime)
if (TraceSetIsMember(SegGrey(seg), trace))
trace->foundation += size;
}
-
+
if ((SegPool(seg)->class->attr & AttrGC)
&& !TraceSetIsMember(SegWhite(seg), trace))
trace->notCondemned += size;
@@ -1464,7 +1464,7 @@ void TraceStart(Trace trace, double mortality, double finishingTime)
}
-/* traceWorkClock -- a measure of the work done for this trace
+/* traceWorkClock -- a measure of the work done for this trace
*
* .workclock: Segment scanning work is the regulator. */
@@ -1514,9 +1514,9 @@ void TracePoll(Globals globals)
Trace trace;
Res res;
Arena arena;
-
+
AVERT(Globals, globals);
- arena = GlobalsArena(globals);
+ arena = GlobalsArena(globals);
if (arena->busyTraces == TraceSetEMPTY) {
/* If no traces are going on, see if we need to start one. */
@@ -1616,18 +1616,18 @@ void ArenaRelease(Globals globals)
/* ArenaClamp -- finish all collections and clamp the arena */
-
+
void ArenaPark(Globals globals)
{
TraceId ti;
Trace trace;
Arena arena;
-
+
AVERT(Globals, globals);
- arena = GlobalsArena(globals);
-
+ arena = GlobalsArena(globals);
+
globals->clamped = TRUE;
-
+
while (arena->busyTraces != TraceSetEMPTY) {
/* Poll active traces to make progress. */
TRACE_SET_ITER(ti, trace, arena->busyTraces, arena)
diff --git a/mps/code/tract.c b/mps/code/tract.c
index ee7c7af54d9..546f68803b4 100644
--- a/mps/code/tract.c
+++ b/mps/code/tract.c
@@ -32,7 +32,7 @@ Bool TractCheck(Tract tract)
CHECKL(AddrIsAligned(TractBase(tract), ArenaAlign(TractArena(tract))));
if (TractHasSeg(tract)) {
CHECKL(TraceSetCheck(TractWhite(tract)));
- CHECKU(Seg, (Seg)TractP(tract));
+ CHECKU(Seg, (Seg)TractP(tract));
} else {
CHECKL(TractWhite(tract) == TraceSetEMPTY);
}
@@ -71,8 +71,8 @@ void TractFinish(Tract tract)
-/* .tract.critical: These tract functions are low-level and used
- * throughout. They are therefore on the critical path and their
+/* .tract.critical: These tract functions are low-level and used
+ * throughout. They are therefore on the critical path and their
* AVERs are so-marked.
*/
@@ -392,8 +392,8 @@ Index IndexOfAddr(Chunk chunk, Addr addr)
/* Page table functions */
-/* .tract.critical: These Tract functions are low-level and are on
- * the critical path in various ways. The more common therefore
+/* .tract.critical: These Tract functions are low-level and are on
+ * the critical path in various ways. The more common therefore
* use AVER_CRITICAL.
*/
@@ -410,7 +410,7 @@ Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr)
Bool b;
Index i;
Chunk chunk;
-
+
/* design.mps.trace.fix.noaver */
AVER_CRITICAL(tractReturn != NULL); /* .tract.critical */
AVERT_CRITICAL(Arena, arena);
@@ -434,7 +434,7 @@ Bool TractOfAddr(Tract *tractReturn, Arena arena, Addr addr)
/* TractOfBaseAddr -- return a tract given a base address
- *
+ *
* The address must have been allocated to some pool.
*/
diff --git a/mps/code/tract.h b/mps/code/tract.h
index 64972b1621a..92db6991721 100644
--- a/mps/code/tract.h
+++ b/mps/code/tract.h
@@ -16,7 +16,7 @@
*
* .tract: Tracts represent the grains of memory allocation from
* the arena. See design.mps.arena.
- *
+ *
* .bool: The hasSeg field is a boolean, but can't be represented
* as type Bool. See design.mps.arena.tract.field.hasSeg.
*/
@@ -49,7 +49,7 @@ extern void TractFinish(Tract tract);
/* TRACT_*SEG -- Test / set / unset seg->tract associations
*
- * These macros all multiply evaluate the tract parameter
+ * These macros all multiply evaluate the tract parameter
*/
#define TRACT_SEG(segReturn, tract) \
@@ -71,7 +71,7 @@ extern void TractFinish(Tract tract);
* .page: The "pool" field must be the first field of the "tail"
* field of this union. See design.mps.arena.tract.field.pool.
*
- * .states: Pages (hence PageStructs that describe them) can be in
+ * .states: Pages (hence PageStructs that describe them) can be in
* one of 3 states:
* allocated (to a pool as tracts)
* allocated pages are mapped
diff --git a/mps/code/vman.c b/mps/code/vman.c
index be78c4b3626..1d3959d0aee 100644
--- a/mps/code/vman.c
+++ b/mps/code/vman.c
@@ -83,16 +83,16 @@ Res VMCreate(VM *vmReturn, Size size)
AVER(vm->limit < AddrAdd((Addr)vm->block, size));
memset((void *)vm->block, VMJunkBYTE, size);
-
+
/* Lie about the reserved address space, to simulate real */
/* virtual memory. */
vm->reserved = size - VMANPageALIGNMENT;
vm->mapped = (Size)0;
-
+
vm->sig = VMSig;
AVERT(VM, vm);
-
+
EVENT_PAA(VMCreate, vm, vm->base, vm->limit);
*vmReturn = vm;
return ResOK;
@@ -110,10 +110,10 @@ void VMDestroy(VM vm)
memset((void *)vm->base, VMJunkBYTE, AddrOffset(vm->base, vm->limit));
free(vm->block);
-
+
vm->sig = SigInvalid;
free(vm);
-
+
EVENT_P(VMDestroy, vm);
}
@@ -193,7 +193,7 @@ void VMUnmap(VM vm, Addr base, Addr limit)
AVER(limit <= vm->limit);
AVER(AddrIsAligned(base, VMANPageALIGNMENT));
AVER(AddrIsAligned(limit, VMANPageALIGNMENT));
-
+
size = AddrOffset(base, limit);
memset((void *)base, VM_JUNKBYTE, size);
diff --git a/mps/code/vmli.c b/mps/code/vmli.c
index ddba5914a42..b35fecc4251 100644
--- a/mps/code/vmli.c
+++ b/mps/code/vmli.c
@@ -3,8 +3,8 @@
* $Id$
* Copyright (c) 2001 Ravenbrook Limited.
*
- * .purpose: This is the implementation of the virtual memory mapping
- * interface (vm.h) for Linux. It was created by copying vmo1.c (the
+ * .purpose: This is the implementation of the virtual memory mapping
+ * interface (vm.h) for Linux. It was created by copying vmo1.c (the
* DIGITAL UNIX implementation) as that seemed to be closest.
*
* .design: See design.mps.vm. .design.linux: mmap(2) is used to
diff --git a/mps/code/w3almv.nmk b/mps/code/w3almv.nmk
index d3c7819ad9a..1ed1df7113c 100644
--- a/mps/code/w3almv.nmk
+++ b/mps/code/w3almv.nmk
@@ -46,7 +46,7 @@ TESTLIB =
# Source to object file mappings
# and CFLAGS amalgamation
-# %%VARIETY %%PART: Add new macros which expand to the files included
+# %%VARIETY %%PART: Add new macros which expand to the files included
# in the part for each variety
# %%VARIETY: Add a CFLAGS macro which expands to the flags that that variety
# should use when compiling C. And a LINKFLAGS macro which expands to the
diff --git a/mps/code/w3i3mv.nmk b/mps/code/w3i3mv.nmk
index a982ac6518c..cf71b4fef1b 100644
--- a/mps/code/w3i3mv.nmk
+++ b/mps/code/w3i3mv.nmk
@@ -51,7 +51,7 @@ TESTLIB =
# Source to object file mappings and CFLAGS amalgamation
-# %%VARIETY %%PART: Add new macros which expand to the files included
+# %%VARIETY %%PART: Add new macros which expand to the files included
# in the part for each variety
# %%VARIETY: Add a CFLAGS macro which expands to the flags that that variety
# should use when compiling C. And a LINKFLAGS macro which expands to the
diff --git a/mps/code/w3ppmv.nmk b/mps/code/w3ppmv.nmk
index ec800e2939a..6a36749b5db 100644
--- a/mps/code/w3ppmv.nmk
+++ b/mps/code/w3ppmv.nmk
@@ -43,7 +43,7 @@ TESTLIB =
# Source to object file mappings
# and CFLAGS amalgamation
-# %%VARIETY %%PART: Add new macros which expand to the files included
+# %%VARIETY %%PART: Add new macros which expand to the files included
# in the part for each variety
# %%VARIETY: Add a CFLAGS macro which expands to the flags that that variety
# should use when compiling C. And a LINKFLAGS macro which expands to the
diff --git a/mps/code/walk.c b/mps/code/walk.c
index ddca8f40e7f..13fabfe7525 100644
--- a/mps/code/walk.c
+++ b/mps/code/walk.c
@@ -46,7 +46,7 @@ static void ArenaFormattedObjectsStep(Addr object, Format format, Pool pool,
AVERT(FormattedObjectsStepClosure, c);
AVER(s == 0);
- (*c->f)((mps_addr_t)object, (mps_fmt_t)format, (mps_pool_t)pool,
+ (*c->f)((mps_addr_t)object, (mps_fmt_t)format, (mps_pool_t)pool,
c->p, c->s);
}
@@ -147,7 +147,7 @@ void mps_arena_formatted_objects_walk(mps_arena_t mps_arena,
* Defined as a subclass of ScanState. */
/* SIGnature Roots Step CLOsure */
-#define rootsStepClosureSig ((Sig)0x51965C10)
+#define rootsStepClosureSig ((Sig)0x51965C10)
typedef struct rootsStepClosureStruct *rootsStepClosure;
typedef struct rootsStepClosureStruct {
@@ -183,7 +183,7 @@ static Bool rootsStepClosureCheck(rootsStepClosure rsc)
*
* Initialize the parent ScanState too. */
-static void rootsStepClosureInit(rootsStepClosure rsc,
+static void rootsStepClosureInit(rootsStepClosure rsc,
Globals arena, Trace trace,
TraceFixMethod rootFix,
mps_roots_stepper_t f, void *p, Size s)
@@ -316,7 +316,7 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
return res;
/* Set the white set to universal so that the scanner */
/* doesn't filter out any references from roots into the arena. */
- trace->white = ZoneSetUNIV;
+ trace->white = ZoneSetUNIV;
/* Make the roots grey so that they are scanned */
res = RootsIterate(arenaGlobals, (RootIterateFn)RootGrey, (void *)trace);
/* Make this trace look like any other trace. */