diff --git a/mps/.travis.yml b/mps/.travis.yml index 07a0dd66528..bc1dfd5bfb1 100644 --- a/mps/.travis.yml +++ b/mps/.travis.yml @@ -9,3 +9,5 @@ notifications: email: - mps-travis@ravenbrook.com irc: "irc.freenode.net#memorypoolsystem" +script: + - ./configure --prefix=$PWD/prefix && make install && make test diff --git a/mps/Makefile.in b/mps/Makefile.in index e8899cce95c..1495a0fc313 100644 --- a/mps/Makefile.in +++ b/mps/Makefile.in @@ -13,7 +13,10 @@ INSTALL=@INSTALL@ INSTALL_DATA=@INSTALL_DATA@ INSTALL_PROGRAM=@INSTALL_PROGRAM@ MAKE=@MAKE@ -MPS_TARGET_NAME=@MPS_TARGET_NAME@ +MPS_OS_NAME=@MPS_OS_NAME@ +MPS_ARCH_NAME=@MPS_ARCH_NAME@ +MPS_BUILD_NAME=@MPS_BUILD_NAME@ +MPS_TARGET_NAME=$(MPS_OS_NAME)$(MPS_ARCH_NAME)$(MPS_BUILD_NAME) EXTRA_TARGETS=@EXTRA_TARGETS@ prefix=$(DESTDIR)@prefix@ TARGET_OPTS=-C code -f $(MPS_TARGET_NAME).gmk EXTRA_TARGETS="$(EXTRA_TARGETS)" @@ -31,15 +34,15 @@ install-make-build: make-install-dirs build-via-make $(INSTALL_DATA) code/mps*.h $(prefix)/include/ $(INSTALL_DATA) code/$(MPS_TARGET_NAME)/cool/mps.a $(prefix)/lib/libmps-debug.a $(INSTALL_DATA) code/$(MPS_TARGET_NAME)/hot/mps.a $(prefix)/lib/libmps.a - $(INSTALL_PROGRAM) $(addprefix code/$(MPS_TARGET_NAME)/hot/Release/,$(EXTRA_TARGETS)) $(prefix)/bin + $(INSTALL_PROGRAM) $(addprefix code/$(MPS_TARGET_NAME)/hot/,$(EXTRA_TARGETS)) $(prefix)/bin build-via-xcode: - $(XCODEBUILD) -config Release $(XCODEBUILD) -config Debug + $(XCODEBUILD) -config Release clean-xcode-build: - $(XCODEBUILD) -config Release clean $(XCODEBUILD) -config Debug clean + $(XCODEBUILD) -config Release clean install-xcode-build: make-install-dirs build-via-xcode $(INSTALL_DATA) code/mps*.h $(prefix)/include/ @@ -67,12 +70,13 @@ make-install-dirs: install: @INSTALL_TARGET@ -test-make-build: @BUILD_TARGET@ - $(MAKE) $(TARGET_OPTS) VARIETY=cool testrun - $(MAKE) $(TARGET_OPTS) VARIETY=hot testrun +test-make-build: + $(MAKE) $(TARGET_OPTS) testci + $(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool clean testansi + $(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool CFLAGS="-DCONFIG_POLL_NONE" clean testpoll test-xcode-build: - $(XCODEBUILD) -config Release -target testrun - $(XCODEBUILD) -config Debug -target testrun + $(XCODEBUILD) -config Debug -target testci + $(XCODEBUILD) -config Release -target testci test: @TEST_TARGET@ diff --git a/mps/code/abqtest.c b/mps/code/abqtest.c index 367bafe730b..9aad3351cb6 100644 --- a/mps/code/abqtest.c +++ b/mps/code/abqtest.c @@ -96,6 +96,7 @@ static Bool TestDeleteCallback(Bool *deleteReturn, void *element, { TestBlock *a = (TestBlock *)element; TestClosure cl = (TestClosure)closureP; + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); if (*a == cl->b) { *deleteReturn = TRUE; @@ -144,7 +145,7 @@ static void step(void) cdie(b != NULL, "found to delete"); cl.b = b; cl.res = ResFAIL; - ABQIterate(&abq, TestDeleteCallback, &cl, 0); + ABQIterate(&abq, TestDeleteCallback, &cl, UNUSED_SIZE); cdie(cl.res == ResOK, "ABQIterate"); } } diff --git a/mps/code/amcss.c b/mps/code/amcss.c index 593bb6f7e5a..d91e819e0b7 100644 --- a/mps/code/amcss.c +++ b/mps/code/amcss.c @@ -289,6 +289,7 @@ static void test(mps_arena_t arena, mps_class_t pool_class, size_t roots_count) mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); + mps_arena_release(arena); } int main(int argc, char *argv[]) diff --git a/mps/code/amcsshe.c b/mps/code/amcsshe.c index 8b3a0c65e36..cd19b4219e6 100644 --- a/mps/code/amcsshe.c +++ b/mps/code/amcsshe.c @@ -225,6 +225,7 @@ static void *test(mps_arena_t arena, 
mps_class_t pool_class, size_t roots_count) } (void)mps_commit(busy_ap, busy_init, 64); + mps_arena_park(arena); mps_ap_destroy(busy_ap); mps_ap_destroy(ap); mps_root_destroy(exactRoot); @@ -233,6 +234,7 @@ static void *test(mps_arena_t arena, mps_class_t pool_class, size_t roots_count) mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); + mps_arena_release(arena); return NULL; } diff --git a/mps/code/amcssth.c b/mps/code/amcssth.c index 2f91edee93e..7c7bbf2ecb1 100644 --- a/mps/code/amcssth.c +++ b/mps/code/amcssth.c @@ -149,17 +149,6 @@ static void init(void) } -/* finish -- finish roots and chain */ - -static void finish(void) -{ - mps_root_destroy(exactRoot); - mps_root_destroy(ambigRoot); - mps_chain_destroy(chain); - mps_fmt_destroy(format); -} - - /* churn -- create an object and install into roots */ static void churn(mps_ap_t ap, size_t roots_count) @@ -218,7 +207,7 @@ static void *kid_thread(void *arg) /* test -- the body of the test */ -static void *test_pool(mps_class_t pool_class, size_t roots_count, int mode) +static void test_pool(mps_pool_t pool, size_t roots_count, int mode) { size_t i; mps_word_t collections, rampSwitch; @@ -226,14 +215,10 @@ static void *test_pool(mps_class_t pool_class, size_t roots_count, int mode) int ramping; mps_ap_t ap, busy_ap; mps_addr_t busy_init; - mps_pool_t pool; testthr_t kids[10]; closure_s cl; int walked = FALSE, ramped = FALSE; - die(mps_pool_create(&pool, arena, pool_class, format, chain), - "pool_create(amc)"); - cl.pool = pool; cl.roots_count = roots_count; @@ -252,7 +237,7 @@ static void *test_pool(mps_class_t pool_class, size_t roots_count, int mode) die(mps_ap_alloc_pattern_begin(busy_ap, ramp), "pattern begin (busy_ap)"); ramping = 1; while (collections < collectionsCOUNT) { - unsigned long c; + mps_word_t c; size_t r; c = mps_collections(arena); @@ -260,7 +245,7 @@ static void *test_pool(mps_class_t pool_class, size_t roots_count, int mode) if (collections != c) { collections = c; printf("\nCollection %lu started, %lu objects, committed=%lu.\n", - c, objs, (unsigned long)mps_arena_committed(arena)); + (unsigned long)c, objs, (unsigned long)mps_arena_committed(arena)); report(arena); for (i = 0; i < exactRootsCOUNT; ++i) @@ -323,16 +308,13 @@ static void *test_pool(mps_class_t pool_class, size_t roots_count, int mode) for (i = 0; i < sizeof(kids)/sizeof(kids[0]); ++i) testthr_join(&kids[i], NULL); - - mps_pool_destroy(pool); - - return NULL; } static void test_arena(int mode) { mps_thr_t thread; mps_root_t reg_root; + mps_pool_t amc_pool, amcz_pool; void *marker = ▮ die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), @@ -345,12 +327,23 @@ static void test_arena(int mode) die(mps_root_create_reg(®_root, arena, mps_rank_ambig(), 0, thread, mps_stack_scan_ambig, marker, 0), "root_create"); - test_pool(mps_class_amc(), exactRootsCOUNT, mode); - test_pool(mps_class_amcz(), 0, mode); + die(mps_pool_create(&amc_pool, arena, mps_class_amc(), format, chain), + "pool_create(amc)"); + die(mps_pool_create(&amcz_pool, arena, mps_class_amcz(), format, chain), + "pool_create(amcz)"); + test_pool(amc_pool, exactRootsCOUNT, mode); + test_pool(amcz_pool, 0, mode); + + mps_arena_park(arena); + mps_pool_destroy(amc_pool); + mps_pool_destroy(amcz_pool); mps_root_destroy(reg_root); mps_thread_dereg(thread); - finish(); + mps_root_destroy(exactRoot); + mps_root_destroy(ambigRoot); + mps_chain_destroy(chain); + mps_fmt_destroy(format); report(arena); mps_arena_destroy(arena); } diff --git a/mps/code/amsss.c 
b/mps/code/amsss.c index 988b8258175..ea6b17ea72f 100644 --- a/mps/code/amsss.c +++ b/mps/code/amsss.c @@ -106,7 +106,7 @@ static mps_addr_t make(void) /* test -- the actual stress test */ static mps_pool_debug_option_s freecheckOptions = - { NULL, 0, (const void *)"Dead", 4 }; + { NULL, 0, "Dead", 4 }; static void test_pool(mps_class_t pool_class, mps_arg_s args[], mps_bool_t haveAmbiguous) @@ -234,6 +234,7 @@ int main(int argc, char *argv[]) } MPS_ARGS_END(args); } + mps_arena_park(arena); mps_chain_destroy(chain); mps_fmt_destroy(format); mps_thread_dereg(thread); diff --git a/mps/code/amssshe.c b/mps/code/amssshe.c index ce5dd4800c8..206e7c29ffe 100644 --- a/mps/code/amssshe.c +++ b/mps/code/amssshe.c @@ -139,6 +139,7 @@ static void *test(void *arg, size_t s) } (void)mps_commit(busy_ap, busy_init, 64); + mps_arena_park(arena); mps_ap_destroy(busy_ap); mps_ap_destroy(ap); mps_root_destroy(exactRoot); @@ -146,6 +147,7 @@ static void *test(void *arg, size_t s) mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); + mps_arena_release(arena); return NULL; } diff --git a/mps/code/anangc.gmk b/mps/code/anangc.gmk new file mode 100644 index 00000000000..f0a7d2ff515 --- /dev/null +++ b/mps/code/anangc.gmk @@ -0,0 +1,66 @@ +# -*- makefile -*- +# +# anangc.gmk: BUILD FOR ANSI/ANSI/GCC PLATFORM +# +# $Id$ +# Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license. + +PFM = anangc + +MPMPF = \ + lockan.c \ + prmcan.c \ + protan.c \ + span.c \ + ssan.c \ + than.c \ + vman.c + +LIBS = -lm -lpthread + +include gc.gmk + +CFLAGSCOMPILER += -DCONFIG_PF_ANSI -DCONFIG_THREAD_SINGLE + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2014 Ravenbrook Limited . +# All rights reserved. This is an open source license. Contact +# Ravenbrook for commercial licensing options. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Redistributions in any form must be accompanied by information on how +# to obtain complete source code for this software and any accompanying +# software that uses this software. The source code must either be +# included in the distribution or be available for no more than the cost +# of distribution plus a nominal fee, and must be freely redistributable +# under reasonable conditions. For an executable file, complete source +# code means the source code for all modules it contains. It does not +# include source code for modules or files that typically accompany the +# major components of the operating system on which the executable file +# runs. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. 
IN NO EVENT SHALL THE +# COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/code/ananll.gmk b/mps/code/ananll.gmk new file mode 100644 index 00000000000..cc95645f212 --- /dev/null +++ b/mps/code/ananll.gmk @@ -0,0 +1,66 @@ +# -*- makefile -*- +# +# ananll.gmk: BUILD FOR ANSI/ANSI/Clang PLATFORM +# +# $Id$ +# Copyright (c) 2014 Ravenbrook Limited. See end of file for license. + +PFM = ananll + +MPMPF = \ + lockan.c \ + prmcan.c \ + protan.c \ + span.c \ + ssan.c \ + than.c \ + vman.c + +LIBS = -lm -lpthread + +include ll.gmk + +CFLAGSCOMPILER += -DCONFIG_PF_ANSI -DCONFIG_THREAD_SINGLE + +include comm.gmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2014 Ravenbrook Limited . +# All rights reserved. This is an open source license. Contact +# Ravenbrook for commercial licensing options. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Redistributions in any form must be accompanied by information on how +# to obtain complete source code for this software and any accompanying +# software that uses this software. The source code must either be +# included in the distribution or be available for no more than the cost +# of distribution plus a nominal fee, and must be freely redistributable +# under reasonable conditions. For an executable file, complete source +# code means the source code for all modules it contains. It does not +# include source code for modules or files that typically accompany the +# major components of the operating system on which the executable file +# runs. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
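[Editorial note, not part of the patch] The test-program changes above (amcss.c, amcsshe.c, amcssth.c, amsss.c, amssshe.c) all apply the same teardown discipline: park the arena so that no collection is in progress while allocation points, roots, pools, chains and formats are destroyed, and release it again afterwards if the arena is still in use. The following is a minimal sketch of that ordering using the public MPS interface; the helper name and its parameter list are illustrative and do not appear in the patch.

#include "mps.h"

/* Sketch only: the teardown ordering that the patch adds to the test
   programs.  Park before destroying MPS objects, release afterwards. */
static void teardown_example(mps_arena_t arena, mps_ap_t ap, mps_root_t root,
                             mps_pool_t pool, mps_chain_t chain, mps_fmt_t fmt)
{
  mps_arena_park(arena);     /* ensure no collection runs during teardown */
  mps_ap_destroy(ap);
  mps_root_destroy(root);
  mps_pool_destroy(pool);
  mps_chain_destroy(chain);
  mps_fmt_destroy(fmt);
  mps_arena_release(arena);  /* resume collection if the arena is reused */
}

This corresponds to the hunks above that insert mps_arena_park before the destroy calls and mps_arena_release after them (or mps_arena_destroy, where the arena itself is being torn down).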
diff --git a/mps/code/ananmv.nmk b/mps/code/ananmv.nmk new file mode 100644 index 00000000000..ea68b19ee68 --- /dev/null +++ b/mps/code/ananmv.nmk @@ -0,0 +1,129 @@ +# ananmv.nmk: ANSI/ANSI/MICROSOFT VISUAL C/C++ NMAKE FILE -*- makefile -*- +# +# $Id$ +# Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license. + +PFM = ananmv + +PFMDEFS = /DCONFIG_PF_ANSI /DCONFIG_THREAD_SINGLE + +# MPM platform-specific sources. +MPMPF = \ + \ + \ + \ + \ + \ + \ + + +!INCLUDE commpre.nmk +!INCLUDE mv.nmk + + +# Source to object file mappings and CFLAGS amalgamation +# +# %%VARIETY %%PART: When adding a new variety or part, add new macros which +# expand to the files included in the part for each variety +# +# %%VARIETY: When adding a new variety, add a CFLAGS macro which expands to +# the flags that that variety should use when compiling C. And a LINKFLAGS +# macro which expands to the flags that the variety should use when building +# executables. And a LIBFLAGS macro which expands to the flags that the +# variety should use when building libraries + +!IF "$(VARIETY)" == "hot" +CFLAGS=$(CFLAGSCOMMONPRE) $(CFHOT) $(CFLAGSCOMMONPOST) +CFLAGSSQL=$(CFLAGSSQLPRE) $(CFHOT) $(CFLAGSSQLPOST) +LINKFLAGS=$(LINKFLAGSCOMMON) $(LFHOT) +LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSHOT) +MPMOBJ0 = $(MPM:<=ananmv\hot\) +FMTDYOBJ0 = $(FMTDY:<=ananmv\hot\) +FMTTESTOBJ0 = $(FMTTEST:<=ananmv\hot\) +FMTSCHEMEOBJ0 = $(FMTSCHEME:<=ananmv\hot\) +POOLNOBJ0 = $(POOLN:<=ananmv\hot\) +TESTLIBOBJ0 = $(TESTLIB:<=ananmv\hot\) +TESTTHROBJ0 = $(TESTTHR:<=ananmv\hot\) + +!ELSEIF "$(VARIETY)" == "cool" +CFLAGS=$(CFLAGSCOMMONPRE) $(CFCOOL) $(CFLAGSCOMMONPOST) +CFLAGSSQL=$(CFLAGSSQLPRE) $(CFCOOL) $(CFLAGSSQLPOST) +LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCOOL) +LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCOOL) +MPMOBJ0 = $(MPM:<=ananmv\cool\) +FMTDYOBJ0 = $(FMTDY:<=ananmv\cool\) +FMTTESTOBJ0 = $(FMTTEST:<=ananmv\cool\) +FMTSCHEMEOBJ0 = $(FMTSCHEME:<=ananmv\cool\) +POOLNOBJ0 = $(POOLN:<=ananmv\cool\) +TESTLIBOBJ0 = $(TESTLIB:<=ananmv\cool\) +TESTTHROBJ0 = $(TESTTHR:<=ananmv\cool\) + +!ELSEIF "$(VARIETY)" == "rash" +CFLAGS=$(CFLAGSCOMMONPRE) $(CFRASH) $(CFLAGSCOMMONPOST) +CFLAGSSQL=$(CFLAGSSQLPRE) $(CFRASH) $(CFLAGSSQLPOST) +LINKFLAGS=$(LINKFLAGSCOMMON) $(LFRASH) +LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSRASH) +MPMOBJ0 = $(MPM:<=ananmv\rash\) +FMTDYOBJ0 = $(FMTDY:<=ananmv\rash\) +FMTTESTOBJ0 = $(FMTTEST:<=ananmv\rash\) +FMTSCHEMEOBJ0 = $(FMTSCHEME:<=ananmv\rash\) +POOLNOBJ0 = $(POOLN:<=ananmv\rash\) +TESTLIBOBJ0 = $(TESTLIB:<=ananmv\rash\) +TESTTHROBJ0 = $(TESTTHR:<=ananmv\rash\) + +!ENDIF + +# %%PART: When adding a new part, add new macros which expand to the object +# files included in the part + +MPMOBJ = $(MPMOBJ0:>=.obj) +FMTDYOBJ = $(FMTDYOBJ0:>=.obj) +FMTTESTOBJ = $(FMTTESTOBJ0:>=.obj) +FMTSCHEMEOBJ = $(FMTSCHEMEOBJ0:>=.obj) +POOLNOBJ = $(POOLNOBJ0:>=.obj) +TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ = $(TESTTHROBJ0:>=.obj) + + +!INCLUDE commpost.nmk + + +# C. COPYRIGHT AND LICENSE +# +# Copyright (C) 2001-2014 Ravenbrook Limited . +# All rights reserved. This is an open source license. Contact +# Ravenbrook for commercial licensing options. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Redistributions in any form must be accompanied by information on how +# to obtain complete source code for this software and any accompanying +# software that uses this software. The source code must either be +# included in the distribution or be available for no more than the cost +# of distribution plus a nominal fee, and must be freely redistributable +# under reasonable conditions. For an executable file, complete source +# code means the source code for all modules it contains. It does not +# include source code for modules or files that typically accompany the +# major components of the operating system on which the executable file +# runs. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mps/code/apss.c b/mps/code/apss.c index 8c90536fa00..55717dbfeeb 100644 --- a/mps/code/apss.c +++ b/mps/code/apss.c @@ -43,28 +43,25 @@ static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size) /* stress -- create a pool of the requested type and allocate in it */ -static mps_res_t stress(mps_class_t class, size_t (*size)(size_t i), - mps_arena_t arena, ...) 
+static mps_res_t stress(mps_arena_t arena, mps_align_t align, + size_t (*size)(size_t i, mps_align_t align), + const char *name, mps_class_t class, mps_arg_s args[]) { mps_res_t res = MPS_RES_OK; mps_pool_t pool; mps_ap_t ap; - va_list arg; size_t i, k; int *ps[testSetSIZE]; size_t ss[testSetSIZE]; - va_start(arg, arena); - res = mps_pool_create_v(&pool, arena, class, arg); - va_end(arg); - if (res != MPS_RES_OK) - return res; + printf("stress %s\n", name); + die(mps_pool_create_k(&pool, arena, class, args), "pool_create"); die(mps_ap_create(&ap, pool, mps_rank_exact()), "BufferCreate"); /* allocate a load of objects */ for (i=0; i> (i / 10)), 2) + 1, MPS_PF_ALIGN); + return alignUp(rnd() % max((maxSize >> (i / 10)), 2) + 1, align); } static mps_pool_debug_option_s bothOptions = { - /* .fence_template = */ (const void *)"postpostpostpost", - /* .fence_size = */ MPS_PF_ALIGN, - /* .free_template = */ (const void *)"DEAD", + /* .fence_template = */ "post", + /* .fence_size = */ 4, + /* .free_template = */ "DEAD", /* .free_size = */ 4 }; static mps_pool_debug_option_s fenceOptions = { - /* .fence_template = */ (const void *)"\0XXX ''\"\"'' XXX\0", - /* .fence_size = */ 16, + /* .fence_template = */ "123456789abcdef", + /* .fence_size = */ 15, /* .free_template = */ NULL, /* .free_size = */ 0 }; + /* testInArena -- test all the pool classes in the given arena */ static void testInArena(mps_arena_t arena, mps_pool_debug_option_s *options) { - mps_res_t res; + MPS_ARGS_BEGIN(args) { + mps_align_t align = sizeof(void *) << (rnd() % 4); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + die(stress(arena, align, randomSizeAligned, "MVFF", mps_class_mvff(), args), + "stress MVFF"); + } MPS_ARGS_END(args); /* IWBN to test MVFFDebug, but the MPS doesn't support debugging APs, */ /* yet (MV Debug works here, because it fakes it through PoolAlloc). 
*/ - printf("MVFF\n"); - res = stress(mps_class_mvff(), randomSizeAligned, arena, - (size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE); - if (res == MPS_RES_COMMIT_LIMIT) return; - die(res, "stress MVFF"); - printf("MV debug\n"); - res = stress(mps_class_mv_debug(), randomSizeAligned, arena, - options, (size_t)65536, (size_t)32, (size_t)65536); - if (res == MPS_RES_COMMIT_LIMIT) return; - die(res, "stress MV debug"); + MPS_ARGS_BEGIN(args) { + mps_align_t align = (mps_align_t)1 << (rnd() % 6); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + die(stress(arena, align, randomSizeAligned, "MV", mps_class_mv(), args), + "stress MV"); + } MPS_ARGS_END(args); - printf("MV\n"); - res = stress(mps_class_mv(), randomSizeAligned, arena, - (size_t)65536, (size_t)32, (size_t)65536); - if (res == MPS_RES_COMMIT_LIMIT) return; - die(res, "stress MV"); + MPS_ARGS_BEGIN(args) { + mps_align_t align = (mps_align_t)1 << (rnd() % 6); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options); + die(stress(arena, align, randomSizeAligned, "MV debug", + mps_class_mv_debug(), args), + "stress MV debug"); + } MPS_ARGS_END(args); - printf("MVT\n"); - res = stress(mps_class_mvt(), randomSizeAligned, arena, - (size_t)8, (size_t)32, (size_t)65536, (mps_word_t)4, - (mps_word_t)50); - if (res == MPS_RES_COMMIT_LIMIT) return; - die(res, "stress MVT"); + MPS_ARGS_BEGIN(args) { + mps_align_t align = sizeof(void *) << (rnd() % 4); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + die(stress(arena, align, randomSizeAligned, "MVT", mps_class_mvt(), args), + "stress MVT"); + } MPS_ARGS_END(args); } diff --git a/mps/code/arena.c b/mps/code/arena.c index fa70084f194..424d00cebae 100644 --- a/mps/code/arena.c +++ b/mps/code/arena.c @@ -19,7 +19,7 @@ SRCID(arena, "$Id$"); #define ArenaControlPool(arena) MV2Pool(&(arena)->controlPoolStruct) #define ArenaCBSBlockPool(arena) (&(arena)->freeCBSBlockPoolStruct.poolStruct) -#define ArenaFreeCBS(arena) (&(arena)->freeCBSStruct) +#define ArenaFreeLand(arena) (&(arena)->freeLandStruct.landStruct) /* Forward declarations */ @@ -153,9 +153,9 @@ Bool ArenaCheck(Arena arena) CHECKL(LocusCheck(arena)); - CHECKL(BoolCheck(arena->hasFreeCBS)); - if (arena->hasFreeCBS) - CHECKD(CBS, ArenaFreeCBS(arena)); + CHECKL(BoolCheck(arena->hasFreeLand)); + if (arena->hasFreeLand) + CHECKD(Land, ArenaFreeLand(arena)); CHECKL(BoolCheck(arena->zoned)); @@ -200,7 +200,7 @@ Res ArenaInit(Arena arena, ArenaClass class, Align alignment, ArgList args) arena->poolReady = FALSE; /* */ arena->lastTract = NULL; arena->lastTractBase = NULL; - arena->hasFreeCBS = FALSE; + arena->hasFreeLand = FALSE; arena->freeZones = ZoneSetUNIV; arena->zoned = zoned; @@ -216,14 +216,15 @@ Res ArenaInit(Arena arena, ArenaClass class, Align alignment, ArgList args) goto failGlobalsInit; arena->sig = ArenaSig; + AVERT(Arena, arena); /* Initialise a pool to hold the arena's CBS blocks. This pool can't be allowed to extend itself using ArenaAlloc because it is used during ArenaAlloc, so MFSExtendSelf is set to FALSE. Failures to extend are - handled where the CBS is used. */ + handled where the Land is used. 
*/ MPS_ARGS_BEGIN(piArgs) { - MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(CBSBlockStruct)); + MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(CBSZonedBlockStruct)); MPS_ARGS_ADD(piArgs, MPS_KEY_EXTEND_BY, arena->alignment); MPS_ARGS_ADD(piArgs, MFSExtendSelf, FALSE); res = PoolInit(ArenaCBSBlockPool(arena), arena, PoolClassMFS(), piArgs); @@ -232,17 +233,17 @@ Res ArenaInit(Arena arena, ArenaClass class, Align alignment, ArgList args) if (res != ResOK) goto failMFSInit; - /* Initialise the freeCBS. */ - MPS_ARGS_BEGIN(cbsiArgs) { - MPS_ARGS_ADD(cbsiArgs, CBSBlockPool, ArenaCBSBlockPool(arena)); - res = CBSInit(ArenaFreeCBS(arena), arena, arena, alignment, - /* fastFind */ TRUE, arena->zoned, cbsiArgs); - } MPS_ARGS_END(cbsiArgs); + /* Initialise the freeLand. */ + MPS_ARGS_BEGIN(liArgs) { + MPS_ARGS_ADD(liArgs, CBSBlockPool, ArenaCBSBlockPool(arena)); + res = LandInit(ArenaFreeLand(arena), CBSZonedLandClassGet(), arena, + alignment, arena, liArgs); + } MPS_ARGS_END(liArgs); AVER(res == ResOK); /* no allocation, no failure expected */ if (res != ResOK) - goto failCBSInit; - /* Note that although freeCBS is initialised, it doesn't have any memory - for its blocks, so hasFreeCBS remains FALSE until later. */ + goto failLandInit; + /* Note that although freeLand is initialised, it doesn't have any memory + for its blocks, so hasFreeLand remains FALSE until later. */ /* initialize the reservoir, */ res = ReservoirInit(&arena->reservoirStruct, arena); @@ -253,8 +254,8 @@ Res ArenaInit(Arena arena, ArenaClass class, Align alignment, ArgList args) return ResOK; failReservoirInit: - CBSFinish(ArenaFreeCBS(arena)); -failCBSInit: + LandFinish(ArenaFreeLand(arena)); +failLandInit: PoolFinish(ArenaCBSBlockPool(arena)); failMFSInit: GlobalsFinish(ArenaGlobals(arena)); @@ -304,15 +305,15 @@ Res ArenaCreate(Arena *arenaReturn, ArenaClass class, ArgList args) goto failStripeSize; } - /* With the primary chunk initialised we can add page memory to the freeCBS + /* With the primary chunk initialised we can add page memory to the freeLand that describes the free address space in the primary chunk. */ - arena->hasFreeCBS = TRUE; - res = ArenaFreeCBSInsert(arena, - PageIndexBase(arena->primary, - arena->primary->allocBase), - arena->primary->limit); + arena->hasFreeLand = TRUE; + res = ArenaFreeLandInsert(arena, + PageIndexBase(arena->primary, + arena->primary->allocBase), + arena->primary->limit); if (res != ResOK) - goto failPrimaryCBS; + goto failPrimaryLand; res = ControlInit(arena); if (res != ResOK) @@ -329,7 +330,7 @@ Res ArenaCreate(Arena *arenaReturn, ArenaClass class, ArgList args) failGlobalsCompleteCreate: ControlFinish(arena); failControlInit: -failPrimaryCBS: +failPrimaryLand: failStripeSize: (*class->finish)(arena); failInit: @@ -359,6 +360,8 @@ static void arenaMFSPageFreeVisitor(Pool pool, Addr base, Size size, void *closureP, Size closureS) { AVERT(Pool, pool); + AVER(closureP == UNUSED_POINTER); + AVER(closureS == UNUSED_SIZE); UNUSED(closureP); UNUSED(closureS); UNUSED(size); @@ -378,16 +381,16 @@ void ArenaDestroy(Arena arena) arena->poolReady = FALSE; ControlFinish(arena); - /* We must tear down the freeCBS before the chunks, because pages + /* We must tear down the freeLand before the chunks, because pages containing CBS blocks might be allocated in those chunks. 
*/ - AVER(arena->hasFreeCBS); - arena->hasFreeCBS = FALSE; - CBSFinish(ArenaFreeCBS(arena)); + AVER(arena->hasFreeLand); + arena->hasFreeLand = FALSE; + LandFinish(ArenaFreeLand(arena)); /* The CBS block pool can't free its own memory via ArenaFree because - that would use the ZonedCBS. */ - MFSFinishTracts(ArenaCBSBlockPool(arena), - arenaMFSPageFreeVisitor, NULL, 0); + that would use the freeLand. */ + MFSFinishTracts(ArenaCBSBlockPool(arena), arenaMFSPageFreeVisitor, + UNUSED_POINTER, UNUSED_SIZE); PoolFinish(ArenaCBSBlockPool(arena)); /* Call class-specific finishing. This will call ArenaFinish. */ @@ -600,9 +603,10 @@ Res ControlDescribe(Arena arena, mps_lib_FILE *stream, Count depth) /* arenaAllocPage -- allocate one page from the arena * - * This is a primitive allocator used to allocate pages for the arena CBS. - * It is called rarely and can use a simple search. It may not use the - * CBS or any pool, because it is used as part of the bootstrap. + * This is a primitive allocator used to allocate pages for the arena + * Land. It is called rarely and can use a simple search. It may not + * use the Land or any pool, because it is used as part of the + * bootstrap. */ static Res arenaAllocPageInChunk(Addr *baseReturn, Chunk chunk, Pool pool) @@ -684,7 +688,7 @@ static Res arenaExtendCBSBlockPool(Range pageRangeReturn, Arena arena) return ResOK; } -/* arenaExcludePage -- exclude CBS block pool's page from CBSs +/* arenaExcludePage -- exclude CBS block pool's page from free land * * Exclude the page we specially allocated for the CBS block pool * so that it doesn't get reallocated. @@ -695,20 +699,20 @@ static void arenaExcludePage(Arena arena, Range pageRange) RangeStruct oldRange; Res res; - res = CBSDelete(&oldRange, ArenaFreeCBS(arena), pageRange); - AVER(res == ResOK); /* we just gave memory to the CBSs */ + res = LandDelete(&oldRange, ArenaFreeLand(arena), pageRange); + AVER(res == ResOK); /* we just gave memory to the Land */ } -/* arenaCBSInsert -- add a block to an arena CBS, extending pool if necessary +/* arenaLandInsert -- add range to arena's land, maybe extending block pool * - * The arena's CBSs can't get memory in the usual way because they are used - * in the basic allocator, so we allocate pages specially. + * The arena's land can't get memory in the usual way because it is + * used in the basic allocator, so we allocate pages specially. * * Only fails if it can't get a page for the block pool. */ -static Res arenaCBSInsert(Range rangeReturn, Arena arena, Range range) +static Res arenaLandInsert(Range rangeReturn, Arena arena, Range range) { Res res; @@ -716,17 +720,17 @@ static Res arenaCBSInsert(Range rangeReturn, Arena arena, Range range) AVERT(Arena, arena); AVERT(Range, range); - res = CBSInsert(rangeReturn, ArenaFreeCBS(arena), range); + res = LandInsert(rangeReturn, ArenaFreeLand(arena), range); - if (res == ResLIMIT) { /* freeCBS MFS pool ran out of blocks */ + if (res == ResLIMIT) { /* CBS block pool ran out of blocks */ RangeStruct pageRange; res = arenaExtendCBSBlockPool(&pageRange, arena); if (res != ResOK) return res; /* .insert.exclude: Must insert before exclude so that we can bootstrap when the zoned CBS is empty. 
*/ - res = CBSInsert(rangeReturn, ArenaFreeCBS(arena), range); - AVER(res == ResOK); /* we just gave memory to the CBSs */ + res = LandInsert(rangeReturn, ArenaFreeLand(arena), range); + AVER(res == ResOK); /* we just gave memory to the CBS block pool */ arenaExcludePage(arena, &pageRange); } @@ -734,16 +738,16 @@ static Res arenaCBSInsert(Range rangeReturn, Arena arena, Range range) } -/* ArenaFreeCBSInsert -- add a block to arena CBS, maybe stealing memory +/* ArenaFreeLandInsert -- add range to arena's land, maybe stealing memory * - * See arenaCBSInsert. This function may only be applied to mapped pages - * and may steal them to store CBS nodes if it's unable to allocate - * space for CBS nodes. + * See arenaLandInsert. This function may only be applied to mapped + * pages and may steal them to store Land nodes if it's unable to + * allocate space for CBS blocks. * * IMPORTANT: May update rangeIO. */ -static void arenaCBSInsertSteal(Range rangeReturn, Arena arena, Range rangeIO) +static void arenaLandInsertSteal(Range rangeReturn, Arena arena, Range rangeIO) { Res res; @@ -751,7 +755,7 @@ static void arenaCBSInsertSteal(Range rangeReturn, Arena arena, Range rangeIO) AVERT(Arena, arena); AVERT(Range, rangeIO); - res = arenaCBSInsert(rangeReturn, arena, rangeIO); + res = arenaLandInsert(rangeReturn, arena, rangeIO); if (res != ResOK) { Addr pageBase; @@ -772,22 +776,22 @@ static void arenaCBSInsertSteal(Range rangeReturn, Arena arena, Range rangeIO) MFSExtend(ArenaCBSBlockPool(arena), pageBase, ArenaAlign(arena)); /* Try again. */ - res = CBSInsert(rangeReturn, ArenaFreeCBS(arena), rangeIO); - AVER(res == ResOK); /* we just gave memory to the CBS */ + res = LandInsert(rangeReturn, ArenaFreeLand(arena), rangeIO); + AVER(res == ResOK); /* we just gave memory to the CBS block pool */ } - AVER(res == ResOK); /* not expecting other kinds of error from the CBS */ + AVER(res == ResOK); /* not expecting other kinds of error from the Land */ } -/* ArenaFreeCBSInsert -- add block to free CBS, extending pool if necessary +/* ArenaFreeLandInsert -- add range to arena's land, maybe extending block pool * * The inserted block of address space may not abut any existing block. * This restriction ensures that we don't coalesce chunks and allocate * object across the boundary, preventing chunk deletion. */ -Res ArenaFreeCBSInsert(Arena arena, Addr base, Addr limit) +Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit) { RangeStruct range, oldRange; Res res; @@ -795,7 +799,7 @@ Res ArenaFreeCBSInsert(Arena arena, Addr base, Addr limit) AVERT(Arena, arena); RangeInit(&range, base, limit); - res = arenaCBSInsert(&oldRange, arena, &range); + res = arenaLandInsert(&oldRange, arena, &range); if (res != ResOK) return res; @@ -808,7 +812,7 @@ Res ArenaFreeCBSInsert(Arena arena, Addr base, Addr limit) } -/* ArenaFreeCBSDelete -- remove a block from free CBS, extending pool if necessary +/* ArenaFreeLandDelete -- remove range from arena's land, maybe extending block pool * * This is called from ChunkFinish in order to remove address space from * the arena. @@ -819,13 +823,13 @@ Res ArenaFreeCBSInsert(Arena arena, Addr base, Addr limit) * so we can't test that path. 
*/ -void ArenaFreeCBSDelete(Arena arena, Addr base, Addr limit) +void ArenaFreeLandDelete(Arena arena, Addr base, Addr limit) { RangeStruct range, oldRange; Res res; RangeInit(&range, base, limit); - res = CBSDelete(&oldRange, ArenaFreeCBS(arena), &range); + res = LandDelete(&oldRange, ArenaFreeLand(arena), &range); /* Shouldn't be any other kind of failure because we were only deleting a non-coalesced block. See .chunk.no-coalesce and @@ -834,13 +838,13 @@ void ArenaFreeCBSDelete(Arena arena, Addr base, Addr limit) } -static Res arenaAllocFromCBS(Tract *tractReturn, ZoneSet zones, Bool high, +static Res arenaAllocFromLand(Tract *tractReturn, ZoneSet zones, Bool high, Size size, Pool pool) { Arena arena; RangeStruct range, oldRange; Chunk chunk; - Bool b; + Bool found, b; Index baseIndex; Count pages; Res res; @@ -857,8 +861,8 @@ static Res arenaAllocFromCBS(Tract *tractReturn, ZoneSet zones, Bool high, /* Step 1. Find a range of address space. */ - res = CBSFindInZones(&range, &oldRange, ArenaFreeCBS(arena), - size, zones, high); + res = LandFindInZones(&found, &range, &oldRange, ArenaFreeLand(arena), + size, zones, high); if (res == ResLIMIT) { /* found block, but couldn't store info */ RangeStruct pageRange; @@ -866,17 +870,17 @@ static Res arenaAllocFromCBS(Tract *tractReturn, ZoneSet zones, Bool high, if (res != ResOK) /* disastrously short on memory */ return res; arenaExcludePage(arena, &pageRange); - res = CBSFindInZones(&range, &oldRange, ArenaFreeCBS(arena), - size, zones, high); + res = LandFindInZones(&found, &range, &oldRange, ArenaFreeLand(arena), + size, zones, high); AVER(res != ResLIMIT); } - if (res == ResFAIL) /* out of address space */ - return ResRESOURCE; - AVER(res == ResOK); /* unexpected error from ZoneCBS */ if (res != ResOK) /* defensive return */ return res; + + if (!found) /* out of address space */ + return ResRESOURCE; /* Step 2. Make memory available in the address space range. */ @@ -900,7 +904,7 @@ static Res arenaAllocFromCBS(Tract *tractReturn, ZoneSet zones, Bool high, failMark: { - Res insertRes = arenaCBSInsert(&oldRange, arena, &range); + Res insertRes = arenaLandInsert(&oldRange, arena, &range); AVER(insertRes == ResOK); /* We only just deleted it. */ /* If the insert does fail, we lose some address space permanently. */ } @@ -941,10 +945,10 @@ static Res arenaAllocPolicy(Tract *tractReturn, Arena arena, SegPref pref, } } - /* Plan A: allocate from the free CBS in the requested zones */ + /* Plan A: allocate from the free Land in the requested zones */ zones = ZoneSetDiff(pref->zones, pref->avoid); if (zones != ZoneSetEMPTY) { - res = arenaAllocFromCBS(&tract, zones, pref->high, size, pool); + res = arenaAllocFromLand(&tract, zones, pref->high, size, pool); if (res == ResOK) goto found; } @@ -956,7 +960,7 @@ static Res arenaAllocPolicy(Tract *tractReturn, Arena arena, SegPref pref, See also job003384. 
*/ moreZones = ZoneSetUnion(pref->zones, ZoneSetDiff(arena->freeZones, pref->avoid)); if (moreZones != zones) { - res = arenaAllocFromCBS(&tract, moreZones, pref->high, size, pool); + res = arenaAllocFromLand(&tract, moreZones, pref->high, size, pool); if (res == ResOK) goto found; } @@ -967,13 +971,13 @@ static Res arenaAllocPolicy(Tract *tractReturn, Arena arena, SegPref pref, if (res != ResOK) return res; if (zones != ZoneSetEMPTY) { - res = arenaAllocFromCBS(&tract, zones, pref->high, size, pool); + res = arenaAllocFromLand(&tract, zones, pref->high, size, pool); if (res == ResOK) goto found; } if (moreZones != zones) { zones = ZoneSetUnion(zones, ZoneSetDiff(arena->freeZones, pref->avoid)); - res = arenaAllocFromCBS(&tract, moreZones, pref->high, size, pool); + res = arenaAllocFromLand(&tract, moreZones, pref->high, size, pool); if (res == ResOK) goto found; } @@ -985,7 +989,7 @@ static Res arenaAllocPolicy(Tract *tractReturn, Arena arena, SegPref pref, /* TODO: log an event for this */ evenMoreZones = ZoneSetDiff(ZoneSetUNIV, pref->avoid); if (evenMoreZones != moreZones) { - res = arenaAllocFromCBS(&tract, evenMoreZones, pref->high, size, pool); + res = arenaAllocFromLand(&tract, evenMoreZones, pref->high, size, pool); if (res == ResOK) goto found; } @@ -994,7 +998,7 @@ static Res arenaAllocPolicy(Tract *tractReturn, Arena arena, SegPref pref, common ambiguous bit patterns pin them down, causing the zone check to give even more false positives permanently, and possibly retaining garbage indefinitely. */ - res = arenaAllocFromCBS(&tract, ZoneSetUNIV, pref->high, size, pool); + res = arenaAllocFromLand(&tract, ZoneSetUNIV, pref->high, size, pool); if (res == ResOK) goto found; @@ -1112,7 +1116,7 @@ void ArenaFree(Addr base, Size size, Pool pool) RangeInit(&range, base, limit); - arenaCBSInsertSteal(&oldRange, arena, &range); /* may update range */ + arenaLandInsertSteal(&oldRange, arena, &range); /* may update range */ (*arena->class->free)(RangeBase(&range), RangeSize(&range), pool); diff --git a/mps/code/arg.c b/mps/code/arg.c index 407487b4060..784a7e57f30 100644 --- a/mps/code/arg.c +++ b/mps/code/arg.c @@ -159,7 +159,7 @@ Bool ArgPick(ArgStruct *argOut, ArgList args, Key key) { return FALSE; found: - AVER(key->check(&args[i])); + AVERT(Arg, &args[i]); *argOut = args[i]; for(;;) { args[i] = args[i + 1]; diff --git a/mps/code/boot.c b/mps/code/boot.c index eff8409fc1e..af365d3b4f1 100644 --- a/mps/code/boot.c +++ b/mps/code/boot.c @@ -30,7 +30,7 @@ Bool BootBlockCheck(BootBlock boot) CHECKL(boot->limit != NULL); CHECKL(boot->base <= boot->alloc); CHECKL(boot->alloc <= boot->limit); - CHECKL(boot->alloc < boot->limit); + CHECKL(boot->base < boot->limit); return TRUE; } diff --git a/mps/code/buffer.c b/mps/code/buffer.c index 53fc1c5dfd6..0198e7d461a 100644 --- a/mps/code/buffer.c +++ b/mps/code/buffer.c @@ -199,8 +199,6 @@ static Res BufferInit(Buffer buffer, BufferClass class, AVER(buffer != NULL); AVERT(BufferClass, class); AVERT(Pool, pool); - /* The PoolClass should support buffer protocols */ - AVER(PoolHasAttr(pool, AttrBUF)); arena = PoolArena(pool); /* Initialize the buffer. 
See for a definition of */ @@ -377,8 +375,6 @@ void BufferFinish(Buffer buffer) pool = BufferPool(buffer); - /* The PoolClass should support buffer protocols */ - AVER(PoolHasAttr(pool, AttrBUF)); AVER(BufferIsReady(buffer)); /* */ diff --git a/mps/code/cbs.c b/mps/code/cbs.c index 9c74cf0f890..66101f4801a 100644 --- a/mps/code/cbs.c +++ b/mps/code/cbs.c @@ -26,55 +26,34 @@ SRCID(cbs, "$Id$"); #define CBSBlockSize(block) AddrOffset((block)->base, (block)->limit) +#define cbsLand(cbs) (&((cbs)->landStruct)) +#define cbsOfLand(land) PARENT(CBSStruct, landStruct, land) #define cbsSplay(cbs) (&((cbs)->splayTreeStruct)) #define cbsOfSplay(_splay) PARENT(CBSStruct, splayTreeStruct, _splay) #define cbsBlockTree(block) (&((block)->treeStruct)) #define cbsBlockOfTree(_tree) TREE_ELT(CBSBlock, treeStruct, _tree) +#define cbsFastBlockOfTree(_tree) \ + PARENT(CBSFastBlockStruct, cbsBlockStruct, cbsBlockOfTree(_tree)) +#define cbsZonedBlockOfTree(_tree) \ + PARENT(CBSZonedBlockStruct, cbsFastBlockStruct, cbsFastBlockOfTree(_tree)) #define cbsBlockKey(block) (&((block)->base)) #define cbsBlockPool(cbs) RVALUE((cbs)->blockPool) -/* cbsEnter, cbsLeave -- Avoid re-entrance - * - * .enter-leave: The callbacks are restricted in what they may call. - * These functions enforce this. - * - * .enter-leave.simple: Simple queries may be called from callbacks. - */ - -static void cbsEnter(CBS cbs) -{ - /* Don't need to check as always called from interface function. */ - AVER(!cbs->inCBS); - cbs->inCBS = TRUE; - return; -} - -static void cbsLeave(CBS cbs) -{ - /* Don't need to check as always called from interface function. */ - AVER(cbs->inCBS); - cbs->inCBS = FALSE; - return; -} - - /* CBSCheck -- Check CBS */ Bool CBSCheck(CBS cbs) { /* See .enter-leave.simple. */ + Land land; CHECKS(CBS, cbs); - CHECKL(cbs != NULL); + land = cbsLand(cbs); + CHECKD(Land, land); CHECKD(SplayTree, cbsSplay(cbs)); - /* nothing to check about treeSize */ CHECKD(Pool, cbs->blockPool); - CHECKU(Arena, cbs->arena); - CHECKL(BoolCheck(cbs->fastFind)); - CHECKL(BoolCheck(cbs->inCBS)); CHECKL(BoolCheck(cbs->ownPool)); - CHECKL(BoolCheck(cbs->zoned)); - /* No MeterCheck */ + CHECKL(SizeIsAligned(cbs->size, LandAlignment(land))); + CHECKL((cbs->size == 0) == (cbs->treeSize == 0)); return TRUE; } @@ -83,7 +62,6 @@ Bool CBSCheck(CBS cbs) ATTRIBUTE_UNUSED static Bool CBSBlockCheck(CBSBlock block) { - /* See .enter-leave.simple. */ UNUSED(block); /* Required because there is no signature */ CHECKL(block != NULL); /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. 
*/ @@ -140,7 +118,7 @@ static Bool cbsTestNode(SplayTree splay, Tree tree, AVERT(Tree, tree); AVER(closureP == NULL); AVER(size > 0); - AVER(cbsOfSplay(splay)->fastFind); + AVER(IsLandSubclass(cbsLand(cbsOfSplay(splay)), CBSFastLandClass)); block = cbsBlockOfTree(tree); @@ -150,51 +128,45 @@ static Bool cbsTestNode(SplayTree splay, Tree tree, static Bool cbsTestTree(SplayTree splay, Tree tree, void *closureP, Size size) { - CBSBlock block; + CBSFastBlock block; AVERT(SplayTree, splay); AVERT(Tree, tree); -#if 0 AVER(closureP == NULL); AVER(size > 0); -#endif - UNUSED(closureP); - UNUSED(size); - AVER(cbsOfSplay(splay)->fastFind); + AVER(IsLandSubclass(cbsLand(cbsOfSplay(splay)), CBSFastLandClass)); - block = cbsBlockOfTree(tree); + block = cbsFastBlockOfTree(tree); return block->maxSize >= size; } -/* cbsUpdateNode -- update size info after restructuring */ +/* cbsUpdateFastNode -- update size info after restructuring */ -static void cbsUpdateNode(SplayTree splay, Tree tree) +static void cbsUpdateFastNode(SplayTree splay, Tree tree) { Size maxSize; - CBSBlock block; AVERT_CRITICAL(SplayTree, splay); AVERT_CRITICAL(Tree, tree); - AVER_CRITICAL(cbsOfSplay(splay)->fastFind); + AVER_CRITICAL(IsLandSubclass(cbsLand(cbsOfSplay(splay)), CBSFastLandClass)); - block = cbsBlockOfTree(tree); - maxSize = CBSBlockSize(block); + maxSize = CBSBlockSize(cbsBlockOfTree(tree)); if (TreeHasLeft(tree)) { - Size size = cbsBlockOfTree(TreeLeft(tree))->maxSize; + Size size = cbsFastBlockOfTree(TreeLeft(tree))->maxSize; if (size > maxSize) maxSize = size; } if (TreeHasRight(tree)) { - Size size = cbsBlockOfTree(TreeRight(tree))->maxSize; + Size size = cbsFastBlockOfTree(TreeRight(tree))->maxSize; if (size > maxSize) maxSize = size; } - block->maxSize = maxSize; + cbsFastBlockOfTree(tree)->maxSize = maxSize; } @@ -203,69 +175,57 @@ static void cbsUpdateNode(SplayTree splay, Tree tree) static void cbsUpdateZonedNode(SplayTree splay, Tree tree) { ZoneSet zones; + CBSZonedBlock zonedBlock; CBSBlock block; Arena arena; AVERT_CRITICAL(SplayTree, splay); AVERT_CRITICAL(Tree, tree); - AVER_CRITICAL(cbsOfSplay(splay)->fastFind); - AVER_CRITICAL(cbsOfSplay(splay)->zoned); + AVER_CRITICAL(IsLandSubclass(cbsLand(cbsOfSplay(splay)), CBSZonedLandClass)); - cbsUpdateNode(splay, tree); + cbsUpdateFastNode(splay, tree); - block = cbsBlockOfTree(tree); - arena = cbsOfSplay(splay)->arena; + zonedBlock = cbsZonedBlockOfTree(tree); + block = &zonedBlock->cbsFastBlockStruct.cbsBlockStruct; + arena = LandArena(cbsLand(cbsOfSplay(splay))); zones = ZoneSetOfRange(arena, CBSBlockBase(block), CBSBlockLimit(block)); if (TreeHasLeft(tree)) - zones = ZoneSetUnion(zones, cbsBlockOfTree(TreeLeft(tree))->zones); + zones = ZoneSetUnion(zones, cbsZonedBlockOfTree(TreeLeft(tree))->zones); if (TreeHasRight(tree)) - zones = ZoneSetUnion(zones, cbsBlockOfTree(TreeRight(tree))->zones); + zones = ZoneSetUnion(zones, cbsZonedBlockOfTree(TreeRight(tree))->zones); - block->zones = zones; + zonedBlock->zones = zones; } -/* CBSInit -- Initialise a CBS structure +/* cbsInit -- Initialise a CBS structure * - * See . + * See . 
*/ -ARG_DEFINE_KEY(cbs_extend_by, Size); ARG_DEFINE_KEY(cbs_block_pool, Pool); -Res CBSInit(CBS cbs, Arena arena, void *owner, Align alignment, - Bool fastFind, Bool zoned, ArgList args) +static Res cbsInitComm(Land land, ArgList args, SplayUpdateNodeMethod update, + Size blockStructSize) { - Size extendBy = CBS_EXTEND_BY_DEFAULT; - Bool extendSelf = TRUE; + CBS cbs; + LandClass super; ArgStruct arg; Res res; Pool blockPool = NULL; - SplayUpdateNodeMethod update; - AVERT(Arena, arena); - AVER(cbs != NULL); - AVERT(Align, alignment); - AVERT(Bool, fastFind); - AVERT(Bool, zoned); + AVERT(Land, land); + super = LAND_SUPERCLASS(CBSLandClass); + res = (*super->init)(land, args); + if (res != ResOK) + return res; if (ArgPick(&arg, args, CBSBlockPool)) blockPool = arg.val.pool; - if (ArgPick(&arg, args, MPS_KEY_CBS_EXTEND_BY)) - extendBy = arg.val.size; - if (ArgPick(&arg, args, MFSExtendSelf)) - extendSelf = arg.val.b; - - update = SplayTrivUpdate; - if (fastFind) - update = cbsUpdateNode; - if (zoned) { - AVER(fastFind); - update = cbsUpdateZonedNode; - } + cbs = cbsOfLand(land); SplayTreeInit(cbsSplay(cbs), cbsCompare, cbsKey, update); if (blockPool != NULL) { @@ -273,43 +233,57 @@ Res CBSInit(CBS cbs, Arena arena, void *owner, Align alignment, cbs->ownPool = FALSE; } else { MPS_ARGS_BEGIN(pcArgs) { - MPS_ARGS_ADD(pcArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(CBSBlockStruct)); - MPS_ARGS_ADD(pcArgs, MPS_KEY_EXTEND_BY, extendBy); - MPS_ARGS_ADD(pcArgs, MFSExtendSelf, extendSelf); - res = PoolCreate(&cbs->blockPool, arena, PoolClassMFS(), pcArgs); + MPS_ARGS_ADD(pcArgs, MPS_KEY_MFS_UNIT_SIZE, blockStructSize); + res = PoolCreate(&cbs->blockPool, LandArena(land), PoolClassMFS(), pcArgs); } MPS_ARGS_END(pcArgs); if (res != ResOK) return res; cbs->ownPool = TRUE; } cbs->treeSize = 0; + cbs->size = 0; - cbs->arena = arena; - cbs->fastFind = fastFind; - cbs->zoned = zoned; - cbs->alignment = alignment; - cbs->inCBS = TRUE; + cbs->blockStructSize = blockStructSize; METER_INIT(cbs->treeSearch, "size of tree", (void *)cbs); cbs->sig = CBSSig; AVERT(CBS, cbs); - EVENT2(CBSInit, cbs, owner); - cbsLeave(cbs); return ResOK; } +static Res cbsInit(Land land, ArgList args) +{ + return cbsInitComm(land, args, SplayTrivUpdate, + sizeof(CBSBlockStruct)); +} -/* CBSFinish -- Finish a CBS structure +static Res cbsInitFast(Land land, ArgList args) +{ + return cbsInitComm(land, args, cbsUpdateFastNode, + sizeof(CBSFastBlockStruct)); +} + +static Res cbsInitZoned(Land land, ArgList args) +{ + return cbsInitComm(land, args, cbsUpdateZonedNode, + sizeof(CBSZonedBlockStruct)); +} + + +/* cbsFinish -- Finish a CBS structure * - * See . + * See . */ -void CBSFinish(CBS cbs) +static void cbsFinish(Land land) { + CBS cbs; + + AVERT(Land, land); + cbs = cbsOfLand(land); AVERT(CBS, cbs); - cbsEnter(cbs); METER_EMIT(&cbs->treeSearch); @@ -321,6 +295,23 @@ void CBSFinish(CBS cbs) } +/* cbsSize -- total size of ranges in CBS + * + * See . 
+ */ + +static Size cbsSize(Land land) +{ + CBS cbs; + + AVERT(Land, land); + cbs = cbsOfLand(land); + AVERT(CBS, cbs); + + return cbs->size; +} + + /* Node change operators * * These four functions are called whenever blocks are created, @@ -331,19 +322,23 @@ void CBSFinish(CBS cbs) static void cbsBlockDelete(CBS cbs, CBSBlock block) { Bool b; + Size size; AVERT(CBS, cbs); AVERT(CBSBlock, block); + size = CBSBlockSize(block); METER_ACC(cbs->treeSearch, cbs->treeSize); b = SplayTreeDelete(cbsSplay(cbs), cbsBlockTree(block)); AVER(b); /* expect block to be in the tree */ STATISTIC(--cbs->treeSize); + AVER(cbs->size >= size); + cbs->size -= size; /* make invalid */ block->limit = block->base; - PoolFree(cbsBlockPool(cbs), (Addr)block, sizeof(CBSBlockStruct)); + PoolFree(cbsBlockPool(cbs), (Addr)block, cbs->blockStructSize); } static void cbsBlockShrunk(CBS cbs, CBSBlock block, Size oldSize) @@ -355,11 +350,10 @@ static void cbsBlockShrunk(CBS cbs, CBSBlock block, Size oldSize) newSize = CBSBlockSize(block); AVER(oldSize > newSize); + AVER(cbs->size >= oldSize - newSize); - if (cbs->fastFind) { - SplayNodeRefresh(cbsSplay(cbs), cbsBlockTree(block)); - AVER(CBSBlockSize(block) <= block->maxSize); - } + SplayNodeRefresh(cbsSplay(cbs), cbsBlockTree(block)); + cbs->size -= oldSize - newSize; } static void cbsBlockGrew(CBS cbs, CBSBlock block, Size oldSize) @@ -372,10 +366,8 @@ static void cbsBlockGrew(CBS cbs, CBSBlock block, Size oldSize) newSize = CBSBlockSize(block); AVER(oldSize < newSize); - if (cbs->fastFind) { - SplayNodeRefresh(cbsSplay(cbs), cbsBlockTree(block)); - AVER(CBSBlockSize(block) <= block->maxSize); - } + SplayNodeRefresh(cbsSplay(cbs), cbsBlockTree(block)); + cbs->size += newSize - oldSize; } /* cbsBlockAlloc -- allocate a new block and set its base and limit, @@ -391,7 +383,7 @@ static Res cbsBlockAlloc(CBSBlock *blockReturn, CBS cbs, Range range) AVERT(CBS, cbs); AVERT(Range, range); - res = PoolAlloc(&p, cbsBlockPool(cbs), sizeof(CBSBlockStruct), + res = PoolAlloc(&p, cbsBlockPool(cbs), cbs->blockStructSize, /* withReservoirPermit */ FALSE); if (res != ResOK) goto failPoolAlloc; @@ -400,7 +392,8 @@ static Res cbsBlockAlloc(CBSBlock *blockReturn, CBS cbs, Range range) TreeInit(cbsBlockTree(block)); block->base = RangeBase(range); block->limit = RangeLimit(range); - block->maxSize = CBSBlockSize(block); + + SplayNodeInit(cbsSplay(cbs), cbsBlockTree(block)); AVERT(CBSBlock, block); *blockReturn = block; @@ -424,13 +417,21 @@ static void cbsBlockInsert(CBS cbs, CBSBlock block) b = SplayTreeInsert(cbsSplay(cbs), cbsBlockTree(block)); AVER(b); STATISTIC(++cbs->treeSize); + cbs->size += CBSBlockSize(block); } -/* cbsInsertIntoTree -- Insert a range into the tree */ +/* cbsInsert -- Insert a range into the CBS + * + * See . + * + * .insert.alloc: Will only allocate a block if the range does not + * abut an existing range. 
+ */ -static Res cbsInsertIntoTree(Range rangeReturn, CBS cbs, Range range) +static Res cbsInsert(Range rangeReturn, Land land, Range range) { + CBS cbs; Bool b; Res res; Addr base, limit, newBase, newLimit; @@ -440,10 +441,11 @@ static Res cbsInsertIntoTree(Range rangeReturn, CBS cbs, Range range) Size oldSize; AVER(rangeReturn != NULL); - AVERT(CBS, cbs); + AVERT(Land, land); AVERT(Range, range); - AVER(RangeIsAligned(range, cbs->alignment)); + AVER(RangeIsAligned(range, LandAlignment(land))); + cbs = cbsOfLand(land); base = RangeBase(range); limit = RangeLimit(range); @@ -524,46 +526,28 @@ fail: } -/* CBSInsert -- Insert a range into the CBS +/* cbsDelete -- Remove a range from a CBS * - * See . + * See . * - * .insert.alloc: Will only allocate a block if the range does not - * abut an existing range. + * .delete.alloc: Will only allocate a block if the range splits + * an existing range. */ -Res CBSInsert(Range rangeReturn, CBS cbs, Range range) -{ - Res res; - - AVERT(CBS, cbs); - cbsEnter(cbs); - - AVER(rangeReturn != NULL); - AVERT(Range, range); - AVER(RangeIsAligned(range, cbs->alignment)); - - res = cbsInsertIntoTree(rangeReturn, cbs, range); - - cbsLeave(cbs); - return res; -} - - -/* cbsDeleteFromTree -- delete blocks from the tree */ - -static Res cbsDeleteFromTree(Range rangeReturn, CBS cbs, Range range) +static Res cbsDelete(Range rangeReturn, Land land, Range range) { + CBS cbs; Res res; CBSBlock cbsBlock; Tree tree; Addr base, limit, oldBase, oldLimit; Size oldSize; + AVERT(Land, land); + cbs = cbsOfLand(land); AVER(rangeReturn != NULL); - AVERT(CBS, cbs); AVERT(Range, range); - AVER(RangeIsAligned(range, cbs->alignment)); + AVER(RangeIsAligned(range, LandAlignment(land))); base = RangeBase(range); limit = RangeLimit(range); @@ -628,32 +612,6 @@ failSplayTreeSearch: } -/* CBSDelete -- Remove a range from a CBS - * - * See . - * - * .delete.alloc: Will only allocate a block if the range splits - * an existing range. 
- */ - -Res CBSDelete(Range rangeReturn, CBS cbs, Range range) -{ - Res res; - - AVERT(CBS, cbs); - cbsEnter(cbs); - - AVER(rangeReturn != NULL); - AVERT(Range, range); - AVER(RangeIsAligned(range, cbs->alignment)); - - res = cbsDeleteFromTree(rangeReturn, cbs, range); - - cbsLeave(cbs); - return res; -} - - static Res cbsBlockDescribe(CBSBlock block, mps_lib_FILE *stream) { Res res; @@ -662,11 +620,9 @@ static Res cbsBlockDescribe(CBSBlock block, mps_lib_FILE *stream) return ResFAIL; res = WriteF(stream, 0, - "[$P,$P) {$U, $B}", + "[$P,$P)", (WriteFP)block->base, (WriteFP)block->limit, - (WriteFU)block->maxSize, - (WriteFB)block->zones, NULL); return res; } @@ -684,25 +640,74 @@ static Res cbsSplayNodeDescribe(Tree tree, mps_lib_FILE *stream) return res; } +static Res cbsFastBlockDescribe(CBSFastBlock block, mps_lib_FILE *stream) +{ + Res res; -/* CBSIterate -- iterate over all blocks in CBS + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, 0, + "[$P,$P) {$U}", + (WriteFP)block->cbsBlockStruct.base, + (WriteFP)block->cbsBlockStruct.limit, + (WriteFU)block->maxSize, + NULL); + return res; +} + +static Res cbsFastSplayNodeDescribe(Tree tree, mps_lib_FILE *stream) +{ + Res res; + + if (tree == TreeEMPTY) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = cbsFastBlockDescribe(cbsFastBlockOfTree(tree), stream); + return res; +} + +static Res cbsZonedBlockDescribe(CBSZonedBlock block, mps_lib_FILE *stream) +{ + Res res; + + if (stream == NULL) + return ResFAIL; + + res = WriteF(stream, 0, + "[$P,$P) {$U, $B}", + (WriteFP)block->cbsFastBlockStruct.cbsBlockStruct.base, + (WriteFP)block->cbsFastBlockStruct.cbsBlockStruct.limit, + (WriteFU)block->cbsFastBlockStruct.maxSize, + (WriteFB)block->zones, + NULL); + return res; +} + +static Res cbsZonedSplayNodeDescribe(Tree tree, mps_lib_FILE *stream) +{ + Res res; + + if (tree == TreeEMPTY) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + + res = cbsZonedBlockDescribe(cbsZonedBlockOfTree(tree), stream); + return res; +} + + +/* cbsIterate -- iterate over all blocks in CBS * - * Applies a visitor to all isolated contiguous ranges in a CBS. - * It receives a pointer, ``Size`` closure pair to pass on to the - * visitor function, and an visitor function to invoke on every range - * in address order. If the visitor returns ``FALSE``, then the iteration - * is terminated. - * - * The visitor function may not modify the CBS during the iteration. - * This is because CBSIterate uses TreeTraverse, which does not permit - * modification, for speed and to avoid perturbing the splay tree balance. - * - * See . + * See . 
*/ typedef struct CBSIterateClosure { - CBS cbs; - CBSVisitor iterate; + Land land; + LandVisitor visitor; void *closureP; Size closureS; } CBSIterateClosure; @@ -712,26 +717,32 @@ static Bool cbsIterateVisit(Tree tree, void *closureP, Size closureS) CBSIterateClosure *closure = closureP; RangeStruct range; CBSBlock cbsBlock; - CBS cbs = closure->cbs; + Land land = closure->land; + CBS cbs = cbsOfLand(land); + Bool cont = TRUE; + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); cbsBlock = cbsBlockOfTree(tree); RangeInit(&range, CBSBlockBase(cbsBlock), CBSBlockLimit(cbsBlock)); - if (!closure->iterate(cbs, &range, closure->closureP, closure->closureS)) + cont = (*closure->visitor)(land, &range, closure->closureP, closure->closureS); + if (!cont) return FALSE; METER_ACC(cbs->treeSearch, cbs->treeSize); return TRUE; } -void CBSIterate(CBS cbs, CBSVisitor visitor, - void *closureP, Size closureS) +static Bool cbsIterate(Land land, LandVisitor visitor, + void *closureP, Size closureS) { + CBS cbs; SplayTree splay; CBSIterateClosure closure; + AVERT(Land, land); + cbs = cbsOfLand(land); AVERT(CBS, cbs); - cbsEnter(cbs); AVER(FUNCHECK(visitor)); splay = cbsSplay(cbs); @@ -739,36 +750,19 @@ void CBSIterate(CBS cbs, CBSVisitor visitor, /* searches and meter it. */ METER_ACC(cbs->treeSearch, cbs->treeSize); - closure.cbs = cbs; - closure.iterate = visitor; + closure.land = land; + closure.visitor = visitor; closure.closureP = closureP; closure.closureS = closureS; - (void)TreeTraverse(SplayTreeRoot(splay), splay->compare, splay->nodeKey, - cbsIterateVisit, &closure, 0); - - cbsLeave(cbs); - return; -} - - -/* FindDeleteCheck -- check method for a FindDelete value */ - -Bool FindDeleteCheck(FindDelete findDelete) -{ - CHECKL(findDelete == FindDeleteNONE - || findDelete == FindDeleteLOW - || findDelete == FindDeleteHIGH - || findDelete == FindDeleteENTIRE); - UNUSED(findDelete); /* */ - - return TRUE; + return TreeTraverse(SplayTreeRoot(splay), splay->compare, splay->nodeKey, + cbsIterateVisit, &closure, UNUSED_SIZE); } /* cbsFindDeleteRange -- delete appropriate range of block found */ static void cbsFindDeleteRange(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Range range, Size size, + Land land, Range range, Size size, FindDelete findDelete) { Bool callDelete = TRUE; @@ -776,11 +770,11 @@ static void cbsFindDeleteRange(Range rangeReturn, Range oldRangeReturn, AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); - AVERT(CBS, cbs); + AVERT(Land, land); AVERT(Range, range); - AVER(RangeIsAligned(range, cbs->alignment)); + AVER(RangeIsAligned(range, LandAlignment(land))); AVER(size > 0); - AVER(SizeIsAligned(size, cbs->alignment)); + AVER(SizeIsAligned(size, LandAlignment(land))); AVER(RangeSize(range) >= size); AVERT(FindDelete, findDelete); @@ -814,32 +808,36 @@ static void cbsFindDeleteRange(Range rangeReturn, Range oldRangeReturn, if (callDelete) { Res res; - res = cbsDeleteFromTree(oldRangeReturn, cbs, rangeReturn); + res = cbsDelete(oldRangeReturn, land, rangeReturn); /* Can't have run out of memory, because all our callers pass in blocks that were just found in the tree, and we only - deleted from one end of the block, so cbsDeleteFromTree did not + deleted from one end of the block, so cbsDelete did not need to allocate a new block. 
*/ AVER(res == ResOK); + } else { + RangeCopy(oldRangeReturn, rangeReturn); } } /* CBSFindFirst -- find the first block of at least the given size */ -Bool CBSFindFirst(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, FindDelete findDelete) +static Bool cbsFindFirst(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) { + CBS cbs; Bool found; Tree tree; + AVERT(Land, land); + cbs = cbsOfLand(land); AVERT(CBS, cbs); - cbsEnter(cbs); + AVER(IsLandSubclass(cbsLand(cbs), CBSFastLandClass)); AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); AVER(size > 0); - AVER(SizeIsAligned(size, cbs->alignment)); - AVER(cbs->fastFind); + AVER(SizeIsAligned(size, LandAlignment(land))); AVERT(FindDelete, findDelete); METER_ACC(cbs->treeSearch, cbs->treeSize); @@ -852,16 +850,17 @@ Bool CBSFindFirst(Range rangeReturn, Range oldRangeReturn, AVER(CBSBlockSize(block) >= size); RangeInit(&range, CBSBlockBase(block), CBSBlockLimit(block)); AVER(RangeSize(&range) >= size); - cbsFindDeleteRange(rangeReturn, oldRangeReturn, cbs, &range, + cbsFindDeleteRange(rangeReturn, oldRangeReturn, land, &range, size, findDelete); } - cbsLeave(cbs); return found; } -/* CBSFindFirstInZones -- find the first block of at least the given size - that lies entirely within a zone set */ +/* cbsFindInZones -- find a block of at least the given size that lies + * entirely within a zone set. (The first such block, if high is + * FALSE, or the last, if high is TRUE.) + */ typedef struct cbsTestNodeInZonesClosureStruct { Size size; @@ -873,15 +872,15 @@ typedef struct cbsTestNodeInZonesClosureStruct { } cbsTestNodeInZonesClosureStruct, *cbsTestNodeInZonesClosure; static Bool cbsTestNodeInZones(SplayTree splay, Tree tree, - void *closureP, Size closureSize) + void *closureP, Size closureS) { CBSBlock block = cbsBlockOfTree(tree); cbsTestNodeInZonesClosure closure = closureP; RangeInZoneSet search; UNUSED(splay); - AVER(closureSize == sizeof(cbsTestNodeInZonesClosureStruct)); - UNUSED(closureSize); + AVER(closureS == UNUSED_SIZE); + UNUSED(closureS); search = closure->high ? RangeInZoneSetLast : RangeInZoneSetFirst; @@ -891,104 +890,39 @@ static Bool cbsTestNodeInZones(SplayTree splay, Tree tree, } static Bool cbsTestTreeInZones(SplayTree splay, Tree tree, - void *closureP, Size closureSize) + void *closureP, Size closureS) { - CBSBlock block = cbsBlockOfTree(tree); + CBSFastBlock fastBlock = cbsFastBlockOfTree(tree); + CBSZonedBlock zonedBlock = cbsZonedBlockOfTree(tree); cbsTestNodeInZonesClosure closure = closureP; UNUSED(splay); - AVER(closureSize == sizeof(cbsTestNodeInZonesClosureStruct)); - UNUSED(closureSize); + AVER(closureS == UNUSED_SIZE); + UNUSED(closureS); - return block->maxSize >= closure->size && - ZoneSetInter(block->zones, closure->zoneSet) != ZoneSetEMPTY; -} - -Res CBSFindInZones(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, - ZoneSet zoneSet, Bool high) -{ - Tree tree; - cbsTestNodeInZonesClosureStruct closure; - Res res; - CBSFindMethod cbsFind; - SplayFindMethod splayFind; - - AVER(rangeReturn != NULL); - AVER(oldRangeReturn != NULL); - AVERT(CBS, cbs); - /* AVERT(ZoneSet, zoneSet); */ - AVERT(Bool, high); - - cbsFind = high ? CBSFindLast : CBSFindFirst; - splayFind = high ? SplayFindLast : SplayFindFirst; - - if (zoneSet == ZoneSetEMPTY) - return ResFAIL; - if (zoneSet == ZoneSetUNIV) { - FindDelete fd = high ? 
FindDeleteHIGH : FindDeleteLOW; - if (cbsFind(rangeReturn, oldRangeReturn, cbs, size, fd)) - return ResOK; - else - return ResFAIL; - } - if (ZoneSetIsSingle(zoneSet) && size > ArenaStripeSize(cbs->arena)) - return ResFAIL; - - /* It would be nice if there were a neat way to eliminate all runs of - zones in zoneSet too small for size.*/ - - cbsEnter(cbs); - - closure.arena = cbs->arena; - closure.zoneSet = zoneSet; - closure.size = size; - closure.high = high; - if (splayFind(&tree, cbsSplay(cbs), - cbsTestNodeInZones, - cbsTestTreeInZones, - &closure, sizeof(closure))) { - CBSBlock block = cbsBlockOfTree(tree); - RangeStruct rangeStruct, oldRangeStruct; - - AVER(CBSBlockBase(block) <= closure.base); - AVER(AddrOffset(closure.base, closure.limit) >= size); - AVER(ZoneSetSub(ZoneSetOfRange(cbs->arena, closure.base, closure.limit), zoneSet)); - AVER(closure.limit <= CBSBlockLimit(block)); - - if (!high) - RangeInit(&rangeStruct, closure.base, AddrAdd(closure.base, size)); - else - RangeInit(&rangeStruct, AddrSub(closure.limit, size), closure.limit); - res = cbsDeleteFromTree(&oldRangeStruct, cbs, &rangeStruct); - if (res == ResOK) { /* enough memory to split block */ - RangeCopy(rangeReturn, &rangeStruct); - RangeCopy(oldRangeReturn, &oldRangeStruct); - } - } else - res = ResFAIL; - - cbsLeave(cbs); - return res; + return fastBlock->maxSize >= closure->size + && ZoneSetInter(zonedBlock->zones, closure->zoneSet) != ZoneSetEMPTY; } -/* CBSFindLast -- find the last block of at least the given size */ +/* cbsFindLast -- find the last block of at least the given size */ -Bool CBSFindLast(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, FindDelete findDelete) +static Bool cbsFindLast(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) { + CBS cbs; Bool found; Tree tree; + AVERT(Land, land); + cbs = cbsOfLand(land); AVERT(CBS, cbs); - cbsEnter(cbs); + AVER(IsLandSubclass(cbsLand(cbs), CBSFastLandClass)); AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); AVER(size > 0); - AVER(SizeIsAligned(size, cbs->alignment)); - AVER(cbs->fastFind); + AVER(SizeIsAligned(size, LandAlignment(land))); AVERT(FindDelete, findDelete); METER_ACC(cbs->treeSearch, cbs->treeSize); @@ -1001,38 +935,40 @@ Bool CBSFindLast(Range rangeReturn, Range oldRangeReturn, AVER(CBSBlockSize(block) >= size); RangeInit(&range, CBSBlockBase(block), CBSBlockLimit(block)); AVER(RangeSize(&range) >= size); - cbsFindDeleteRange(rangeReturn, oldRangeReturn, cbs, &range, + cbsFindDeleteRange(rangeReturn, oldRangeReturn, land, &range, size, findDelete); } - cbsLeave(cbs); return found; } -/* CBSFindLargest -- find the largest block in the CBS */ +/* cbsFindLargest -- find the largest block in the CBS */ -Bool CBSFindLargest(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, FindDelete findDelete) +static Bool cbsFindLargest(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) { + CBS cbs; Bool found = FALSE; + AVERT(Land, land); + cbs = cbsOfLand(land); AVERT(CBS, cbs); - cbsEnter(cbs); + AVER(IsLandSubclass(cbsLand(cbs), CBSFastLandClass)); AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); - AVER(cbs->fastFind); + AVER(size > 0); AVERT(FindDelete, findDelete); if (!SplayTreeIsEmpty(cbsSplay(cbs))) { RangeStruct range; - CBSBlock block; Tree tree = TreeEMPTY; /* suppress "may be used uninitialized" */ Size maxSize; - maxSize = cbsBlockOfTree(SplayTreeRoot(cbsSplay(cbs)))->maxSize; + maxSize = 
cbsFastBlockOfTree(SplayTreeRoot(cbsSplay(cbs)))->maxSize; if (maxSize >= size) { + CBSBlock block; METER_ACC(cbs->treeSearch, cbs->treeSize); found = SplayFindFirst(&tree, cbsSplay(cbs), &cbsTestNode, &cbsTestTree, NULL, maxSize); @@ -1041,25 +977,103 @@ Bool CBSFindLargest(Range rangeReturn, Range oldRangeReturn, AVER(CBSBlockSize(block) >= maxSize); RangeInit(&range, CBSBlockBase(block), CBSBlockLimit(block)); AVER(RangeSize(&range) >= maxSize); - cbsFindDeleteRange(rangeReturn, oldRangeReturn, cbs, &range, - maxSize, findDelete); + cbsFindDeleteRange(rangeReturn, oldRangeReturn, land, &range, + size, findDelete); } } - cbsLeave(cbs); return found; } -/* CBSDescribe -- describe a CBS +static Res cbsFindInZones(Bool *foundReturn, Range rangeReturn, + Range oldRangeReturn, Land land, Size size, + ZoneSet zoneSet, Bool high) +{ + CBS cbs; + CBSBlock block; + Tree tree; + cbsTestNodeInZonesClosureStruct closure; + Res res; + LandFindMethod landFind; + SplayFindMethod splayFind; + RangeStruct rangeStruct, oldRangeStruct; + + AVER(foundReturn != NULL); + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + cbs = cbsOfLand(land); + AVERT(CBS, cbs); + AVER(IsLandSubclass(cbsLand(cbs), CBSZonedLandClass)); + /* AVERT(ZoneSet, zoneSet); */ + AVER(BoolCheck(high)); + + landFind = high ? cbsFindLast : cbsFindFirst; + splayFind = high ? SplayFindLast : SplayFindFirst; + + if (zoneSet == ZoneSetEMPTY) + goto fail; + if (zoneSet == ZoneSetUNIV) { + FindDelete fd = high ? FindDeleteHIGH : FindDeleteLOW; + *foundReturn = (*landFind)(rangeReturn, oldRangeReturn, land, size, fd); + return ResOK; + } + if (ZoneSetIsSingle(zoneSet) && size > ArenaStripeSize(LandArena(land))) + goto fail; + + /* It would be nice if there were a neat way to eliminate all runs of + zones in zoneSet too small for size.*/ + + closure.arena = LandArena(land); + closure.zoneSet = zoneSet; + closure.size = size; + closure.high = high; + if (!(*splayFind)(&tree, cbsSplay(cbs), + cbsTestNodeInZones, cbsTestTreeInZones, + &closure, UNUSED_SIZE)) + goto fail; + + block = cbsBlockOfTree(tree); + + AVER(CBSBlockBase(block) <= closure.base); + AVER(AddrOffset(closure.base, closure.limit) >= size); + AVER(ZoneSetSub(ZoneSetOfRange(LandArena(land), closure.base, closure.limit), zoneSet)); + AVER(closure.limit <= CBSBlockLimit(block)); + + if (!high) + RangeInit(&rangeStruct, closure.base, AddrAdd(closure.base, size)); + else + RangeInit(&rangeStruct, AddrSub(closure.limit, size), closure.limit); + res = cbsDelete(&oldRangeStruct, land, &rangeStruct); + if (res != ResOK) + /* not enough memory to split block */ + return res; + RangeCopy(rangeReturn, &rangeStruct); + RangeCopy(oldRangeReturn, &oldRangeStruct); + *foundReturn = TRUE; + return ResOK; + +fail: + *foundReturn = FALSE; + return ResOK; +} + + +/* cbsDescribe -- describe a CBS * - * See . + * See . 
*/ -Res CBSDescribe(CBS cbs, mps_lib_FILE *stream, Count depth) +static Res cbsDescribe(Land land, mps_lib_FILE *stream, Count depth) { + CBS cbs; Res res; + Res (*describe)(Tree, mps_lib_FILE *); + if (!TESTT(Land, land)) + return ResFAIL; + cbs = cbsOfLand(land); if (!TESTT(CBS, cbs)) return ResFAIL; if (stream == NULL) @@ -1067,24 +1081,65 @@ Res CBSDescribe(CBS cbs, mps_lib_FILE *stream, Count depth) res = WriteF(stream, depth, "CBS $P {\n", (WriteFP)cbs, - " alignment: $U\n", (WriteFU)cbs->alignment, " blockPool: $P\n", (WriteFP)cbsBlockPool(cbs), - " fastFind: $U\n", (WriteFU)cbs->fastFind, - " inCBS: $U\n", (WriteFU)cbs->inCBS, + " ownPool: $U\n", (WriteFU)cbs->ownPool, " treeSize: $U\n", (WriteFU)cbs->treeSize, NULL); if (res != ResOK) return res; METER_WRITE(cbs->treeSearch, stream, depth + 2); - res = SplayTreeDescribe(cbsSplay(cbs), stream, depth + 2, - &cbsSplayNodeDescribe); + if (IsLandSubclass(land, CBSZonedLandClass)) + describe = cbsZonedSplayNodeDescribe; + else if (IsLandSubclass(land, CBSFastLandClass)) + describe = cbsFastSplayNodeDescribe; + else + describe = cbsSplayNodeDescribe; + + res = SplayTreeDescribe(cbsSplay(cbs), stream, depth + 2, describe); if (res != ResOK) return res; res = WriteF(stream, depth, "} CBS $P\n", (WriteFP)cbs, NULL); + + res = WriteF(stream, 0, "}\n", NULL); return res; } +DEFINE_LAND_CLASS(CBSLandClass, class) +{ + INHERIT_CLASS(class, LandClass); + class->name = "CBS"; + class->size = sizeof(CBSStruct); + class->init = cbsInit; + class->finish = cbsFinish; + class->sizeMethod = cbsSize; + class->insert = cbsInsert; + class->delete = cbsDelete; + class->iterate = cbsIterate; + class->findFirst = cbsFindFirst; + class->findLast = cbsFindLast; + class->findLargest = cbsFindLargest; + class->findInZones = cbsFindInZones; + class->describe = cbsDescribe; + AVERT(LandClass, class); +} + +DEFINE_LAND_CLASS(CBSFastLandClass, class) +{ + INHERIT_CLASS(class, CBSLandClass); + class->name = "FASTCBS"; + class->init = cbsInitFast; + AVERT(LandClass, class); +} + +DEFINE_LAND_CLASS(CBSZonedLandClass, class) +{ + INHERIT_CLASS(class, CBSFastLandClass); + class->name = "ZONEDCBS"; + class->init = cbsInitZoned; + AVERT(LandClass, class); +} + /* C. COPYRIGHT AND LICENSE * diff --git a/mps/code/cbs.h b/mps/code/cbs.h index 64d00f5c015..e6bc276f067 100644 --- a/mps/code/cbs.h +++ b/mps/code/cbs.h @@ -15,55 +15,37 @@ #include "range.h" #include "splay.h" - -/* TODO: There ought to be different levels of CBS block with inheritance - so that CBSs without fastFind don't allocate the maxSize and zones fields, - and CBSs without zoned don't allocate the zones field. 
*/ - typedef struct CBSBlockStruct *CBSBlock; typedef struct CBSBlockStruct { TreeStruct treeStruct; Addr base; Addr limit; - Size maxSize; /* accurate maximum block size of sub-tree */ - ZoneSet zones; /* union zone set of all ranges in sub-tree */ } CBSBlockStruct; +typedef struct CBSFastBlockStruct *CBSFastBlock; +typedef struct CBSFastBlockStruct { + struct CBSBlockStruct cbsBlockStruct; + Size maxSize; /* accurate maximum block size of sub-tree */ +} CBSFastBlockStruct; + +typedef struct CBSZonedBlockStruct *CBSZonedBlock; +typedef struct CBSZonedBlockStruct { + struct CBSFastBlockStruct cbsFastBlockStruct; + ZoneSet zones; /* union zone set of all ranges in sub-tree */ +} CBSZonedBlockStruct; typedef struct CBSStruct *CBS; -typedef Bool (*CBSVisitor)(CBS cbs, Range range, - void *closureP, Size closureS); extern Bool CBSCheck(CBS cbs); +extern LandClass CBSLandClassGet(void); +extern LandClass CBSFastLandClassGet(void); +extern LandClass CBSZonedLandClassGet(void); + extern const struct mps_key_s _mps_key_cbs_block_pool; #define CBSBlockPool (&_mps_key_cbs_block_pool) #define CBSBlockPool_FIELD pool -/* TODO: Passing booleans to affect behaviour is ugly and error-prone. */ -extern Res CBSInit(CBS cbs, Arena arena, void *owner, Align alignment, - Bool fastFind, Bool zoned, ArgList args); -extern void CBSFinish(CBS cbs); - -extern Res CBSInsert(Range rangeReturn, CBS cbs, Range range); -extern Res CBSDelete(Range rangeReturn, CBS cbs, Range range); -extern void CBSIterate(CBS cbs, CBSVisitor visitor, - void *closureP, Size closureS); - -extern Res CBSDescribe(CBS cbs, mps_lib_FILE *stream, Count depth); - -typedef Bool (*CBSFindMethod)(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, FindDelete findDelete); -extern Bool CBSFindFirst(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, FindDelete findDelete); -extern Bool CBSFindLast(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, FindDelete findDelete); -extern Bool CBSFindLargest(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, FindDelete findDelete); - -extern Res CBSFindInZones(Range rangeReturn, Range oldRangeReturn, - CBS cbs, Size size, ZoneSet zoneSet, Bool high); - #endif /* cbs_h */ diff --git a/mps/code/chain.h b/mps/code/chain.h index b06596fdae8..5c9b4b10f55 100644 --- a/mps/code/chain.h +++ b/mps/code/chain.h @@ -1,7 +1,7 @@ /* chain.h: GENERATION CHAINS * * $Id$ - * Copyright (c) 2001 Ravenbrook Limited. See end of file for license. + * Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license. */ #ifndef chain_h @@ -31,7 +31,6 @@ typedef struct GenDescStruct { ZoneSet zones; /* zoneset for this generation */ Size capacity; /* capacity in kB */ double mortality; - double proflow; /* predicted proportion of survivors promoted */ RingStruct locusRing; /* Ring of all PoolGen's in this GenDesc (locus) */ } GenDescStruct; @@ -44,19 +43,19 @@ typedef struct PoolGenStruct *PoolGen; typedef struct PoolGenStruct { Sig sig; - Serial nr; /* generation number */ Pool pool; /* pool this belongs to */ - Chain chain; /* chain this belongs to */ + GenDesc gen; /* generation this belongs to */ /* link in ring of all PoolGen's in this GenDesc (locus) */ RingStruct genRing; - Size totalSize; /* total size of segs in gen in this pool */ - Size newSize; /* size allocated since last GC */ - /* newSize when TraceCreate was called. 
This is used in the - * TraceStartPoolGen event emitted at the start of a trace; at that - * time, newSize has already been diminished by Whiten so we can't - * use that value. TODO: This will not work well with multiple - * traces. */ - Size newSizeAtCreate; + + /* Accounting of memory in this generation for this pool */ + STATISTIC_DECL(Size segs); /* number of segments */ + Size totalSize; /* total (sum of segment sizes) */ + STATISTIC_DECL(Size freeSize); /* unused (free or lost to fragmentation) */ + Size newSize; /* allocated since last collection */ + STATISTIC_DECL(Size oldSize); /* allocated prior to last collection */ + Size newDeferredSize; /* new (but deferred) */ + STATISTIC_DECL(Size oldDeferredSize); /* old (but deferred) */ } PoolGenStruct; @@ -86,25 +85,31 @@ extern Res ChainCondemnAuto(double *mortalityReturn, Chain chain, Trace trace); extern void ChainStartGC(Chain chain, Trace trace); extern void ChainEndGC(Chain chain, Trace trace); extern size_t ChainGens(Chain chain); -extern Res ChainAlloc(Seg *segReturn, Chain chain, Serial genNr, - SegClass class, Size size, Pool pool, - Bool withReservoirPermit, ArgList args); +extern GenDesc ChainGen(Chain chain, Index gen); extern Res ChainDescribe(Chain chain, mps_lib_FILE *stream, Count depth); -extern Bool PoolGenCheck(PoolGen gen); -extern Res PoolGenInit(PoolGen gen, Chain chain, Serial nr, Pool pool); -extern void PoolGenFinish(PoolGen gen); -extern void PoolGenFlip(PoolGen gen); -#define PoolGenNr(gen) ((gen)->nr) +extern Bool PoolGenCheck(PoolGen pgen); +extern Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool); +extern void PoolGenFinish(PoolGen pgen); +extern Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass class, + Size size, Bool withReservoirPermit, ArgList args); +extern void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize, + Size newSize, Bool deferred); +extern void PoolGenAccountForFill(PoolGen pgen, Size size, Bool deferred); +extern void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred); +extern void PoolGenAccountForAge(PoolGen pgen, Size aged, Bool deferred); +extern void PoolGenAccountForReclaim(PoolGen pgen, Size reclaimed, Bool deferred); +extern void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize); +extern void PoolGenAccountForSegSplit(PoolGen pgen); +extern void PoolGenAccountForSegMerge(PoolGen pgen); extern Res PoolGenDescribe(PoolGen gen, mps_lib_FILE *stream, Count depth); - #endif /* chain_h */ /* C. COPYRIGHT AND LICENSE * - * Copyright (C) 2001-2002 Ravenbrook Limited . + * Copyright (C) 2001-2014 Ravenbrook Limited . * All rights reserved. This is an open source license. Contact * Ravenbrook for commercial licensing options. * diff --git a/mps/code/comm.gmk b/mps/code/comm.gmk index 70fed15d507..6e5adce14f5 100644 --- a/mps/code/comm.gmk +++ b/mps/code/comm.gmk @@ -15,8 +15,8 @@ # Assumes the following variables and definitions: # EXTRA_TARGETS a list of extra targets to build # CFLAGSCOMPILER a list of flags for all compilations -# CFLAGSSTRICT a list of flags for almost all compilations -# CFLAGSLAX a list of flags for compilations which can't be as +# CFLAGSCOMPILERSTRICT a list of flags for almost all compilations +# CFLAGSCOMPILERLAX a list of flags for compilations which can't be as # strict (e.g. because they have to include a third- # party header file that isn't -ansi -pedantic). # CFLAGSDEBUG a list of flags for compilations with maximum debug @@ -108,7 +108,7 @@ endif # These flags are included in all compilations. 
# Avoid using PFMDEFS in platform makefiles, as they prevent the MPS being # built with a simple command like "cc -c mps.c". -CFLAGSCOMMON = $(PFMDEFS) $(CFLAGSCOMPILER) $(CFLAGSCOMPILERSTRICT) +CFLAGSCOMMONSTRICT = $(PFMDEFS) $(CFLAGSCOMPILER) $(CFLAGSCOMPILERSTRICT) CFLAGSCOMMONLAX = $(PFMDEFS) $(CFLAGSCOMPILER) $(CFLAGSCOMPILERLAX) # %%VARIETY: When adding a new variety, define a macro containing the set @@ -119,20 +119,17 @@ CFRASH = -DCONFIG_VAR_RASH -DNDEBUG $(CFLAGSOPT) CFHOT = -DCONFIG_VAR_HOT -DNDEBUG $(CFLAGSOPT) CFCOOL = -DCONFIG_VAR_COOL $(CFLAGSDEBUG) -# Bind CFLAGS to the appropriate set of flags for the variety. -# %%VARIETY: When adding a new variety, add a test for the variety and set -# CFLAGS here. +# Bind CFLAGSVARIETY to the appropriate set of flags for the variety. +# %%VARIETY: When adding a new variety, add a test for the variety and +# set CFLAGSVARIETY here. ifeq ($(VARIETY),rash) -CFLAGS=$(CFLAGSCOMMON) $(CFRASH) -CFLAGSLAX=$(CFLAGSCOMMONLAX) $(CFRASH) +CFLAGSVARIETY=$(CFRASH) else ifeq ($(VARIETY),hot) -CFLAGS=$(CFLAGSCOMMON) $(CFHOT) -CFLAGSLAX=$(CFLAGSCOMMONLAX) $(CFHOT) +CFLAGSVARIETY=$(CFHOT) else ifeq ($(VARIETY),cool) -CFLAGS=$(CFLAGSCOMMON) $(CFCOOL) -CFLAGSLAX=$(CFLAGSCOMMONLAX) $(CFCOOL) +CFLAGSVARIETY=$(CFCOOL) else ifneq ($(VARIETY),) $(error Variety "$(VARIETY)" not recognized: must be rash/hot/cool) @@ -141,7 +138,8 @@ endif endif endif - +CFLAGSSTRICT=$(CFLAGSCOMMONSTRICT) $(CFLAGSVARIETY) $(CFLAGS) +CFLAGSLAX=$(CFLAGSCOMMONLAX) $(CFLAGSVARIETY) $(CFLAGS) ARFLAGS=rc$(ARFLAGSPFM) @@ -158,20 +156,62 @@ SNC = poolsnc.c POOLN = pooln.c MV2 = poolmv2.c MVFF = poolmvff.c -TESTLIB = testlib.c testthrix.c +TESTLIB = testlib.c +TESTTHR = testthrix.c FMTDY = fmtdy.c fmtno.c FMTDYTST = fmtdy.c fmtno.c fmtdytst.c FMTHETST = fmthe.c fmtdy.c fmtno.c fmtdytst.c FMTSCM = fmtscheme.c PLINTH = mpsliban.c mpsioan.c -EVENTPROC = eventcnv.c table.c -MPMCOMMON = abq.c arena.c arenacl.c arenavm.c arg.c boot.c bt.c \ - buffer.c cbs.c dbgpool.c dbgpooli.c event.c format.c freelist.c \ - global.c ld.c locus.c message.c meter.c mpm.c mpsi.c nailboard.c \ - pool.c poolabs.c poolmfs.c poolmrg.c poolmv.c protocol.c range.c \ - ref.c reserv.c ring.c root.c sa.c sac.c seg.c shield.c splay.c ss.c \ - table.c trace.c traceanc.c tract.c tree.c walk.c -MPM = $(MPMCOMMON) $(MPMPF) +MPMCOMMON = \ + abq.c \ + arena.c \ + arenacl.c \ + arenavm.c \ + arg.c \ + boot.c \ + bt.c \ + buffer.c \ + cbs.c \ + dbgpool.c \ + dbgpooli.c \ + event.c \ + failover.c \ + format.c \ + freelist.c \ + global.c \ + land.c \ + ld.c \ + locus.c \ + message.c \ + meter.c \ + mpm.c \ + mpsi.c \ + nailboard.c \ + pool.c \ + poolabs.c \ + poolmfs.c \ + poolmrg.c \ + poolmv.c \ + protocol.c \ + range.c \ + ref.c \ + reserv.c \ + ring.c \ + root.c \ + sa.c \ + sac.c \ + seg.c \ + shield.c \ + splay.c \ + ss.c \ + table.c \ + trace.c \ + traceanc.c \ + tract.c \ + tree.c \ + walk.c +MPM = $(MPMCOMMON) $(MPMPF) $(AMC) $(AMS) $(AWL) $(LO) $(MV2) $(MVFF) $(PLINTH) # These map the source file lists onto object files and dependency files @@ -183,38 +223,16 @@ MPM = $(MPMCOMMON) $(MPMPF) ifdef VARIETY MPMOBJ = $(MPM:%.c=$(PFM)/$(VARIETY)/%.o) \ $(MPMS:%.s=$(PFM)/$(VARIETY)/%.o) -MPMDEP = $(MPM:%.c=$(PFM)/$(VARIETY)/%.d) -AMCOBJ = $(AMC:%.c=$(PFM)/$(VARIETY)/%.o) -AMCDEP = $(AMC:%.c=$(PFM)/$(VARIETY)/%.d) -AMSOBJ = $(AMS:%.c=$(PFM)/$(VARIETY)/%.o) -AMSDEP = $(AMS:%.c=$(PFM)/$(VARIETY)/%.d) -AWLOBJ = $(AWL:%.c=$(PFM)/$(VARIETY)/%.o) -AWLDEP = $(AWL:%.c=$(PFM)/$(VARIETY)/%.d) -LOOBJ = $(LO:%.c=$(PFM)/$(VARIETY)/%.o) -LODEP 
= $(LO:%.c=$(PFM)/$(VARIETY)/%.d) -SNCOBJ = $(SNC:%.c=$(PFM)/$(VARIETY)/%.o) -SNCDEP = $(SNC:%.c=$(PFM)/$(VARIETY)/%.d) -POOLNOBJ = $(POOLN:%.c=$(PFM)/$(VARIETY)/%.o) -POOLNDEP = $(POOLN:%.c=$(PFM)/$(VARIETY)/%.d) -MV2OBJ = $(MV2:%.c=$(PFM)/$(VARIETY)/%.o) -MV2DEP = $(MV2:%.c=$(PFM)/$(VARIETY)/%.d) -MVFFOBJ = $(MVFF:%.c=$(PFM)/$(VARIETY)/%.o) -MVFFDEP = $(MVFF:%.c=$(PFM)/$(VARIETY)/%.d) - -TESTLIBOBJ = $(TESTLIB:%.c=$(PFM)/$(VARIETY)/%.o) -TESTLIBDEP = $(TESTLIB:%.c=$(PFM)/$(VARIETY)/%.d) FMTDYOBJ = $(FMTDY:%.c=$(PFM)/$(VARIETY)/%.o) -FMTDYDEP = $(FMTDY:%.c=$(PFM)/$(VARIETY)/%.d) FMTDYTSTOBJ = $(FMTDYTST:%.c=$(PFM)/$(VARIETY)/%.o) -FMTDYTSTDEP = $(FMTDYTST:%.c=$(PFM)/$(VARIETY)/%.d) FMTHETSTOBJ = $(FMTHETST:%.c=$(PFM)/$(VARIETY)/%.o) -FMTHETSTDEP = $(FMTHETST:%.c=$(PFM)/$(VARIETY)/%.d) FMTSCMOBJ = $(FMTSCM:%.c=$(PFM)/$(VARIETY)/%.o) -FMTSCMDEP = $(FMTSCM:%.c=$(PFM)/$(VARIETY)/%.d) +MV2OBJ = $(MV2:%.c=$(PFM)/$(VARIETY)/%.o) +MVFFOBJ = $(MVFF:%.c=$(PFM)/$(VARIETY)/%.o) PLINTHOBJ = $(PLINTH:%.c=$(PFM)/$(VARIETY)/%.o) -PLINTHDEP = $(PLINTH:%.c=$(PFM)/$(VARIETY)/%.d) -EVENTPROCOBJ = $(EVENTPROC:%.c=$(PFM)/$(VARIETY)/%.o) -EVENTPROCDEP = $(EVENTPROC:%.c=$(PFM)/$(VARIETY)/%.d) +POOLNOBJ = $(POOLN:%.c=$(PFM)/$(VARIETY)/%.o) +TESTLIBOBJ = $(TESTLIB:%.c=$(PFM)/$(VARIETY)/%.o) +TESTTHROBJ = $(TESTTHR:%.c=$(PFM)/$(VARIETY)/%.o) endif @@ -245,11 +263,11 @@ TEST_TARGETS=\ djbench \ exposet0 \ expt825 \ - fbmtest \ finalcv \ finaltest \ fotest \ gcbench \ + landtest \ locbwcss \ lockcov \ lockut \ @@ -276,7 +294,7 @@ TEST_TARGETS=\ UNBUILDABLE_TARGETS=\ replay # depends on the EPVM pool -ALL_TARGETS=$(LIB_TARGETS) $(TEST_TARGETS) $(EXTRA_TARGETS) testrun +ALL_TARGETS=$(LIB_TARGETS) $(TEST_TARGETS) $(EXTRA_TARGETS) # == Pseudo-targets == @@ -284,16 +302,24 @@ ALL_TARGETS=$(LIB_TARGETS) $(TEST_TARGETS) $(EXTRA_TARGETS) testrun all: $(ALL_TARGETS) -# Run the automated tests. +# == Automated test suites == +# +# testrun = "smoke test", fast enough to run before every commit +# testci = continuous integration tests, must be known good +# testall = all test cases, for ensuring quality of a release +# testansi = tests that run on the generic ("ANSI") platform +# testpoll = tests that run on the generic platform with CONFIG_POLL_NONE -$(PFM)/$(VARIETY)/testrun: $(TEST_TARGETS) - ../tool/testrun.sh "$(PFM)/$(VARIETY)" +TEST_SUITES=testrun testci testall testansi testpoll + +$(addprefix $(PFM)/$(VARIETY)/,$(TEST_SUITES)): $(TEST_TARGETS) + ../tool/testrun.sh "$(PFM)/$(VARIETY)" "$(notdir $@)" # These convenience targets allow one to type "make foo" to build target # foo in selected varieties (or none, for the latter rule). -$(ALL_TARGETS): phony +$(ALL_TARGETS) $(TEST_SUITES): phony ifdef VARIETY $(MAKE) -f $(PFM).gmk TARGET=$@ variety else @@ -308,17 +334,25 @@ clean: phony $(ECHO) "$(PFM): $@" rm -rf "$(PFM)" -# "target" builds some varieties of the target named in the TARGET macro. +# "target" builds some varieties of the target named in the TARGET +# macro. +# # %%VARIETY: When adding a new target, optionally add a recursive make call # for the new variety, if it should be built by default. It probably # shouldn't without a product design decision and an update of the readme # and build manual! 
+# +# Note that we build VARIETY=cool before VARIETY=hot because +# the former doesn't need to optimize and so detects errors more +# quickly; and because the former uses file-at-a-time compilation and +# so can pick up where it left off instead of having to start from the +# beginning of mps.c ifdef TARGET ifndef VARIETY target: phony - $(MAKE) -f $(PFM).gmk VARIETY=hot variety $(MAKE) -f $(PFM).gmk VARIETY=cool variety + $(MAKE) -f $(PFM).gmk VARIETY=hot variety endif endif @@ -354,10 +388,7 @@ endif $(PFM)/rash/mps.a: $(PFM)/rash/mps.o $(PFM)/hot/mps.a: $(PFM)/hot/mps.o - -$(PFM)/cool/mps.a: \ - $(MPMOBJ) $(AMCOBJ) $(AMSOBJ) $(AWLOBJ) $(LOOBJ) $(SNCOBJ) \ - $(MV2OBJ) $(MVFFOBJ) $(PLINTHOBJ) $(POOLNOBJ) +$(PFM)/cool/mps.a: $(MPMOBJ) # OTHER GENUINE TARGETS @@ -384,7 +415,7 @@ $(PFM)/$(VARIETY)/amcsshe: $(PFM)/$(VARIETY)/amcsshe.o \ $(FMTHETSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/amcssth: $(PFM)/$(VARIETY)/amcssth.o \ - $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/amsss: $(PFM)/$(VARIETY)/amsss.o \ $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a @@ -405,7 +436,7 @@ $(PFM)/$(VARIETY)/awluthe: $(PFM)/$(VARIETY)/awluthe.o \ $(FMTHETSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/awlutth: $(PFM)/$(VARIETY)/awlutth.o \ - $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/btcv: $(PFM)/$(VARIETY)/btcv.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a @@ -414,7 +445,7 @@ $(PFM)/$(VARIETY)/bttest: $(PFM)/$(VARIETY)/bttest.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/djbench: $(PFM)/$(VARIETY)/djbench.o \ - $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/exposet0: $(PFM)/$(VARIETY)/exposet0.o \ $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a @@ -422,9 +453,6 @@ $(PFM)/$(VARIETY)/exposet0: $(PFM)/$(VARIETY)/exposet0.o \ $(PFM)/$(VARIETY)/expt825: $(PFM)/$(VARIETY)/expt825.o \ $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a -$(PFM)/$(VARIETY)/fbmtest: $(PFM)/$(VARIETY)/fbmtest.o \ - $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a - $(PFM)/$(VARIETY)/finalcv: $(PFM)/$(VARIETY)/finalcv.o \ $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a @@ -435,7 +463,10 @@ $(PFM)/$(VARIETY)/fotest: $(PFM)/$(VARIETY)/fotest.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/gcbench: $(PFM)/$(VARIETY)/gcbench.o \ - $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)/$(VARIETY)/landtest: $(PFM)/$(VARIETY)/landtest.o \ + $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/locbwcss: $(PFM)/$(VARIETY)/locbwcss.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a @@ -444,7 +475,7 @@ $(PFM)/$(VARIETY)/lockcov: $(PFM)/$(VARIETY)/lockcov.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/lockut: $(PFM)/$(VARIETY)/lockut.o \ - $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/locusss: $(PFM)/$(VARIETY)/locusss.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a @@ -468,7 +499,7 @@ $(PFM)/$(VARIETY)/nailboardtest: $(PFM)/$(VARIETY)/nailboardtest.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/poolncv: $(PFM)/$(VARIETY)/poolncv.o \ - $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a + $(POOLNOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a $(PFM)/$(VARIETY)/qs: $(PFM)/$(VARIETY)/qs.o \ $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a @@ -519,11 +550,11 @@ endif 
# Object files -define run-cc +define run-cc-strict $(ECHO) "$(PFM): $@" mkdir -p $(PFM) mkdir -p $(PFM)/$(VARIETY) -$(CC) $(CFLAGS) -c -o $@ $< +$(CC) $(CFLAGSSTRICT) -c -o $@ $< endef define run-cc-lax @@ -535,16 +566,16 @@ endef # .rule.c-to-o: $(PFM)/$(VARIETY)/%.o: %.c - $(run-cc) + $(run-cc-strict) $(PFM)/$(VARIETY)/eventsql.o: eventsql.c $(run-cc-lax) $(PFM)/$(VARIETY)/%.o: %.s - $(run-cc) + $(run-cc-strict) $(PFM)/$(VARIETY)/%.o: %.S - $(run-cc) + $(run-cc-strict) # Dependencies # @@ -571,29 +602,28 @@ else ifeq ($(VARIETY),hot) include $(PFM)/$(VARIETY)/mps.d else -# %%PART: When adding a new part, add the dependency file macro for the new -# part here. +include $(MPM:%.c=$(PFM)/$(VARIETY)/%.d) +endif # VARIETY != hot +endif # VARIETY != rash + +# %%PART: When adding a new part, add the dependencies file for the +# new part here. include \ - $(MPMDEP) \ - $(AMCDEP) \ - $(AMSDEP) \ - $(AWLDEP) \ - $(EVENTPROCDEP) \ - $(FMTDYDEP) \ - $(FMTDYTSTDEP) \ - $(FMTHETSTDEP) \ - $(FMTSCMDEP) \ - $(LODEP) \ - $(PLINTHDEP) \ - $(POOLNDEP) \ - $(TESTLIBDEP) -endif -endif + $(FMTDY:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(FMTDYTST:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(FMTHETST:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(FMTSCM:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(PLINTH:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(POOLN:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(TESTLIB:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(TESTTHR:%.c=$(PFM)/$(VARIETY)/%.d) \ + $(EXTRA_TARGETS:mps%=$(PFM)/$(VARIETY)/%.d) \ + $(TEST_TARGETS:%=$(PFM)/$(VARIETY)/%.d) -endif -endif +endif # !defined TARGET +endif # !defined VARIETY -endif +endif # !defined gendep # Library @@ -604,7 +634,7 @@ endif $(PFM)/$(VARIETY)/%.a: $(ECHO) "$(PFM): $@" rm -f $@ - $(CC) $(CFLAGS) -c -o $(PFM)/$(VARIETY)/version.o version.c + $(CC) $(CFLAGSSTRICT) -c -o $(PFM)/$(VARIETY)/version.o version.c $(AR) $(ARFLAGS) $@ $^ $(PFM)/$(VARIETY)/version.o $(RANLIB) $@ @@ -612,11 +642,11 @@ $(PFM)/$(VARIETY)/%.a: $(PFM)/$(VARIETY)/%: $(ECHO) "$(PFM): $@" - $(CC) $(CFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) + $(CC) $(CFLAGSSTRICT) $(LINKFLAGS) -o $@ $^ $(LIBS) $(PFM)/$(VARIETY)/mpseventsql: $(ECHO) "$(PFM): $@" - $(CC) $(CFLAGS) $(LINKFLAGS) -o $@ $^ $(LIBS) -lsqlite3 + $(CC) $(CFLAGSLAX) $(LINKFLAGS) -o $@ $^ $(LIBS) -lsqlite3 # Special targets for development diff --git a/mps/code/commpost.nmk b/mps/code/commpost.nmk index 7c53d585adb..96fca59e250 100644 --- a/mps/code/commpost.nmk +++ b/mps/code/commpost.nmk @@ -39,8 +39,8 @@ clean: !IFDEF TARGET !IFNDEF VARIETY target: - $(MAKE) /nologo /f $(PFM).nmk VARIETY=hot variety $(MAKE) /nologo /f $(PFM).nmk VARIETY=cool variety + $(MAKE) /nologo /f $(PFM).nmk VARIETY=hot variety !ENDIF !ENDIF @@ -53,15 +53,15 @@ variety: $(PFM)\$(VARIETY)\$(TARGET) !ENDIF !ENDIF -# testrun +# testrun testci testall testansi testpoll # Runs automated test cases. 
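+# For example (assuming the w3i6mv platform), "nmake /f w3i6mv.nmk testci"
+# builds the test cases and runs the CI suite in the cool and hot varieties.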
-testrun: $(TEST_TARGETS) +testrun testci testall testansi testpoll: $(TEST_TARGETS) !IFDEF VARIETY - ..\tool\testrun.bat $(PFM) $(VARIETY) + ..\tool\testrun.bat $(PFM) $(VARIETY) $@ !ELSE - $(MAKE) /nologo /f $(PFM).nmk VARIETY=hot testrun - $(MAKE) /nologo /f $(PFM).nmk VARIETY=cool testrun + $(MAKE) /nologo /f $(PFM).nmk VARIETY=cool $@ + $(MAKE) /nologo /f $(PFM).nmk VARIETY=hot $@ !ENDIF @@ -92,12 +92,9 @@ $(PFM)\hot\mps.lib: $(PFM)\hot\mps.obj $(ECHO) $@ $(LIBMAN) $(LIBFLAGS) /OUT:$@ $** -$(PFM)\cool\mps.lib: \ - $(MPMOBJ) $(AMCOBJ) $(AMSOBJ) $(AWLOBJ) $(LOOBJ) $(SNCOBJ) \ - $(MVFFOBJ) $(PLINTHOBJ) $(POOLNOBJ) +$(PFM)\cool\mps.lib: $(MPMOBJ) $(ECHO) $@ - $(CC) /c $(CFLAGS) /Fo$(PFM)\$(VARIETY)\version.obj version.c - $(LIBMAN) $(LIBFLAGS) /OUT:$@ $** $(PFM)\$(VARIETY)\version.obj + $(LIBMAN) $(LIBFLAGS) /OUT:$@ $** # OTHER GENUINE TARGETS @@ -124,7 +121,7 @@ $(PFM)\$(VARIETY)\amcsshe.exe: $(PFM)\$(VARIETY)\amcsshe.obj \ $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) $(PFM)\$(VARIETY)\amcssth.exe: $(PFM)\$(VARIETY)\amcssth.obj \ - $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)\$(VARIETY)\amsss.exe: $(PFM)\$(VARIETY)\amsss.obj \ $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) @@ -148,7 +145,7 @@ $(PFM)\$(VARIETY)\awluthe.exe: $(PFM)\$(VARIETY)\awluthe.obj \ $(PFM)\$(VARIETY)\awlutth.exe: $(PFM)\$(VARIETY)\awlutth.obj \ $(FMTTESTOBJ) \ - $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)\$(VARIETY)\btcv.exe: $(PFM)\$(VARIETY)\btcv.obj \ $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) @@ -160,7 +157,7 @@ $(PFM)\$(VARIETY)\cvmicv.exe: $(PFM)\$(VARIETY)\cvmicv.obj \ $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) $(PFM)\$(VARIETY)\djbench.exe: $(PFM)\$(VARIETY)\djbench.obj \ - $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)\$(VARIETY)\exposet0.exe: $(PFM)\$(VARIETY)\exposet0.obj \ $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) @@ -168,9 +165,6 @@ $(PFM)\$(VARIETY)\exposet0.exe: $(PFM)\$(VARIETY)\exposet0.obj \ $(PFM)\$(VARIETY)\expt825.exe: $(PFM)\$(VARIETY)\expt825.obj \ $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) -$(PFM)\$(VARIETY)\fbmtest.exe: $(PFM)\$(VARIETY)\fbmtest.obj \ - $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) - $(PFM)\$(VARIETY)\finalcv.exe: $(PFM)\$(VARIETY)\finalcv.obj \ $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) @@ -181,7 +175,10 @@ $(PFM)\$(VARIETY)\fotest.exe: $(PFM)\$(VARIETY)\fotest.obj \ $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(PFM)\$(VARIETY)\gcbench.exe: $(PFM)\$(VARIETY)\gcbench.obj \ - $(PFM)\$(VARIETY)\mps.lib $(FMTTESTOBJ) $(TESTLIBOBJ) + $(FMTTESTOBJ) $(TESTLIBOBJ) $(TESTTHROBJ) + +$(PFM)\$(VARIETY)\landtest.exe: $(PFM)\$(VARIETY)\landtest.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(PFM)\$(VARIETY)\locbwcss.exe: $(PFM)\$(VARIETY)\locbwcss.obj \ $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) @@ -190,7 +187,7 @@ $(PFM)\$(VARIETY)\lockcov.exe: $(PFM)\$(VARIETY)\lockcov.obj \ $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(PFM)\$(VARIETY)\lockut.exe: $(PFM)\$(VARIETY)\lockut.obj \ - $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(TESTTHROBJ) $(PFM)\$(VARIETY)\locusss.exe: $(PFM)\$(VARIETY)\locusss.obj \ $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) @@ -213,8 +210,8 @@ $(PFM)\$(VARIETY)\mv2test.exe: $(PFM)\$(VARIETY)\mv2test.obj \ $(PFM)\$(VARIETY)\nailboardtest.exe: $(PFM)\$(VARIETY)\nailboardtest.obj \ $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) 
-$(PFM)\$(VARIETY)\poolncv.exe: $(PFM)\$(VARIETY)\poolncv.obj \ - $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) +$(PFM)\$(VARIETY)\poolncv.exe: $(PFM)\$(VARIETY)\poolncv.obj \ + $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) $(POOLNOBJ) $(PFM)\$(VARIETY)\qs.exe: $(PFM)\$(VARIETY)\qs.obj \ $(PFM)\$(VARIETY)\mps.lib $(TESTLIBOBJ) diff --git a/mps/code/commpre.nmk b/mps/code/commpre.nmk index 09f4f165207..dbb20936fea 100644 --- a/mps/code/commpre.nmk +++ b/mps/code/commpre.nmk @@ -32,6 +32,7 @@ # FMTTEST as above for the "fmttest" part # FMTSCHEME as above for the "fmtscheme" part # TESTLIB as above for the "testlib" part +# TESTTHR as above for the "testthr" part # NOISY if defined, causes command to be emitted # # @@ -71,11 +72,11 @@ TEST_TARGETS=\ djbench.exe \ exposet0.exe \ expt825.exe \ - fbmtest.exe \ finalcv.exe \ finaltest.exe \ fotest.exe \ gcbench.exe \ + landtest.exe \ locbwcss.exe \ lockcov.exe \ lockut.exe \ @@ -130,17 +131,17 @@ MPMCOMMON=\ \ \ \ + \ \ \ \ + \ \ - \ \ \ \ \ \ - \ \ \ \ @@ -149,7 +150,6 @@ MPMCOMMON=\ \ \ \ - \ \ \ \ @@ -162,12 +162,11 @@ MPMCOMMON=\ \ \ \ - \ \ \ \ \ - \ + \ PLINTH = AMC = @@ -177,10 +176,12 @@ LO = MVFF = POOLN = SNC = -DW = +FMTDY = FMTTEST = FMTSCHEME = -TESTLIB = +TESTLIB = +TESTTHR = +MPM = $(MPMCOMMON) $(MPMPF) $(AMC) $(AMS) $(AWL) $(LO) $(MV2) $(MVFF) $(PLINTH) # CHECK PARAMETERS @@ -195,9 +196,15 @@ TESTLIB = !IFNDEF PFMDEFS !ERROR commpre.nmk: PFMDEFS not defined !ENDIF +!IFNDEF MPM +!ERROR commpre.nmk: MPM not defined +!ENDIF !IFNDEF MPMCOMMON !ERROR commpre.nmk: MPMCOMMON not defined !ENDIF +!IFNDEF MPMPF +!ERROR commpre.nmk: MPMPF not defined +!ENDIF !IFNDEF PLINTH !ERROR commpre.nmk: PLINTH not defined !ENDIF @@ -216,8 +223,8 @@ TESTLIB = !IFNDEF SNC !ERROR commpre.nmk: SNC not defined !ENDIF -!IFNDEF DW -!ERROR commpre.nmk: DW not defined +!IFNDEF FMTDY +!ERROR commpre.nmk: FMTDY not defined !ENDIF !IFNDEF FMTTEST !ERROR commpre.nmk: FMTTEST not defined @@ -228,6 +235,9 @@ TESTLIB = !IFNDEF TESTLIB !ERROR commpre.nmk: TESTLIB not defined !ENDIF +!IFNDEF TESTTHR +!ERROR commpre.nmk: TESTTHR not defined +!ENDIF # DECLARATIONS diff --git a/mps/code/config.h b/mps/code/config.h index 1dfd7858f86..552b3c943d9 100644 --- a/mps/code/config.h +++ b/mps/code/config.h @@ -147,11 +147,60 @@ * cc -O2 -c -DCONFIG_PLINTH_NONE mps.c */ -#if defined(CONFIG_PLINTH_NONE) +#if !defined(CONFIG_PLINTH_NONE) +#define PLINTH +#else #define PLINTH_NONE #endif +/* CONFIG_PF_ANSI -- use the ANSI platform + * + * This symbol tells mps.c to exclude the sources for the + * auto-detected platform, and use the generic ("ANSI") platform + * instead. + */ + +#if defined(CONFIG_PF_ANSI) +#define PLATFORM_ANSI +#endif + + +/* CONFIG_THREAD_SINGLE -- support single-threaded execution only + * + * This symbol causes the MPS to be built for single-threaded + * execution only, where locks are not needed and so lock operations + * can be defined as no-ops by lock.h. + */ + +#if !defined(CONFIG_THREAD_SINGLE) +#define LOCK +#else +#define LOCK_NONE +#endif + + +/* CONFIG_POLL_NONE -- no support for polling + * + * This symbol causes the MPS to built without support for polling. + * This means that garbage collections will only happen if requested + * explicitly via mps_arena_collect() or mps_arena_step(), but it also + * means that protection is not needed, and so shield operations can + * be replaced with no-ops in mpm.h. 
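+ *
+ * A client that is single-threaded and triggers collections itself
+ * can thus (as with CONFIG_PLINTH_NONE above) build the MPS with
+ * something like:
+ *
+ *   cc -O2 -c -DCONFIG_THREAD_SINGLE -DCONFIG_POLL_NONE mps.c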
+ */ + +#if !defined(CONFIG_POLL_NONE) +#define REMEMBERED_SET +#define SHIELD +#else +#if !defined(CONFIG_THREAD_SINGLE) +#error "CONFIG_POLL_NONE without CONFIG_THREAD_SINGLE" +#endif +#define REMEMBERED_SET_NONE +#define SHIELD_NONE +#endif + + #define MPS_VARIETY_STRING \ MPS_ASSERT_STRING "." MPS_LOG_STRING "." MPS_STATS_STRING @@ -243,6 +292,11 @@ /* Attribute for functions that may be unused in some build configurations. * GCC: + * + * This attribute must be applied to all Check functions, otherwise + * the RASH variety fails to compile with -Wunused-function. (It + * should not be applied to functions that are unused in all build + * configurations: these functions should not be compiled.) */ #if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) #define ATTRIBUTE_UNUSED __attribute__((__unused__)) @@ -264,11 +318,6 @@ #define BUFFER_RANK_DEFAULT (mps_rank_exact()) -/* CBS Configuration -- see */ - -#define CBS_EXTEND_BY_DEFAULT ((Size)4096) - - /* Format defaults: see */ #define FMT_ALIGN_DEFAULT ((Align)MPS_PF_ALIGN) @@ -310,6 +359,7 @@ /* Pool MV Configuration -- see */ +#define MV_ALIGN_DEFAULT MPS_PF_ALIGN #define MV_EXTEND_BY_DEFAULT ((Size)65536) #define MV_AVG_SIZE_DEFAULT ((Size)32) #define MV_MAX_SIZE_DEFAULT ((Size)65536) @@ -375,13 +425,15 @@ pool to be very heavily used. */ #define CONTROL_EXTEND_BY 4096 -#define VM_ARENA_SIZE_DEFAULT ((Size)1 << 20) +#define VM_ARENA_SIZE_DEFAULT ((Size)1 << 28) /* Stack configuration */ /* Currently StackProbe has a useful implementation only on Windows. */ -#if defined(MPS_OS_W3) && defined(MPS_ARCH_I3) +#if defined(PLATFORM_ANSI) +#define StackProbeDEPTH ((Size)0) +#elif defined(MPS_OS_W3) && defined(MPS_ARCH_I3) #define StackProbeDEPTH ((Size)500) #elif defined(MPS_OS_W3) && defined(MPS_ARCH_I6) #define StackProbeDEPTH ((Size)500) @@ -410,6 +462,7 @@ * * Source Symbols Header Feature * =========== ========================= ============= ==================== + * eventtxt.c setenv _GNU_SOURCE * lockli.c pthread_mutexattr_settype _XOPEN_SOURCE >= 500 * prmci3li.c REG_EAX etc. _GNU_SOURCE * prmci6li.c REG_RAX etc. _GNU_SOURCE @@ -428,9 +481,14 @@ #if defined(MPS_OS_LI) +#if defined(_XOPEN_SOURCE) && _XOPEN_SOURCE < 500 +#undef _XOPEN_SOURCE +#endif +#if !defined(_XOPEN_SOURCE) #define _XOPEN_SOURCE 500 +#endif -#ifndef _GNU_SOURCE +#if !defined(_GNU_SOURCE) #define _GNU_SOURCE #endif @@ -556,9 +614,6 @@ #define MPS_PROD_STRING "mps" #define MPS_PROD_MPS -#define THREAD_MULTI -#define PROTECTION -#define PROD_CHECKLEVEL_INITIAL CheckLevelSHALLOW /* TODO: This should be proportional to the memory usage of the MPS, not a constant. That will require design, and then some interface and diff --git a/mps/code/dbgpool.c b/mps/code/dbgpool.c index 77c1d6eaf40..bf48208eb37 100644 --- a/mps/code/dbgpool.c +++ b/mps/code/dbgpool.c @@ -123,10 +123,14 @@ Bool PoolDebugOptionsCheck(PoolDebugOptions opt) ARG_DEFINE_KEY(pool_debug_options, PoolDebugOptions); +static PoolDebugOptionsStruct debugPoolOptionsDefault = { + "POST", 4, "DEAD", 4, +}; + static Res DebugPoolInit(Pool pool, ArgList args) { Res res; - PoolDebugOptions options; + PoolDebugOptions options = &debugPoolOptionsDefault; PoolDebugMixin debug; TagInitMethod tagInit; Size tagSize; @@ -134,10 +138,8 @@ static Res DebugPoolInit(Pool pool, ArgList args) AVERT(Pool, pool); - /* TODO: Split this structure into separate keyword arguments, - now that we can support them. 
*/ - ArgRequire(&arg, args, MPS_KEY_POOL_DEBUG_OPTIONS); - options = (PoolDebugOptions)arg.val.pool_debug_options; + if (ArgPick(&arg, args, MPS_KEY_POOL_DEBUG_OPTIONS)) + options = (PoolDebugOptions)arg.val.pool_debug_options; AVERT(PoolDebugOptions, options); @@ -158,10 +160,6 @@ static Res DebugPoolInit(Pool pool, ArgList args) /* into Addr memory, to avoid breaking . */ debug->fenceSize = options->fenceSize; if (debug->fenceSize != 0) { - if (debug->fenceSize % PoolAlignment(pool) != 0) { - res = ResPARAM; - goto alignFail; - } /* Fenceposting turns on tagging */ if (tagInit == NULL) { tagSize = 0; @@ -176,10 +174,6 @@ static Res DebugPoolInit(Pool pool, ArgList args) /* into Addr memory, to avoid breaking . */ debug->freeSize = options->freeSize; if (debug->freeSize != 0) { - if (PoolAlignment(pool) % debug->freeSize != 0) { - res = ResPARAM; - goto alignFail; - } debug->freeTemplate = options->freeTemplate; } @@ -190,7 +184,10 @@ static Res DebugPoolInit(Pool pool, ArgList args) /* This pool has to be like the arena control pool: the blocks */ /* allocated must be accessible using void*. */ MPS_ARGS_BEGIN(pcArgs) { - MPS_ARGS_ADD(pcArgs, MPS_KEY_EXTEND_BY, debug->tagSize); /* FIXME: Check this */ + /* By setting EXTEND_BY to debug->tagSize we get the smallest + possible extensions compatible with the tags, and so the + least amount of wasted space. */ + MPS_ARGS_ADD(pcArgs, MPS_KEY_EXTEND_BY, debug->tagSize); MPS_ARGS_ADD(pcArgs, MPS_KEY_MFS_UNIT_SIZE, debug->tagSize); res = PoolCreate(&debug->tagPool, PoolArena(pool), PoolClassMFS(), pcArgs); } MPS_ARGS_END(pcArgs); @@ -205,7 +202,6 @@ static Res DebugPoolInit(Pool pool, ArgList args) return ResOK; tagFail: -alignFail: SuperclassOfPool(pool)->finish(pool); AVER(res != ResOK); return res; @@ -231,39 +227,150 @@ static void DebugPoolFinish(Pool pool) } -/* freeSplat -- splat free block with splat pattern +/* patternIterate -- call visitor for occurrences of pattern between + * base and limit * - * If base is in a segment, the whole block has to be in it. + * pattern is an arbitrary pattern that's size bytes long. + * + * Imagine that the entirety of memory were covered by contiguous + * copies of pattern starting at address 0. Then call visitor for each + * copy (or part) of pattern that lies between base and limit. In each + * call, target is the address of the copy or part (where base <= + * target < limit); source is the corresponding byte of the pattern + * (where pattern <= source < pattern + size); and size is the length + * of the copy or part. 
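+ *
+ * For example, with a 4-byte pattern (such as the default "POST"
+ * fencepost template) and the illustrative range base 0x1002, limit
+ * 0x100B, the visitor is called three times: (target 0x1002, source
+ * pattern+2, size 2), then (target 0x1004, source pattern, size 4),
+ * then (target 0x1008, source pattern, size 3).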
*/ +typedef Bool (*patternVisitor)(Addr target, ReadonlyAddr source, Size size); + +static Bool patternIterate(ReadonlyAddr pattern, Size size, + Addr base, Addr limit, patternVisitor visitor) +{ + Addr p; + + AVER(pattern != NULL); + AVER(0 < size); + AVER(base != NULL); + AVER(base <= limit); + + p = base; + while (p < limit) { + Addr end = AddrAdd(p, size); + Addr rounded = AddrRoundUp(p, size); + Size offset = (Word)p % size; + if (end < p || rounded < p) { + /* Address range overflow */ + break; + } else if (p == rounded && end <= limit) { + /* Room for a whole copy */ + if (!(*visitor)(p, pattern, size)) + return FALSE; + p = end; + } else if (p < rounded && rounded <= end && rounded <= limit) { + /* Copy up to rounded */ + if (!(*visitor)(p, ReadonlyAddrAdd(pattern, offset), + AddrOffset(p, rounded))) + return FALSE; + p = rounded; + } else { + /* Copy up to limit */ + AVER(limit <= end && (p == rounded || limit <= rounded)); + if (!(*visitor)(p, ReadonlyAddrAdd(pattern, offset), + AddrOffset(p, limit))) + return FALSE; + p = limit; + } + } + + return TRUE; +} + + +/* patternCopy -- copy pattern to fill a range + * + * Fill the range of addresses from base (inclusive) to limit + * (exclusive) with copies of pattern (which is size bytes long). + */ + +static Bool patternCopyVisitor(Addr target, ReadonlyAddr source, Size size) +{ + (void)AddrCopy(target, source, size); + return TRUE; +} + +static void patternCopy(ReadonlyAddr pattern, Size size, Addr base, Addr limit) +{ + (void)patternIterate(pattern, size, base, limit, patternCopyVisitor); +} + + +/* patternCheck -- check pattern against a range + * + * Compare the range of addresses from base (inclusive) to limit + * (exclusive) with copies of pattern (which is size bytes long). The + * copies of pattern must be arranged so that fresh copies start at + * aligned addresses wherever possible. + */ + +static Bool patternCheckVisitor(Addr target, ReadonlyAddr source, Size size) +{ + return AddrComp(target, source, size) == 0; +} + +static Bool patternCheck(ReadonlyAddr pattern, Size size, Addr base, Addr limit) +{ + return patternIterate(pattern, size, base, limit, patternCheckVisitor); +} + + +/* debugPoolSegIterate -- iterate over a range of segments in an arena + * + * Expects to be called on a range corresponding to objects withing a + * single pool. + * + * NOTE: This relies on pools consistently using segments + * contiguously. + */ + +static void debugPoolSegIterate(Arena arena, Addr base, Addr limit, + void (*visitor)(Arena, Seg)) +{ + Seg seg; + + if (SegOfAddr(&seg, arena, base)) { + do { + base = SegLimit(seg); + (*visitor)(arena, seg); + } while (base < limit && SegOfAddr(&seg, arena, base)); + AVER(base >= limit); /* shouldn't run out of segments */ + } +} + +static void debugPoolShieldExpose(Arena arena, Seg seg) +{ + ShieldExpose(arena, seg); +} + +static void debugPoolShieldCover(Arena arena, Seg seg) +{ + ShieldCover(arena, seg); +} + + +/* freeSplat -- splat free block with splat pattern */ + static void freeSplat(PoolDebugMixin debug, Pool pool, Addr base, Addr limit) { - Addr p, next; - Size freeSize = debug->freeSize; Arena arena; - Seg seg = NULL; /* suppress "may be used uninitialized" */ - Bool inSeg; AVER(base < limit); - /* If the block is in a segment, make sure any shield is up. */ + /* If the block is in one or more segments, make sure the segments + are exposed so that we can overwrite the block with the pattern. 
*/ arena = PoolArena(pool); - inSeg = SegOfAddr(&seg, arena, base); - if (inSeg) { - AVER(limit <= SegLimit(seg)); - ShieldExpose(arena, seg); - } - /* Write as many copies of the template as fit in the block. */ - for (p = base, next = AddrAdd(p, freeSize); - next <= limit && p < next /* watch out for overflow in next */; - p = next, next = AddrAdd(next, freeSize)) - (void)AddrCopy(p, debug->freeTemplate, freeSize); - /* Fill the tail of the block with a partial copy of the template. */ - if (next > limit || next < p) - (void)AddrCopy(p, debug->freeTemplate, AddrOffset(p, limit)); - if (inSeg) { - ShieldCover(arena, seg); - } + debugPoolSegIterate(arena, base, limit, debugPoolShieldExpose); + patternCopy(debug->freeTemplate, debug->freeSize, base, limit); + debugPoolSegIterate(arena, base, limit, debugPoolShieldCover); } @@ -271,41 +378,17 @@ static void freeSplat(PoolDebugMixin debug, Pool pool, Addr base, Addr limit) static Bool freeCheck(PoolDebugMixin debug, Pool pool, Addr base, Addr limit) { - Addr p, next; - Size freeSize = debug->freeSize; - Res res; + Bool res; Arena arena; - Seg seg = NULL; /* suppress "may be used uninitialized" */ - Bool inSeg; AVER(base < limit); - /* If the block is in a segment, make sure any shield is up. */ + /* If the block is in one or more segments, make sure the segments + are exposed so we can read the pattern. */ arena = PoolArena(pool); - inSeg = SegOfAddr(&seg, arena, base); - if (inSeg) { - AVER(limit <= SegLimit(seg)); - ShieldExpose(arena, seg); - } - /* Compare this to the AddrCopys in freeSplat. */ - /* Check the complete copies of the template in the block. */ - for (p = base, next = AddrAdd(p, freeSize); - next <= limit && p < next /* watch out for overflow in next */; - p = next, next = AddrAdd(next, freeSize)) - if (AddrComp(p, debug->freeTemplate, freeSize) != 0) { - res = FALSE; goto done; - } - /* Check the partial copy of the template at the tail of the block. */ - if (next > limit || next < p) - if (AddrComp(p, debug->freeTemplate, AddrOffset(p, limit)) != 0) { - res = FALSE; goto done; - } - res = TRUE; - -done: - if (inSeg) { - ShieldCover(arena, seg); - } + debugPoolSegIterate(arena, base, limit, debugPoolShieldExpose); + res = patternCheck(debug->freeTemplate, debug->freeSize, base, limit); + debugPoolSegIterate(arena, base, limit, debugPoolShieldCover); return res; } @@ -351,63 +434,75 @@ static void freeCheckFree(PoolDebugMixin debug, * start fp client object slop end fp * * slop is the extra allocation from rounding up the client request to - * the pool's alignment. The fenceposting code does this, so there's a - * better chance of the end fencepost being flush with the next object - * (can't be guaranteed, since the underlying pool could have allocated - * an even larger block). The alignment slop is filled from the - * fencepost template as well (as much as fits, .fence.size guarantees - * the template is larger). + * the pool's alignment. The fenceposting code adds this slop so that + * there's a better chance of the end fencepost being flush with the + * next object (though it can't be guaranteed, since the underlying + * pool could have allocated an even larger block). The alignment slop + * is filled from the fencepost template as well. + * + * Keep in sync with fenceCheck. 
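+ *
+ * For example, with the default 4-byte "POST" template and a pool
+ * alignment of (say) 8, a client request for 13 bytes allocates
+ * 8 + 16 + 8 = 32 bytes from the underlying pool: an 8-byte start
+ * fencepost, 13 bytes of client object, 3 bytes of slop, and an
+ * 8-byte end fencepost, with the fenceposts and slop filled from
+ * copies of the template.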
*/ static Res fenceAlloc(Addr *aReturn, PoolDebugMixin debug, Pool pool, Size size, Bool withReservoir) { Res res; - Addr new, clientNew; - Size alignedSize; + Addr obj, startFence, clientNew, clientLimit, limit; + Size alignedFenceSize, alignedSize; AVER(aReturn != NULL); + AVERT(PoolDebugMixin, debug); + AVERT(Pool, pool); + alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool)); alignedSize = SizeAlignUp(size, PoolAlignment(pool)); - res = freeCheckAlloc(&new, debug, pool, alignedSize + 2*debug->fenceSize, + res = freeCheckAlloc(&obj, debug, pool, + alignedSize + 2 * alignedFenceSize, withReservoir); if (res != ResOK) return res; - clientNew = AddrAdd(new, debug->fenceSize); + + startFence = obj; + clientNew = AddrAdd(startFence, alignedFenceSize); + clientLimit = AddrAdd(clientNew, size); + limit = AddrAdd(clientNew, alignedSize + alignedFenceSize); + /* @@@@ shields? */ - /* start fencepost */ - (void)AddrCopy(new, debug->fenceTemplate, debug->fenceSize); - /* alignment slop */ - (void)AddrCopy(AddrAdd(clientNew, size), - debug->fenceTemplate, alignedSize - size); - /* end fencepost */ - (void)AddrCopy(AddrAdd(clientNew, alignedSize), - debug->fenceTemplate, debug->fenceSize); + patternCopy(debug->fenceTemplate, debug->fenceSize, startFence, clientNew); + patternCopy(debug->fenceTemplate, debug->fenceSize, clientLimit, limit); *aReturn = clientNew; - return res; + return ResOK; } -/* fenceCheck -- check fences of an object */ +/* fenceCheck -- check fences of an object + * + * Keep in sync with fenceAlloc. + */ static Bool fenceCheck(PoolDebugMixin debug, Pool pool, Addr obj, Size size) { - Size alignedSize; + Addr startFence, clientNew, clientLimit, limit; + Size alignedFenceSize, alignedSize; AVERT_CRITICAL(PoolDebugMixin, debug); AVERT_CRITICAL(Pool, pool); /* Can't check obj */ + alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool)); alignedSize = SizeAlignUp(size, PoolAlignment(pool)); + + startFence = AddrSub(obj, alignedFenceSize); + clientNew = obj; + clientLimit = AddrAdd(clientNew, size); + limit = AddrAdd(clientNew, alignedSize + alignedFenceSize); + /* @@@@ shields? 
*/ - /* Compare this to the AddrCopys in fenceAlloc */ - return (AddrComp(AddrSub(obj, debug->fenceSize), debug->fenceTemplate, - debug->fenceSize) == 0 - && AddrComp(AddrAdd(obj, size), debug->fenceTemplate, - alignedSize - size) == 0 - && AddrComp(AddrAdd(obj, alignedSize), debug->fenceTemplate, - debug->fenceSize) == 0); + return patternCheck(debug->fenceTemplate, debug->fenceSize, + startFence, clientNew) + && patternCheck(debug->fenceTemplate, debug->fenceSize, + clientLimit, limit); } @@ -416,13 +511,14 @@ static Bool fenceCheck(PoolDebugMixin debug, Pool pool, Addr obj, Size size) static void fenceFree(PoolDebugMixin debug, Pool pool, Addr old, Size size) { - Size alignedSize; + Size alignedFenceSize, alignedSize; ASSERT(fenceCheck(debug, pool, old, size), "fencepost check on free"); + alignedFenceSize = SizeAlignUp(debug->fenceSize, PoolAlignment(pool)); alignedSize = SizeAlignUp(size, PoolAlignment(pool)); - freeCheckFree(debug, pool, AddrSub(old, debug->fenceSize), - alignedSize + 2*debug->fenceSize); + freeCheckFree(debug, pool, AddrSub(old, alignedFenceSize), + alignedSize + 2 * alignedFenceSize); } diff --git a/mps/code/dbgpool.h b/mps/code/dbgpool.h index bacecb11253..e00cfe19ab8 100644 --- a/mps/code/dbgpool.h +++ b/mps/code/dbgpool.h @@ -26,9 +26,9 @@ typedef void (*TagInitMethod)(void* tag, va_list args); */ typedef struct PoolDebugOptionsStruct { - void* fenceTemplate; + const void *fenceTemplate; Size fenceSize; - void* freeTemplate; + const void *freeTemplate; Size freeSize; /* TagInitMethod tagInit; */ /* Size tagSize; */ @@ -43,9 +43,9 @@ typedef PoolDebugOptionsStruct *PoolDebugOptions; typedef struct PoolDebugMixinStruct { Sig sig; - Addr fenceTemplate; + const struct AddrStruct *fenceTemplate; Size fenceSize; - Addr freeTemplate; + const struct AddrStruct *freeTemplate; Size freeSize; TagInitMethod tagInit; Size tagSize; diff --git a/mps/code/djbench.c b/mps/code/djbench.c index c1f37f412b2..00bb5d3f954 100644 --- a/mps/code/djbench.c +++ b/mps/code/djbench.c @@ -48,6 +48,7 @@ static double pact = 0.2; /* probability per pass of acting */ static unsigned rinter = 75; /* pass interval for recursion */ static unsigned rmax = 10; /* maximum recursion depth */ static mps_bool_t zoned = TRUE; /* arena allocates using zones */ +static size_t arenasize = 256ul * 1024 * 1024; /* arena size */ #define DJRUN(fname, alloc, free) \ static unsigned fname##_inner(mps_ap_t ap, unsigned depth, unsigned r) { \ @@ -177,7 +178,7 @@ static void wrap(dj_t dj, mps_class_t dummy, const char *name) static void arena_wrap(dj_t dj, mps_class_t pool_class, const char *name) { MPS_ARGS_BEGIN(args) { - MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 256ul * 1024 * 1024); /* FIXME: Why is there no default? 
*/ + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, arenasize); MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, zoned); DJMUST(mps_arena_create_k(&arena, mps_arena_class_vm(), args)); } MPS_ARGS_END(args); @@ -201,6 +202,7 @@ static struct option longopts[] = { {"rinter", required_argument, NULL, 'r'}, {"rmax", required_argument, NULL, 'd'}, {"seed", required_argument, NULL, 'x'}, + {"arena-size", required_argument, NULL, 'm'}, {"arena-unzoned", no_argument, NULL, 'z'}, {NULL, 0, NULL, 0} }; @@ -235,7 +237,7 @@ int main(int argc, char *argv[]) { seed = rnd_seed(); - while ((ch = getopt_long(argc, argv, "ht:i:p:b:s:a:r:d:x:z", longopts, NULL)) != -1) + while ((ch = getopt_long(argc, argv, "ht:i:p:b:s:a:r:d:m:x:z", longopts, NULL)) != -1) switch (ch) { case 't': nthreads = (unsigned)strtoul(optarg, NULL, 10); @@ -267,6 +269,20 @@ int main(int argc, char *argv[]) { case 'z': zoned = FALSE; break; + case 'm': { + char *p; + arenasize = (unsigned)strtoul(optarg, &p, 10); + switch(toupper(*p)) { + case 'G': arenasize <<= 30; break; + case 'M': arenasize <<= 20; break; + case 'K': arenasize <<= 10; break; + case '\0': break; + default: + fprintf(stderr, "Bad arena size %s\n", optarg); + return EXIT_FAILURE; + } + } + break; default: fprintf(stderr, "Usage: %s [option...] [test...]\n" diff --git a/mps/code/eventdef.h b/mps/code/eventdef.h index 4ce313dcfc8..0c94da20cf4 100644 --- a/mps/code/eventdef.h +++ b/mps/code/eventdef.h @@ -36,8 +36,8 @@ */ #define EVENT_VERSION_MAJOR ((unsigned)1) -#define EVENT_VERSION_MEDIAN ((unsigned)1) -#define EVENT_VERSION_MINOR ((unsigned)7) +#define EVENT_VERSION_MEDIAN ((unsigned)2) +#define EVENT_VERSION_MINOR ((unsigned)0) /* EVENT_LIST -- list of event types and general properties @@ -95,7 +95,7 @@ EVENT(X, PoolFinish , 0x0016, TRUE, Pool) \ EVENT(X, PoolAlloc , 0x0017, TRUE, Object) \ EVENT(X, PoolFree , 0x0018, TRUE, Object) \ - EVENT(X, CBSInit , 0x0019, TRUE, Pool) \ + EVENT(X, LandInit , 0x0019, TRUE, Pool) \ EVENT(X, Intern , 0x001a, TRUE, User) \ EVENT(X, Label , 0x001b, TRUE, User) \ EVENT(X, TraceStart , 0x001c, TRUE, Trace) \ @@ -187,7 +187,7 @@ EVENT(X, VMCompact , 0x0079, TRUE, Arena) \ EVENT(X, amcScanNailed , 0x0080, TRUE, Seg) \ EVENT(X, AMCTraceEnd , 0x0081, TRUE, Trace) \ - EVENT(X, TraceStartPoolGen , 0x0082, TRUE, Trace) \ + EVENT(X, TraceCreatePoolGen , 0x0082, TRUE, Trace) \ /* new events for performance analysis of large heaps. 
*/ \ EVENT(X, TraceCondemnZones , 0x0083, TRUE, Trace) \ EVENT(X, ArenaGenZoneAdd , 0x0084, TRUE, Arena) \ @@ -311,8 +311,8 @@ PARAM(X, 1, A, old) \ PARAM(X, 2, W, size) -#define EVENT_CBSInit_PARAMS(PARAM, X) \ - PARAM(X, 0, P, cbs) \ +#define EVENT_LandInit_PARAMS(PARAM, X) \ + PARAM(X, 0, P, land) \ PARAM(X, 1, P, owner) #define EVENT_Intern_PARAMS(PARAM, X) \ @@ -713,18 +713,18 @@ PARAM(X, 19, W, pRL) \ PARAM(X, 20, W, pRLr) -#define EVENT_TraceStartPoolGen_PARAMS(PARAM, X) \ - PARAM(X, 0, P, chain) /* chain (or NULL for topGen) */ \ - PARAM(X, 1, B, top) /* 1 for topGen, 0 otherwise */ \ - PARAM(X, 2, W, index) /* index of generation in the chain */ \ - PARAM(X, 3, P, gendesc) /* generation description */ \ - PARAM(X, 4, W, capacity) /* capacity of generation */ \ - PARAM(X, 5, D, mortality) /* mortality of generation */ \ - PARAM(X, 6, W, zone) /* zone set of generation */ \ - PARAM(X, 7, P, pool) /* pool */ \ - PARAM(X, 8, W, serial) /* pool gen serial number */ \ - PARAM(X, 9, W, totalSize) /* total size of pool gen */ \ - PARAM(X, 10, W, newSizeAtCreate) /* new size of pool gen at trace create */ +#define EVENT_TraceCreatePoolGen_PARAMS(PARAM, X) \ + PARAM(X, 0, P, gendesc) /* generation description */ \ + PARAM(X, 1, W, capacity) /* capacity of generation */ \ + PARAM(X, 2, D, mortality) /* mortality of generation */ \ + PARAM(X, 3, W, zone) /* zone set of generation */ \ + PARAM(X, 4, P, pool) /* pool */ \ + PARAM(X, 5, W, totalSize) /* total size of pool gen */ \ + PARAM(X, 6, W, freeSize) /* free size of pool gen */ \ + PARAM(X, 7, W, newSize) /* new size of pool gen */ \ + PARAM(X, 8, W, oldSize) /* old size of pool gen */ \ + PARAM(X, 9, W, newDeferredSize) /* new size (deferred) of pool gen */ \ + PARAM(X, 10, W, oldDeferredSize) /* old size (deferred) of pool gen */ #define EVENT_TraceCondemnZones_PARAMS(PARAM, X) \ PARAM(X, 0, P, trace) /* the trace */ \ @@ -733,7 +733,7 @@ #define EVENT_ArenaGenZoneAdd_PARAMS(PARAM, X) \ PARAM(X, 0, P, arena) /* the arena */ \ - PARAM(X, 1, W, gen) /* the generation number */ \ + PARAM(X, 1, P, gendesc) /* the generation description */ \ PARAM(X, 2, W, zoneSet) /* the new zoneSet */ #define EVENT_ArenaUseFreeZone_PARAMS(PARAM, X) \ diff --git a/mps/code/eventrep.c b/mps/code/eventrep.c index 375d793c13f..969d8adcb67 100644 --- a/mps/code/eventrep.c +++ b/mps/code/eventrep.c @@ -143,37 +143,6 @@ static void error(const char *format, ...) 
MPS_BEGIN if (!(cond)) error("line %d " #cond, __LINE__); MPS_END -#ifdef MPS_PROD_EPCORE - - -/* ensurePSFormat -- return the PS format, creating it, if necessary */ - -static mps_fmt_t psFormat = NULL; - -static void ensurePSFormat(mps_fmt_t *fmtOut, mps_arena_t arena) -{ - mps_res_t eres; - - if (psFormat == NULL) { - eres = mps_fmt_create_A(&psFormat, arena, ps_fmt_A()); - verifyMPS(eres); - } - *fmtOut = psFormat; -} - - -/* finishPSFormat -- finish the PS format, if necessary */ - -static void finishPSFormat(void) -{ - if (psFormat != NULL) - mps_fmt_destroy(psFormat); -} - - -#endif - - /* objectTableCreate -- create an objectTable */ static objectTable objectTableCreate(poolSupport support) @@ -418,10 +387,6 @@ void EventReplay(Event event, Word etime) case EventArenaDestroy: { /* arena */ found = TableLookup(&entry, arenaTable, (Word)event->p.p0); verify(found); -#ifdef MPS_PROD_EPCORE - /* @@@@ assuming there's only one arena at a time */ - finishPSFormat(); -#endif mps_arena_destroy((mps_arena_t)entry); ires = TableRemove(arenaTable, (Word)event->pw.p0); verify(ires == ResOK); @@ -456,30 +421,6 @@ void EventReplay(Event event, Word etime) /* all internal only */ ++discardedEvents; } break; -#ifdef MPS_PROD_EPCORE - case EventPoolInitEPVM: { - /* pool, arena, format, maxSaveLevel, saveLevel */ - mps_arena_t arena; - mps_fmt_t format; - - found = TableLookup(&entry, arenaTable, (Word)event->pppuu.p1); - verify(found); - arena = (mps_arena_t)entry; - ensurePSFormat(&format, arena); /* We know what the format is. */ - poolRecreate(event->pppuu.p0, event->pppuu.p1, - mps_class_epvm(), supportNothing, 2, format, - (mps_epvm_save_level_t)event->pppuu.u3, - (mps_epvm_save_level_t)event->pppuu.u4); - } break; - case EventPoolInitEPDL: { - /* pool, arena, isEPDL, extendBy, avgSize, align */ - poolRecreate(event->ppuwww.p0, event->ppuwww.p1, - event->ppuwww.u2 ? mps_class_epdl() : mps_class_epdr(), - event->ppuwww.u2 ? supportTruncate : supportFree, 0, - (size_t)event->ppuwww.w3, (size_t)event->ppuwww.w4, - (size_t)event->ppuwww.w5); - } break; -#endif case EventPoolFinish: { /* pool */ found = TableLookup(&entry, poolTable, (Word)event->p.p0); if (found) { @@ -542,22 +483,6 @@ void EventReplay(Event event, Word etime) ++discardedEvents; } } break; -#ifdef MPS_PROD_EPCORE - case EventBufferInitEPVM: { /* buffer, pool, isObj */ - found = TableLookup(&entry, poolTable, (Word)event->ppu.p1); - if (found) { - poolRep rep = (poolRep)entry; - - if(rep->bufferClassLevel == 2) { /* see .bufclass */ - apRecreate(event->ppu.p0, event->ppu.p1, (mps_bool_t)event->ppu.u2); - } else { - ++discardedEvents; - } - } else { - ++discardedEvents; - } - } break; -#endif case EventBufferFinish: { /* buffer */ found = TableLookup(&entry, apTable, (Word)event->p.p0); if (found) { @@ -620,26 +545,6 @@ void EventReplay(Event event, Word etime) ++discardedEvents; } } break; -#ifdef MPS_PROD_EPCORE - case EventPoolPush: { /* pool */ - found = TableLookup(&entry, poolTable, (Word)event->p.p0); - if (found) { - poolRep rep = (poolRep)entry; - - /* It must be EPVM. */ - mps_epvm_save(rep->pool); - } - } break; - case EventPoolPop: { /* pool, level */ - found = TableLookup(&entry, poolTable, (Word)event->pu.p0); - if (found) { - poolRep rep = (poolRep)entry; - - /* It must be EPVM. 
*/ - mps_epvm_restore(rep->pool, (mps_epvm_save_level_t)event->pu.u1); - } - } break; -#endif case EventCommitLimitSet: { /* arena, limit, succeeded */ found = TableLookup(&entry, arenaTable, (Word)event->pwu.p0); verify(found); diff --git a/mps/code/eventtxt.c b/mps/code/eventtxt.c index 4c50ac994f9..01b071aee3a 100644 --- a/mps/code/eventtxt.c +++ b/mps/code/eventtxt.c @@ -29,20 +29,21 @@ * $Id$ */ +#include "check.h" +#include "config.h" +#include "eventcom.h" +#include "eventdef.h" #include "mps.h" #include "mpsavm.h" #include "mpscmvff.h" -#include "check.h" -#include "config.h" -#include "eventdef.h" -#include "eventcom.h" #include "table.h" #include "testlib.h" /* for ulongest_t and associated print formats */ #include +#include #include #include /* exit, EXIT_FAILURE, EXIT_SUCCESS */ -#include /* strcpy, strlen */ +#include /* strcpy, strerror, strlen */ static const char *prog; /* program name */ static const char *logFileName = NULL; @@ -571,6 +572,11 @@ int main(int argc, char *argv[]) everror("unable to open %s", logFileName); } + /* Ensure no telemetry output. */ + res = setenv("MPS_TELEMETRY_CONTROL", "0", 1); + if (res != 0) + everror("failed to set MPS_TELEMETRY_CONTROL: %s", strerror(errno)); + res = mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none); if (res != MPS_RES_OK) everror("failed to create arena: %d", res); diff --git a/mps/code/exposet0.c b/mps/code/exposet0.c index 21f2567cbaa..7e097d6b034 100644 --- a/mps/code/exposet0.c +++ b/mps/code/exposet0.c @@ -222,6 +222,7 @@ static void *test(void *arg, size_t s) } (void)mps_commit(busy_ap, busy_init, 64); + mps_arena_park(arena); mps_ap_destroy(busy_ap); mps_ap_destroy(ap); mps_root_destroy(exactRoot); @@ -244,7 +245,7 @@ int main(int argc, char *argv[]) die(mps_arena_create(&arena, mps_arena_class_vm(), 2*testArenaSIZE), "arena_create"); mps_message_type_enable(arena, mps_message_type_gc()); - die(mps_arena_commit_limit_set(arena, testArenaSIZE), "set limit"); + die(mps_arena_commit_limit_set(arena, 2*testArenaSIZE), "set limit"); die(mps_thread_reg(&thread, arena), "thread_reg"); mps_tramp(&r, test, arena, 0); mps_thread_dereg(thread); diff --git a/mps/code/expt825.c b/mps/code/expt825.c index 5e775455909..bd6c5f2b1e0 100644 --- a/mps/code/expt825.c +++ b/mps/code/expt825.c @@ -250,6 +250,7 @@ static void *test(void *arg, size_t s) (ulongest_t)object_count); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_root_destroy(mps_root); mps_pool_destroy(amc); diff --git a/mps/code/failover.c b/mps/code/failover.c new file mode 100644 index 00000000000..7cb596f8f33 --- /dev/null +++ b/mps/code/failover.c @@ -0,0 +1,360 @@ +/* failover.c: FAILOVER IMPLEMENTATION + * + * $Id$ + * Copyright (c) 2014 Ravenbrook Limited. See end of file for license. 
+ * + * .design: + */ + +#include "failover.h" +#include "mpm.h" +#include "range.h" + +SRCID(failover, "$Id$"); + + +#define failoverOfLand(land) PARENT(FailoverStruct, landStruct, land) + + +ARG_DEFINE_KEY(failover_primary, Pointer); +ARG_DEFINE_KEY(failover_secondary, Pointer); + + +Bool FailoverCheck(Failover fo) +{ + CHECKS(Failover, fo); + CHECKD(Land, &fo->landStruct); + CHECKD(Land, fo->primary); + CHECKD(Land, fo->secondary); + return TRUE; +} + + +static Res failoverInit(Land land, ArgList args) +{ + Failover fo; + LandClass super; + Land primary, secondary; + ArgStruct arg; + Res res; + + AVERT(Land, land); + super = LAND_SUPERCLASS(FailoverLandClass); + res = (*super->init)(land, args); + if (res != ResOK) + return res; + + ArgRequire(&arg, args, FailoverPrimary); + primary = arg.val.p; + ArgRequire(&arg, args, FailoverSecondary); + secondary = arg.val.p; + + fo = failoverOfLand(land); + fo->primary = primary; + fo->secondary = secondary; + fo->sig = FailoverSig; + AVERT(Failover, fo); + return ResOK; +} + + +static void failoverFinish(Land land) +{ + Failover fo; + + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + + fo->sig = SigInvalid; +} + + +static Size failoverSize(Land land) +{ + Failover fo; + + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + + return LandSize(fo->primary) + LandSize(fo->secondary); +} + + +static Res failoverInsert(Range rangeReturn, Land land, Range range) +{ + Failover fo; + Res res; + + AVER(rangeReturn != NULL); + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + AVERT(Range, range); + + /* Provide more opportunities for coalescence. See + * . + */ + (void)LandFlush(fo->primary, fo->secondary); + + res = LandInsert(rangeReturn, fo->primary, range); + if (res != ResOK && res != ResFAIL) + res = LandInsert(rangeReturn, fo->secondary, range); + + return res; +} + + +static Res failoverDelete(Range rangeReturn, Land land, Range range) +{ + Failover fo; + Res res; + RangeStruct oldRange, dummyRange, left, right; + + AVER(rangeReturn != NULL); + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + AVERT(Range, range); + + /* Prefer efficient search in the primary. See + * . + */ + (void)LandFlush(fo->primary, fo->secondary); + + res = LandDelete(&oldRange, fo->primary, range); + + if (res == ResFAIL) { + /* Range not found in primary: try secondary. */ + return LandDelete(rangeReturn, fo->secondary, range); + } else if (res != ResOK) { + /* Range was found in primary, but couldn't be deleted. The only + * case we expect to encounter here is the case where the primary + * is out of memory. (In particular, we don't handle the case of a + * CBS returning ResLIMIT because its block pool has been + * configured not to automatically extend itself.) + */ + AVER(ResIsAllocFailure(res)); + + /* Delete the whole of oldRange, and re-insert the fragments + * (which might end up in the secondary). See + * . + */ + res = LandDelete(&dummyRange, fo->primary, &oldRange); + if (res != ResOK) + return res; + + AVER(RangesEqual(&oldRange, &dummyRange)); + RangeInit(&left, RangeBase(&oldRange), RangeBase(range)); + if (!RangeIsEmpty(&left)) { + /* Don't call LandInsert(..., land, ...) here: that would be + * re-entrant and fail the landEnter check. */ + res = LandInsert(&dummyRange, fo->primary, &left); + if (res != ResOK) { + /* The range was successfully deleted from the primary above. 
*/ + AVER(res != ResFAIL); + res = LandInsert(&dummyRange, fo->secondary, &left); + AVER(res == ResOK); + } + } + RangeInit(&right, RangeLimit(range), RangeLimit(&oldRange)); + if (!RangeIsEmpty(&right)) { + res = LandInsert(&dummyRange, fo->primary, &right); + if (res != ResOK) { + /* The range was successfully deleted from the primary above. */ + AVER(res != ResFAIL); + res = LandInsert(&dummyRange, fo->secondary, &right); + AVER(res == ResOK); + } + } + } + if (res == ResOK) { + AVER(RangesNest(&oldRange, range)); + RangeCopy(rangeReturn, &oldRange); + } + return res; +} + + +static Bool failoverIterate(Land land, LandVisitor visitor, void *closureP, Size closureS) +{ + Failover fo; + + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + AVER(visitor != NULL); + + return LandIterate(fo->primary, visitor, closureP, closureS) + && LandIterate(fo->secondary, visitor, closureP, closureS); +} + + +static Bool failoverFindFirst(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Failover fo; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + AVERT(FindDelete, findDelete); + + /* See . */ + (void)LandFlush(fo->primary, fo->secondary); + + return LandFindFirst(rangeReturn, oldRangeReturn, fo->primary, size, findDelete) + || LandFindFirst(rangeReturn, oldRangeReturn, fo->secondary, size, findDelete); +} + + +static Bool failoverFindLast(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Failover fo; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + AVERT(FindDelete, findDelete); + + /* See . */ + (void)LandFlush(fo->primary, fo->secondary); + + return LandFindLast(rangeReturn, oldRangeReturn, fo->primary, size, findDelete) + || LandFindLast(rangeReturn, oldRangeReturn, fo->secondary, size, findDelete); +} + + +static Bool failoverFindLargest(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Failover fo; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + AVERT(FindDelete, findDelete); + + /* See . */ + (void)LandFlush(fo->primary, fo->secondary); + + return LandFindLargest(rangeReturn, oldRangeReturn, fo->primary, size, findDelete) + || LandFindLargest(rangeReturn, oldRangeReturn, fo->secondary, size, findDelete); +} + + +static Res failoverFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high) +{ + Failover fo; + Bool found = FALSE; + Res res; + + AVER(FALSE); /* TODO: this code is completely untested! */ + AVER(foundReturn != NULL); + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fo = failoverOfLand(land); + AVERT(Failover, fo); + /* AVERT(ZoneSet, zoneSet); */ + AVERT(Bool, high); + + /* See . 
*/ + (void)LandFlush(fo->primary, fo->secondary); + + res = LandFindInZones(&found, rangeReturn, oldRangeReturn, fo->primary, size, zoneSet, high); + if (res != ResOK || !found) + res = LandFindInZones(&found, rangeReturn, oldRangeReturn, fo->secondary, size, zoneSet, high); + + *foundReturn = found; + return res; +} + + +static Res failoverDescribe(Land land, mps_lib_FILE *stream, Count depth) +{ + Failover fo; + Res res; + + if (!TESTT(Land, land)) return ResFAIL; + fo = failoverOfLand(land); + if (!TESTT(Failover, fo)) return ResFAIL; + if (stream == NULL) return ResFAIL; + + res = WriteF(stream, depth, + "Failover $P {\n", (WriteFP)fo, + " primary = $P ($S)\n", (WriteFP)fo->primary, + fo->primary->class->name, + " secondary = $P ($S)\n", (WriteFP)fo->secondary, + fo->secondary->class->name, + "}\n", NULL); + + return res; +} + + +DEFINE_LAND_CLASS(FailoverLandClass, class) +{ + INHERIT_CLASS(class, LandClass); + class->name = "FAILOVER"; + class->size = sizeof(FailoverStruct); + class->init = failoverInit; + class->finish = failoverFinish; + class->sizeMethod = failoverSize; + class->insert = failoverInsert; + class->delete = failoverDelete; + class->iterate = failoverIterate; + class->findFirst = failoverFindFirst; + class->findLast = failoverFindLast; + class->findLargest = failoverFindLargest; + class->findInZones = failoverFindInZones; + class->describe = failoverDescribe; + AVERT(LandClass, class); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014 Ravenbrook Limited . + * All rights reserved. This is an open source license. Contact + * Ravenbrook for commercial licensing options. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Redistributions in any form must be accompanied by information on how + * to obtain complete source code for this software and any accompanying + * software that uses this software. The source code must either be + * included in the distribution or be available for no more than the cost + * of distribution plus a nominal fee, and must be freely redistributable + * under reasonable conditions. For an executable file, complete source + * code means the source code for all modules it contains. It does not + * include source code for modules or files that typically accompany the + * major components of the operating system on which the executable file + * runs. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/failover.h b/mps/code/failover.h new file mode 100644 index 00000000000..56e6149e05e --- /dev/null +++ b/mps/code/failover.h @@ -0,0 +1,69 @@ +/* failover.h: FAILOVER ALLOCATOR INTERFACE + * + * $Id$ + * Copyright (c) 2014 Ravenbrook Limited. See end of file for license. + * + * .source: . + */ + +#ifndef failover_h +#define failover_h + +#include "mpmtypes.h" + +typedef struct FailoverStruct *Failover; + +extern Bool FailoverCheck(Failover failover); + +extern LandClass FailoverLandClassGet(void); + +extern const struct mps_key_s _mps_key_failover_primary; +#define FailoverPrimary (&_mps_key_failover_primary) +#define FailoverPrimary_FIELD p +extern const struct mps_key_s _mps_key_failover_secondary; +#define FailoverSecondary (&_mps_key_failover_secondary) +#define FailoverSecondary_FIELD p + +#endif /* failover.h */ + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014 Ravenbrook Limited . + * All rights reserved. This is an open source license. Contact + * Ravenbrook for commercial licensing options. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Redistributions in any form must be accompanied by information on how + * to obtain complete source code for this software and any accompanying + * software that uses this software. The source code must either be + * included in the distribution or be available for no more than the cost + * of distribution plus a nominal fee, and must be freely redistributable + * under reasonable conditions. For an executable file, complete source + * code means the source code for all modules it contains. It does not + * include source code for modules or files that typically accompany the + * major components of the operating system on which the executable file + * runs. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/fbmtest.c b/mps/code/fbmtest.c index 8a5fb63578e..f5aa6831db8 100644 --- a/mps/code/fbmtest.c +++ b/mps/code/fbmtest.c @@ -21,7 +21,6 @@ #include "mpm.h" #include "mps.h" #include "mpsavm.h" -#include "mpstd.h" #include "testlib.h" #include /* printf */ @@ -100,6 +99,7 @@ static Bool checkCallback(Range range, void *closureP, Size closureS) Addr base, limit; CheckFBMClosure cl = (CheckFBMClosure)closureP; + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); Insist(cl != NULL); @@ -151,10 +151,10 @@ static void check(FBMState state) switch (state->type) { case FBMTypeCBS: - CBSIterate(state->the.cbs, checkCBSCallback, (void *)&closure, 0); + CBSIterate(state->the.cbs, checkCBSCallback, &closure, UNUSED_SIZE); break; case FBMTypeFreelist: - FreelistIterate(state->the.fl, checkFLCallback, (void *)&closure, 0); + FreelistIterate(state->the.fl, checkFLCallback, &closure, UNUSED_SIZE); break; default: cdie(0, "invalid state->type"); diff --git a/mps/code/finalcv.c b/mps/code/finalcv.c index d8673972156..9f22226dc17 100644 --- a/mps/code/finalcv.c +++ b/mps/code/finalcv.c @@ -202,6 +202,7 @@ static void *test(void *arg, size_t s) /* @@@@ missing */ + mps_arena_park(arena); mps_ap_destroy(ap); mps_root_destroy(mps_root[1]); mps_root_destroy(mps_root[0]); diff --git a/mps/code/finaltest.c b/mps/code/finaltest.c index f449d7e1acf..2e36c31ee10 100644 --- a/mps/code/finaltest.c +++ b/mps/code/finaltest.c @@ -255,8 +255,8 @@ static void test_mode(int mode, mps_arena_t arena, mps_chain_t chain) test_pool(mode, arena, chain, mps_class_amc()); test_pool(mode, arena, chain, mps_class_amcz()); test_pool(mode, arena, chain, mps_class_ams()); - /* test_pool(mode, arena, chain, mps_class_lo()); TODO: job003773 */ - /* test_pool(mode, arena, chain, mps_class_awl()); TODO: job003772 */ + test_pool(mode, arena, chain, mps_class_awl()); + test_pool(mode, arena, chain, mps_class_lo()); } @@ -284,6 +284,7 @@ int main(int argc, char *argv[]) test_mode(ModePOLL, arena, chain); test_mode(ModePARK, arena, NULL); + mps_arena_park(arena); mps_chain_destroy(chain); mps_thread_dereg(thread); mps_arena_destroy(arena); diff --git a/mps/code/fotest.c b/mps/code/fotest.c index 2e63d4e121b..750883f61a4 100644 --- a/mps/code/fotest.c +++ b/mps/code/fotest.c @@ -38,28 +38,36 @@ /* Accessors for the CBS used to implement a pool. */ -extern CBS _mps_mvff_cbs(mps_pool_t); -extern CBS _mps_mvt_cbs(mps_pool_t); +extern Land _mps_mvff_cbs(Pool); +extern Land _mps_mvt_cbs(Pool); /* "OOM" pool class -- dummy alloc/free pool class whose alloc() - * method always returns ResMEMORY */ + * method always fails and whose free method does nothing. 
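+ * It is installed (see set_oom below) as the class of a CBS's block pool,
+ * so that CBS operations fail with an allocation error and the failover
+ * path to the secondary free list can be exercised.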
*/ -static Res OOMAlloc(Addr *pReturn, Pool pool, Size size, - Bool withReservoirPermit) +static Res oomAlloc(Addr *pReturn, Pool pool, Size size, + Bool withReservoirPermit) { UNUSED(pReturn); UNUSED(pool); UNUSED(size); UNUSED(withReservoirPermit); - return ResMEMORY; + switch (rnd() % 3) { + case 0: + return ResRESOURCE; + case 1: + return ResMEMORY; + default: + return ResCOMMIT_LIMIT; + } } -extern PoolClass PoolClassOOM(void); +extern PoolClass OOMPoolClassGet(void); DEFINE_POOL_CLASS(OOMPoolClass, this) { - INHERIT_CLASS(this, AbstractAllocFreePoolClass); - this->alloc = OOMAlloc; + INHERIT_CLASS(this, AbstractPoolClass); + this->alloc = oomAlloc; + this->free = PoolTrivFree; this->size = sizeof(PoolStruct); AVERT(PoolClass, this); } @@ -83,16 +91,17 @@ static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size) /* set_oom -- set blockPool of CBS to OOM or MFS according to argument. */ -static void set_oom(CBS cbs, int oom) +static void set_oom(Land land, int oom) { - cbs->blockPool->class = oom ? EnsureOOMPoolClass() : PoolClassMFS(); + CBS cbs = PARENT(CBSStruct, landStruct, land); + cbs->blockPool->class = oom ? OOMPoolClassGet() : PoolClassMFS(); } /* stress -- create an allocation point and allocate in it */ static mps_res_t stress(size_t (*size)(unsigned long, mps_align_t), - mps_align_t alignment, mps_pool_t pool, CBS cbs) + mps_align_t alignment, mps_pool_t pool, Land cbs) { mps_res_t res = MPS_RES_OK; mps_ap_t ap; @@ -182,8 +191,8 @@ int main(int argc, char *argv[]) die(mps_pool_create_k(&pool, arena, mps_class_mvff(), args), "create MVFF"); } MPS_ARGS_END(args); { - CBS cbs = _mps_mvff_cbs(pool); - die(stress(randomSizeAligned, alignment, pool, cbs), "stress MVFF"); + die(stress(randomSizeAligned, alignment, pool, _mps_mvff_cbs(pool)), + "stress MVFF"); } mps_pool_destroy(pool); mps_arena_destroy(arena); @@ -201,8 +210,8 @@ int main(int argc, char *argv[]) die(mps_pool_create_k(&pool, arena, mps_class_mvt(), args), "create MVFF"); } MPS_ARGS_END(args); { - CBS cbs = _mps_mvt_cbs(pool); - die(stress(randomSizeAligned, alignment, pool, cbs), "stress MVT"); + die(stress(randomSizeAligned, alignment, pool, _mps_mvt_cbs(pool)), + "stress MVT"); } mps_pool_destroy(pool); mps_arena_destroy(arena); diff --git a/mps/code/freelist.c b/mps/code/freelist.c index bf0798ccdc0..692c59a1e80 100644 --- a/mps/code/freelist.c +++ b/mps/code/freelist.c @@ -6,32 +6,58 @@ * .sources: . */ -#include "cbs.h" #include "freelist.h" #include "mpm.h" +#include "range.h" SRCID(freelist, "$Id$"); +#define freelistOfLand(land) PARENT(FreelistStruct, landStruct, land) +#define freelistAlignment(fl) LandAlignment(&(fl)->landStruct) + + typedef union FreelistBlockUnion { struct { FreelistBlock next; /* tagged with low bit 1 */ - /* limit is (char *)this + fl->alignment */ + /* limit is (char *)this + freelistAlignment(fl) */ } small; struct { - FreelistBlock next; + FreelistBlock next; /* not tagged (low bit 0) */ Addr limit; } large; } FreelistBlockUnion; -/* See */ -#define freelistMinimumAlignment ((Align)sizeof(FreelistBlock)) +/* freelistEND -- the end of a list + * + * The end of a list should not be represented with NULL, as this is + * ambiguous. However, freelistEND is in fact a null pointer, for + * performance. To check whether you have it right, try temporarily + * defining freelistEND as ((FreelistBlock)2) or similar (it must be + * an even number because of the use of a tag). 
+ */ +#define freelistEND ((FreelistBlock)0) + + +/* FreelistTag -- return the tag of word */ #define FreelistTag(word) ((word) & 1) + + +/* FreelistTagSet -- return word updated with the tag set */ + #define FreelistTagSet(word) ((FreelistBlock)((Word)(word) | 1)) + + +/* FreelistTagReset -- return word updated with the tag reset */ + #define FreelistTagReset(word) ((FreelistBlock)((Word)(word) & ~(Word)1)) + + +/* FreelistTagCopy -- return 'to' updated to have the same tag as 'from' */ + #define FreelistTagCopy(to, from) ((FreelistBlock)((Word)(to) | FreelistTag((Word)(from)))) @@ -51,7 +77,7 @@ static Addr FreelistBlockLimit(Freelist fl, FreelistBlock block) { AVERT(Freelist, fl); if (FreelistBlockIsSmall(block)) { - return AddrAdd(FreelistBlockBase(block), fl->alignment); + return AddrAdd(FreelistBlockBase(block), freelistAlignment(fl)); } else { return block->large.limit; } @@ -65,7 +91,7 @@ static Bool FreelistBlockCheck(FreelistBlock block) { CHECKL(block != NULL); /* block list is address-ordered */ - CHECKL(FreelistTagReset(block->small.next) == NULL + CHECKL(FreelistTagReset(block->small.next) == freelistEND || block < FreelistTagReset(block->small.next)); CHECKL(FreelistBlockIsSmall(block) || (Addr)block < block->large.limit); @@ -73,8 +99,8 @@ static Bool FreelistBlockCheck(FreelistBlock block) } -/* FreelistBlockNext -- return the next block in the list, or NULL if - * there are no more blocks. +/* FreelistBlockNext -- return the next block in the list, or + * freelistEND if there are no more blocks. */ static FreelistBlock FreelistBlockNext(FreelistBlock block) { @@ -106,7 +132,7 @@ static void FreelistBlockSetLimit(Freelist fl, FreelistBlock block, Addr limit) AVERT(Freelist, fl); AVERT(FreelistBlock, block); - AVER(AddrIsAligned(limit, fl->alignment)); + AVER(AddrIsAligned(limit, freelistAlignment(fl))); AVER(FreelistBlockBase(block) < limit); size = AddrOffset(block, limit); @@ -129,12 +155,12 @@ static FreelistBlock FreelistBlockInit(Freelist fl, Addr base, Addr limit) AVERT(Freelist, fl); AVER(base != NULL); - AVER(AddrIsAligned(base, fl->alignment)); + AVER(AddrIsAligned(base, freelistAlignment(fl))); AVER(base < limit); - AVER(AddrIsAligned(limit, fl->alignment)); + AVER(AddrIsAligned(limit, freelistAlignment(fl))); block = (FreelistBlock)base; - block->small.next = FreelistTagSet(NULL); + block->small.next = FreelistTagSet(freelistEND); FreelistBlockSetLimit(fl, block, limit); AVERT(FreelistBlock, block); return block; @@ -143,23 +169,39 @@ static FreelistBlock FreelistBlockInit(Freelist fl, Addr base, Addr limit) Bool FreelistCheck(Freelist fl) { + Land land; CHECKS(Freelist, fl); + land = &fl->landStruct; + CHECKD(Land, land); /* See */ - CHECKL(AlignIsAligned(fl->alignment, freelistMinimumAlignment)); - CHECKL((fl->list == NULL) == (fl->listSize == 0)); + CHECKL(AlignIsAligned(freelistAlignment(fl), FreelistMinimumAlignment)); + CHECKL((fl->list == freelistEND) == (fl->listSize == 0)); + CHECKL((fl->list == freelistEND) == (fl->size == 0)); + CHECKL(SizeIsAligned(fl->size, freelistAlignment(fl))); + return TRUE; } -Res FreelistInit(Freelist fl, Align alignment) +static Res freelistInit(Land land, ArgList args) { - /* See */ - if (!AlignIsAligned(alignment, freelistMinimumAlignment)) - return ResPARAM; + Freelist fl; + LandClass super; + Res res; - fl->alignment = alignment; - fl->list = NULL; + AVERT(Land, land); + super = LAND_SUPERCLASS(FreelistLandClass); + res = (*super->init)(land, args); + if (res != ResOK) + return res; + + /* See */ + 
AVER(AlignIsAligned(LandAlignment(land), FreelistMinimumAlignment)); + + fl = freelistOfLand(land); + fl->list = freelistEND; fl->listSize = 0; + fl->size = 0; fl->sig = FreelistSig; AVERT(Freelist, fl); @@ -167,31 +209,56 @@ Res FreelistInit(Freelist fl, Align alignment) } -void FreelistFinish(Freelist fl) +static void freelistFinish(Land land) { + Freelist fl; + + AVERT(Land, land); + fl = freelistOfLand(land); AVERT(Freelist, fl); fl->sig = SigInvalid; - fl->list = NULL; + fl->list = freelistEND; +} + + +static Size freelistSize(Land land) +{ + Freelist fl; + + AVERT(Land, land); + fl = freelistOfLand(land); + AVERT(Freelist, fl); + return fl->size; } /* freelistBlockSetPrevNext -- update list of blocks - * If prev and next are both NULL, make the block list empty. - * Otherwise, if prev is NULL, make next the first block in the list. - * Otherwise, if next is NULL, make prev the last block in the list. + * + * If prev and next are both freelistEND, make the block list empty. + * Otherwise, if prev is freelistEND, make next the first block in the list. + * Otherwise, if next is freelistEND, make prev the last block in the list. * Otherwise, make next follow prev in the list. * Update the count of blocks by 'delta'. + + * It is tempting to try to simplify this code by putting a + * FreelistBlockUnion into the FreelistStruct and so avoiding the + * special case on prev. But the problem with that idea is that we + * can't guarantee that such a sentinel would respect the isolated + * range invariant, and so it would still have to be special-cased. */ + static void freelistBlockSetPrevNext(Freelist fl, FreelistBlock prev, FreelistBlock next, int delta) { AVERT(Freelist, fl); - if (prev) { - AVER(next == NULL || FreelistBlockLimit(fl, prev) < FreelistBlockBase(next)); - FreelistBlockSetNext(prev, next); - } else { + if (prev == freelistEND) { fl->list = next; + } else { + /* Isolated range invariant (design.mps.freelist.impl.invariant). */ + AVER(next == freelistEND + || FreelistBlockLimit(fl, prev) < FreelistBlockBase(next)); + FreelistBlockSetNext(prev, next); } if (delta < 0) { AVER(fl->listSize >= (Count)-delta); @@ -202,29 +269,32 @@ static void freelistBlockSetPrevNext(Freelist fl, FreelistBlock prev, } -Res FreelistInsert(Range rangeReturn, Freelist fl, Range range) +static Res freelistInsert(Range rangeReturn, Land land, Range range) { + Freelist fl; FreelistBlock prev, cur, next, new; Addr base, limit; Bool coalesceLeft, coalesceRight; AVER(rangeReturn != NULL); + AVERT(Land, land); + fl = freelistOfLand(land); AVERT(Freelist, fl); AVERT(Range, range); - AVER(RangeIsAligned(range, fl->alignment)); + AVER(RangeIsAligned(range, freelistAlignment(fl))); base = RangeBase(range); limit = RangeLimit(range); - prev = NULL; + prev = freelistEND; cur = fl->list; - while (cur) { + while (cur != freelistEND) { if (base < FreelistBlockLimit(fl, cur) && FreelistBlockBase(cur) < limit) return ResFAIL; /* range overlaps with cur */ if (limit <= FreelistBlockBase(cur)) break; next = FreelistBlockNext(cur); - if (next) + if (next != freelistEND) /* Isolated range invariant (design.mps.freelist.impl.invariant). */ AVER(FreelistBlockLimit(fl, cur) < FreelistBlockBase(next)); prev = cur; @@ -235,8 +305,8 @@ Res FreelistInsert(Range rangeReturn, Freelist fl, Range range) * coalesces then it does so with prev on the left, and cur on the * right. 
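+ *
+ * For illustration (hypothetical addresses): if prev is [0x1000, 0x2000),
+ * the inserted range is [0x2000, 0x3000), and cur is [0x3000, 0x4000),
+ * then both sides coalesce and the three become one block [0x1000, 0x4000).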
*/ - coalesceLeft = (prev && base == FreelistBlockLimit(fl, prev)); - coalesceRight = (cur && limit == FreelistBlockBase(cur)); + coalesceLeft = (prev != freelistEND && base == FreelistBlockLimit(fl, prev)); + coalesceRight = (cur != freelistEND && limit == FreelistBlockBase(cur)); if (coalesceLeft && coalesceRight) { base = FreelistBlockBase(prev); @@ -262,17 +332,20 @@ Res FreelistInsert(Range rangeReturn, Freelist fl, Range range) freelistBlockSetPrevNext(fl, prev, new, +1); } + fl->size += RangeSize(range); RangeInit(rangeReturn, base, limit); return ResOK; } -/* freelistDeleteFromBlock -- delete 'range' from 'block' (it is known - * to be a subset of that block); update 'rangeReturn' to the original - * range of 'block' and update the block list accordingly: 'prev' is - * the block on the list just before 'block', or NULL if 'block' is - * the first block on the list. +/* freelistDeleteFromBlock -- delete range from block + * + * range must be a subset of block. Update rangeReturn to be the + * original range of block and update the block list accordingly: prev + * is on the list just before block, or freelistEND if block is the + * first block on the list. */ + static void freelistDeleteFromBlock(Range rangeReturn, Freelist fl, Range range, FreelistBlock prev, FreelistBlock block) @@ -283,8 +356,8 @@ static void freelistDeleteFromBlock(Range rangeReturn, Freelist fl, AVER(rangeReturn != NULL); AVERT(Freelist, fl); AVERT(Range, range); - AVER(RangeIsAligned(range, fl->alignment)); - AVER(prev == NULL || FreelistBlockNext(prev) == block); + AVER(RangeIsAligned(range, freelistAlignment(fl))); + AVER(prev == freelistEND || FreelistBlockNext(prev) == block); AVERT(FreelistBlock, block); AVER(FreelistBlockBase(block) <= RangeBase(range)); AVER(RangeLimit(range) <= FreelistBlockLimit(fl, block)); @@ -317,25 +390,30 @@ static void freelistDeleteFromBlock(Range rangeReturn, Freelist fl, freelistBlockSetPrevNext(fl, block, new, +1); } + AVER(fl->size >= RangeSize(range)); + fl->size -= RangeSize(range); RangeInit(rangeReturn, blockBase, blockLimit); } -Res FreelistDelete(Range rangeReturn, Freelist fl, Range range) +static Res freelistDelete(Range rangeReturn, Land land, Range range) { + Freelist fl; FreelistBlock prev, cur, next; Addr base, limit; AVER(rangeReturn != NULL); + AVERT(Land, land); + fl = freelistOfLand(land); AVERT(Freelist, fl); AVERT(Range, range); base = RangeBase(range); limit = RangeLimit(range); - prev = NULL; + prev = freelistEND; cur = fl->list; - while (cur) { + while (cur != freelistEND) { Addr blockBase, blockLimit; blockBase = FreelistBlockBase(cur); blockLimit = FreelistBlockLimit(fl, cur); @@ -359,43 +437,82 @@ Res FreelistDelete(Range rangeReturn, Freelist fl, Range range) } -void FreelistIterate(Freelist fl, FreelistIterateMethod iterate, - void *closureP, Size closureS) +static Bool freelistIterate(Land land, LandVisitor visitor, + void *closureP, Size closureS) { - FreelistBlock prev, cur, next; + Freelist fl; + FreelistBlock cur, next; + AVERT(Land, land); + fl = freelistOfLand(land); AVERT(Freelist, fl); - AVER(FUNCHECK(iterate)); + AVER(FUNCHECK(visitor)); + /* closureP and closureS are arbitrary */ - prev = NULL; - cur = fl->list; - while (cur) { - Bool delete = FALSE; + for (cur = fl->list; cur != freelistEND; cur = next) { RangeStruct range; Bool cont; - RangeInit(&range, FreelistBlockBase(cur), FreelistBlockLimit(fl, cur)); - cont = (*iterate)(&delete, &range, closureP, closureS); + /* .next.first: Take next before calling the visitor, in case the + * 
visitor touches the block. */ next = FreelistBlockNext(cur); - if (delete) { - freelistBlockSetPrevNext(fl, prev, next, -1); - } else { - prev = cur; - } - cur = next; + RangeInit(&range, FreelistBlockBase(cur), FreelistBlockLimit(fl, cur)); + cont = (*visitor)(land, &range, closureP, closureS); if (!cont) - break; + return FALSE; } + return TRUE; } -/* freelistFindDeleteFromBlock -- Find a chunk of 'size' bytes in - * 'block' (which is known to be at least that big) and possibly - * delete that chunk according to the instruction in 'findDelete'. - * Return the range of that chunk in 'rangeReturn'. Return the - * original range of the block in 'oldRangeReturn'. Update the block - * list accordingly, using 'prev' which is the previous block in the - * list, or NULL if 'block' is the first block in the list. +static Bool freelistIterateAndDelete(Land land, LandDeleteVisitor visitor, + void *closureP, Size closureS) +{ + Freelist fl; + FreelistBlock prev, cur, next; + + AVERT(Land, land); + fl = freelistOfLand(land); + AVERT(Freelist, fl); + AVER(FUNCHECK(visitor)); + /* closureP and closureS are arbitrary */ + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + Bool delete = FALSE; + RangeStruct range; + Bool cont; + Size size; + next = FreelistBlockNext(cur); /* See .next.first. */ + size = FreelistBlockSize(fl, cur); + RangeInit(&range, FreelistBlockBase(cur), FreelistBlockLimit(fl, cur)); + cont = (*visitor)(&delete, land, &range, closureP, closureS); + if (delete) { + freelistBlockSetPrevNext(fl, prev, next, -1); + AVER(fl->size >= size); + fl->size -= size; + } else { + prev = cur; + } + if (!cont) + return FALSE; + cur = next; + } + return TRUE; +} + + +/* freelistFindDeleteFromBlock -- delete size bytes from block + * + * Find a chunk of size bytes in block (which is known to be at least + * that big) and possibly delete that chunk according to the + * instruction in findDelete. Return the range of that chunk in + * rangeReturn. Return the original range of the block in + * oldRangeReturn. Update the block list accordingly, using prev, + * which is previous in list or freelistEND if block is the first + * block in the list. 
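+ *
+ * For illustration (hypothetical addresses): given block [0x1000, 0x4000)
+ * and size 0x1000, FindDeleteLOW deletes [0x1000, 0x2000) and leaves
+ * [0x2000, 0x4000) on the list; FindDeleteHIGH deletes [0x3000, 0x4000)
+ * and leaves [0x1000, 0x3000); FindDeleteENTIRE deletes the whole block;
+ * FindDeleteNONE deletes nothing.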
*/ + static void freelistFindDeleteFromBlock(Range rangeReturn, Range oldRangeReturn, Freelist fl, Size size, FindDelete findDelete, @@ -407,9 +524,9 @@ static void freelistFindDeleteFromBlock(Range rangeReturn, Range oldRangeReturn, AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); AVERT(Freelist, fl); - AVER(SizeIsAligned(size, fl->alignment)); + AVER(SizeIsAligned(size, freelistAlignment(fl))); AVERT(FindDelete, findDelete); - AVER(prev == NULL || FreelistBlockNext(prev) == block); + AVER(prev == freelistEND || FreelistBlockNext(prev) == block); AVERT(FreelistBlock, block); AVER(FreelistBlockSize(fl, block) >= size); @@ -447,20 +564,23 @@ static void freelistFindDeleteFromBlock(Range rangeReturn, Range oldRangeReturn, } -Bool FreelistFindFirst(Range rangeReturn, Range oldRangeReturn, - Freelist fl, Size size, FindDelete findDelete) +static Bool freelistFindFirst(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) { + Freelist fl; FreelistBlock prev, cur, next; AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fl = freelistOfLand(land); AVERT(Freelist, fl); - AVER(SizeIsAligned(size, fl->alignment)); + AVER(SizeIsAligned(size, freelistAlignment(fl))); AVERT(FindDelete, findDelete); - prev = NULL; + prev = freelistEND; cur = fl->list; - while (cur) { + while (cur != freelistEND) { if (FreelistBlockSize(fl, cur) >= size) { freelistFindDeleteFromBlock(rangeReturn, oldRangeReturn, fl, size, findDelete, prev, cur); @@ -475,22 +595,25 @@ Bool FreelistFindFirst(Range rangeReturn, Range oldRangeReturn, } -Bool FreelistFindLast(Range rangeReturn, Range oldRangeReturn, - Freelist fl, Size size, FindDelete findDelete) +static Bool freelistFindLast(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) { + Freelist fl; Bool found = FALSE; FreelistBlock prev, cur, next; - FreelistBlock foundPrev = NULL, foundCur = NULL; + FreelistBlock foundPrev = freelistEND, foundCur = freelistEND; AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fl = freelistOfLand(land); AVERT(Freelist, fl); - AVER(SizeIsAligned(size, fl->alignment)); + AVER(SizeIsAligned(size, freelistAlignment(fl))); AVERT(FindDelete, findDelete); - prev = NULL; + prev = freelistEND; cur = fl->list; - while (cur) { + while (cur != freelistEND) { if (FreelistBlockSize(fl, cur) >= size) { found = TRUE; foundPrev = prev; @@ -509,21 +632,24 @@ Bool FreelistFindLast(Range rangeReturn, Range oldRangeReturn, } -Bool FreelistFindLargest(Range rangeReturn, Range oldRangeReturn, - Freelist fl, Size size, FindDelete findDelete) +static Bool freelistFindLargest(Range rangeReturn, Range oldRangeReturn, + Land land, Size size, FindDelete findDelete) { + Freelist fl; Bool found = FALSE; FreelistBlock prev, cur, next; - FreelistBlock bestPrev = NULL, bestCur = NULL; + FreelistBlock bestPrev = freelistEND, bestCur = freelistEND; AVER(rangeReturn != NULL); AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fl = freelistOfLand(land); AVERT(Freelist, fl); AVERT(FindDelete, findDelete); - prev = NULL; + prev = freelistEND; cur = fl->list; - while (cur) { + while (cur != freelistEND) { if (FreelistBlockSize(fl, cur) >= size) { found = TRUE; size = FreelistBlockSize(fl, cur); @@ -543,20 +669,90 @@ Bool FreelistFindLargest(Range rangeReturn, Range oldRangeReturn, } -/* freelistDescribeIterateMethod -- Iterate method for - * FreelistDescribe. Writes a decription of the range into the stream - * pointed to by 'closureP'. 
+static Res freelistFindInZones(Bool *foundReturn, Range rangeReturn, + Range oldRangeReturn, Land land, Size size, + ZoneSet zoneSet, Bool high) +{ + Freelist fl; + LandFindMethod landFind; + RangeInZoneSet search; + Bool found = FALSE; + FreelistBlock prev, cur, next; + FreelistBlock foundPrev = freelistEND, foundCur = freelistEND; + RangeStruct foundRange; + + AVER(FALSE); /* TODO: this code is completely untested! */ + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + fl = freelistOfLand(land); + AVERT(Freelist, fl); + /* AVERT(ZoneSet, zoneSet); */ + AVERT(Bool, high); + + landFind = high ? freelistFindLast : freelistFindFirst; + search = high ? RangeInZoneSetLast : RangeInZoneSetFirst; + + if (zoneSet == ZoneSetEMPTY) + goto fail; + if (zoneSet == ZoneSetUNIV) { + FindDelete fd = high ? FindDeleteHIGH : FindDeleteLOW; + *foundReturn = (*landFind)(rangeReturn, oldRangeReturn, land, size, fd); + return ResOK; + } + if (ZoneSetIsSingle(zoneSet) && size > ArenaStripeSize(LandArena(land))) + goto fail; + + prev = freelistEND; + cur = fl->list; + while (cur != freelistEND) { + Addr base, limit; + if ((*search)(&base, &limit, FreelistBlockBase(cur), + FreelistBlockLimit(fl, cur), + LandArena(land), zoneSet, size)) + { + found = TRUE; + foundPrev = prev; + foundCur = cur; + RangeInit(&foundRange, base, limit); + if (!high) + break; + } + next = FreelistBlockNext(cur); + prev = cur; + cur = next; + } + + if (!found) + goto fail; + + freelistDeleteFromBlock(oldRangeReturn, fl, &foundRange, foundPrev, foundCur); + RangeCopy(rangeReturn, &foundRange); + *foundReturn = TRUE; + return ResOK; + +fail: + *foundReturn = FALSE; + return ResOK; +} + + +/* freelistDescribeVisitor -- visitor method for freelistDescribe + * + * Writes a description of the range into the stream pointed to by + * closureP. */ -static Bool freelistDescribeIterateMethod(Bool *deleteReturn, Range range, - void *closureP, Size closureS) + +static Bool freelistDescribeVisitor(Land land, Range range, + void *closureP, Size closureS) { Res res; mps_lib_FILE *stream = closureP; Count depth = closureS; - AVER(deleteReturn != NULL); - AVERT(Range, range); - AVER(stream != NULL); + if (!TESTT(Land, land)) return FALSE; + if (!RangeCheck(range)) return FALSE; + if (stream == NULL) return FALSE; res = WriteF(stream, depth, "[$P,", (WriteFP)RangeBase(range), @@ -564,64 +760,52 @@ static Bool freelistDescribeIterateMethod(Bool *deleteReturn, Range range, " {$U}\n", (WriteFU)RangeSize(range), NULL); - *deleteReturn = FALSE; return res == ResOK; } -Res FreelistDescribe(Freelist fl, mps_lib_FILE *stream, Count depth) +static Res freelistDescribe(Land land, mps_lib_FILE *stream, Count depth) { + Freelist fl; Res res; + Bool b; + if (!TESTT(Land, land)) return ResFAIL; + fl = freelistOfLand(land); if (!TESTT(Freelist, fl)) return ResFAIL; if (stream == NULL) return ResFAIL; res = WriteF(stream, depth, "Freelist $P {\n", (WriteFP)fl, - " alignment = $U\n", (WriteFU)fl->alignment, " listSize = $U\n", (WriteFU)fl->listSize, NULL); - FreelistIterate(fl, freelistDescribeIterateMethod, stream, depth + 2); + b = LandIterate(land, freelistDescribeVisitor, stream, depth + 2); + if (!b) return ResFAIL; res = WriteF(stream, depth, "} Freelist $P\n", (WriteFP)fl, NULL); return res; } -/* freelistFlushIterateMethod -- Iterate method for - * FreelistFlushToCBS. Attempst to insert the range into the CBS. 
- */ -static Bool freelistFlushIterateMethod(Bool *deleteReturn, Range range, - void *closureP, Size closureS) +DEFINE_LAND_CLASS(FreelistLandClass, class) { - Res res; - RangeStruct newRange; - CBS cbs; - - AVER(deleteReturn != NULL); - AVERT(Range, range); - AVER(closureP != NULL); - UNUSED(closureS); - - cbs = closureP; - res = CBSInsert(&newRange, cbs, range); - if (res == ResOK) { - *deleteReturn = TRUE; - return TRUE; - } else { - *deleteReturn = FALSE; - return FALSE; - } -} - - -void FreelistFlushToCBS(Freelist fl, CBS cbs) -{ - AVERT(Freelist, fl); - AVERT(CBS, cbs); - - FreelistIterate(fl, freelistFlushIterateMethod, cbs, 0); + INHERIT_CLASS(class, LandClass); + class->name = "FREELIST"; + class->size = sizeof(FreelistStruct); + class->init = freelistInit; + class->finish = freelistFinish; + class->sizeMethod = freelistSize; + class->insert = freelistInsert; + class->delete = freelistDelete; + class->iterate = freelistIterate; + class->iterateAndDelete = freelistIterateAndDelete; + class->findFirst = freelistFindFirst; + class->findLast = freelistFindLast; + class->findLargest = freelistFindLargest; + class->findInZones = freelistFindInZones; + class->describe = freelistDescribe; + AVERT(LandClass, class); } diff --git a/mps/code/freelist.h b/mps/code/freelist.h index 5957728a311..db75837253e 100644 --- a/mps/code/freelist.h +++ b/mps/code/freelist.h @@ -1,7 +1,7 @@ /* freelist.h: FREE LIST ALLOCATOR INTERFACE * * $Id$ - * Copyright (c) 2013 Ravenbrook Limited. See end of file for license. + * Copyright (c) 2013-2014 Ravenbrook Limited. See end of file for license. * * .source: . */ @@ -9,51 +9,23 @@ #ifndef freelist_h #define freelist_h -#include "cbs.h" #include "mpmtypes.h" -#include "range.h" - -#define FreelistSig ((Sig)0x519F6331) /* SIGnature FREEL */ typedef struct FreelistStruct *Freelist; -typedef union FreelistBlockUnion *FreelistBlock; -typedef Bool (*FreelistIterateMethod)(Bool *deleteReturn, Range range, - void *closureP, Size closureS); +extern Bool FreelistCheck(Freelist freelist); -typedef struct FreelistStruct { - Sig sig; - Align alignment; - FreelistBlock list; - Count listSize; -} FreelistStruct; +/* See */ +#define FreelistMinimumAlignment ((Align)sizeof(FreelistBlock)) -extern Bool FreelistCheck(Freelist fl); -extern Res FreelistInit(Freelist fl, Align alignment); -extern void FreelistFinish(Freelist fl); - -extern Res FreelistInsert(Range rangeReturn, Freelist fl, Range range); -extern Res FreelistDelete(Range rangeReturn, Freelist fl, Range range); -extern Res FreelistDescribe(Freelist fl, mps_lib_FILE *stream, Count depth); - -extern void FreelistIterate(Freelist abq, FreelistIterateMethod iterate, - void *closureP, Size closureS); - -extern Bool FreelistFindFirst(Range rangeReturn, Range oldRangeReturn, - Freelist fl, Size size, FindDelete findDelete); -extern Bool FreelistFindLast(Range rangeReturn, Range oldRangeReturn, - Freelist fl, Size size, FindDelete findDelete); -extern Bool FreelistFindLargest(Range rangeReturn, Range oldRangeReturn, - Freelist fl, Size size, FindDelete findDelete); - -extern void FreelistFlushToCBS(Freelist fl, CBS cbs); +extern LandClass FreelistLandClassGet(void); #endif /* freelist.h */ /* C. COPYRIGHT AND LICENSE * - * Copyright (C) 2013 Ravenbrook Limited . + * Copyright (C) 2013-2014 Ravenbrook Limited . * All rights reserved. This is an open source license. Contact * Ravenbrook for commercial licensing options. 
* diff --git a/mps/code/gc.gmk b/mps/code/gc.gmk index 826cb0ef659..76716dc0785 100644 --- a/mps/code/gc.gmk +++ b/mps/code/gc.gmk @@ -41,7 +41,7 @@ CFLAGSCOMPILERLAX := # If interrupted, this is liable to leave a zero-length file behind. define gendep - $(SHELL) -ec "$(CC) $(CFLAGS) -MM $< | \ + $(SHELL) -ec "$(CC) $(CFLAGSSTRICT) -MM $< | \ sed '/:/s!$*.o!$(@D)/& $(@D)/$*.d!' > $@" [ -s $@ ] || rm -f $@ endef diff --git a/mps/code/gcbench.c b/mps/code/gcbench.c index 2a18d1a7d10..733dc0a925b 100644 --- a/mps/code/gcbench.c +++ b/mps/code/gcbench.c @@ -12,6 +12,7 @@ #include "testthr.h" #include "fmtdy.h" #include "fmtdytst.h" +#include "mpm.h" #include /* fprintf, printf, putchars, sscanf, stderr, stdout */ #include /* alloca, exit, EXIT_FAILURE, EXIT_SUCCESS, strtoul */ @@ -243,6 +244,8 @@ static void arena_setup(gcthread_fn_t fn, RESMUST(mps_pool_create_k(&pool, arena, pool_class, args)); } MPS_ARGS_END(args); watch(fn, name); + mps_arena_park(arena); + printf("%u chunks\n", (unsigned)RingLength(&arena->chunkRing)); mps_pool_destroy(pool); mps_fmt_destroy(format); if (ngen > 0) diff --git a/mps/code/global.c b/mps/code/global.c index 24a5e3e748f..31b4296d482 100644 --- a/mps/code/global.c +++ b/mps/code/global.c @@ -37,10 +37,6 @@ static Bool arenaRingInit = FALSE; static RingStruct arenaRing; /* */ static Serial arenaSerial; /* */ -/* forward declarations */ -void arenaEnterLock(Arena, int); -void arenaLeaveLock(Arena, int); - /* arenaClaimRingLock, arenaReleaseRingLock -- lock/release the arena ring * @@ -431,6 +427,10 @@ void GlobalsPrepareToDestroy(Globals arenaGlobals) AVERT(Globals, arenaGlobals); + /* Park the arena before destroying the default chain, to ensure + * that there are no traces using that chain. */ + ArenaPark(arenaGlobals); + arena = GlobalsArena(arenaGlobals); arenaDenounce(arena); @@ -520,26 +520,15 @@ Ring GlobalsRememberedSummaryRing(Globals global) /* ArenaEnter -- enter the state where you can look at the arena */ -/* TODO: The THREAD_SINGLE and PROTECTION_NONE build configs aren't regularly - tested, though they might well be useful for embedded custom targets. - Should test them. RB 2012-09-03 */ - -#if defined(THREAD_SINGLE) && defined(PROTECTION_NONE) void (ArenaEnter)(Arena arena) { - /* Don't need to lock, just check. */ AVERT(Arena, arena); + ArenaEnter(arena); } -#else -void ArenaEnter(Arena arena) -{ - arenaEnterLock(arena, 0); -} -#endif /* The recursive argument specifies whether to claim the lock recursively or not. */ -void arenaEnterLock(Arena arena, int recursive) +void ArenaEnterLock(Arena arena, Bool recursive) { Lock lock; @@ -574,25 +563,18 @@ void arenaEnterLock(Arena arena, int recursive) void ArenaEnterRecursive(Arena arena) { - arenaEnterLock(arena, 1); + ArenaEnterLock(arena, TRUE); } /* ArenaLeave -- leave the state where you can look at MPM data structures */ -#if defined(THREAD_SINGLE) && defined(PROTECTION_NONE) void (ArenaLeave)(Arena arena) { - /* Don't need to lock, just check. 
*/ AVERT(Arena, arena); + ArenaLeave(arena); } -#else -void ArenaLeave(Arena arena) -{ - arenaLeaveLock(arena, 0); -} -#endif -void arenaLeaveLock(Arena arena, int recursive) +void ArenaLeaveLock(Arena arena, Bool recursive) { Lock lock; @@ -616,7 +598,7 @@ void arenaLeaveLock(Arena arena, int recursive) void ArenaLeaveRecursive(Arena arena) { - arenaLeaveLock(arena, 1); + ArenaLeaveLock(arena, TRUE); } /* mps_exception_info -- pointer to exception info @@ -717,14 +699,7 @@ Bool ArenaAccess(Addr addr, AccessSet mode, MutatorFaultContext context) * series of manual steps for looking around. This might be worthwhile * if we introduce background activities other than tracing. */ -#ifdef MPS_PROD_EPCORE void (ArenaPoll)(Globals globals) -{ - /* Don't poll, just check. */ - AVERT(Globals, globals); -} -#else -void ArenaPoll(Globals globals) { Arena arena; Clock start; @@ -779,7 +754,6 @@ void ArenaPoll(Globals globals) globals->insidePoll = FALSE; } -#endif /* Work out whether we have enough time here to collect the world, * and whether much time has passed since the last time we did that diff --git a/mps/code/land.c b/mps/code/land.c new file mode 100644 index 00000000000..fa0e9c62a8a --- /dev/null +++ b/mps/code/land.c @@ -0,0 +1,643 @@ +/* land.c: LAND (COLLECTION OF ADDRESS RANGES) IMPLEMENTATION + * + * $Id: //info.ravenbrook.com/project/mps/branch/2014-03-30/land/code/land.c#1 $ + * Copyright (c) 2014 Ravenbrook Limited. See end of file for license. + * + * .design: + */ + +#include "mpm.h" +#include "range.h" + +SRCID(land, "$Id$"); + + +/* FindDeleteCheck -- check method for a FindDelete value */ + +Bool FindDeleteCheck(FindDelete findDelete) +{ + CHECKL(findDelete == FindDeleteNONE + || findDelete == FindDeleteLOW + || findDelete == FindDeleteHIGH + || findDelete == FindDeleteENTIRE); + UNUSED(findDelete); /* */ + + return TRUE; +} + + +/* landEnter, landLeave -- Avoid re-entrance + * + * .enter-leave: The visitor functions passed to LandIterate and + * LandIterateAndDelete are not allowed to call methods of that land. + * These functions enforce this. + * + * .enter-leave.simple: Some simple queries are fine to call from + * visitor functions. These are marked with the tag of this comment. + */ + +static void landEnter(Land land) +{ + /* Don't need to check as always called from interface function. */ + AVER(!land->inLand); + land->inLand = TRUE; + return; +} + +static void landLeave(Land land) +{ + /* Don't need to check as always called from interface function. 
*/ + AVER(land->inLand); + land->inLand = FALSE; + return; +} + + +/* LandCheck -- check land */ + +Bool LandCheck(Land land) +{ + /* .enter-leave.simple */ + CHECKS(Land, land); + CHECKD(LandClass, land->class); + CHECKU(Arena, land->arena); + CHECKL(AlignCheck(land->alignment)); + return TRUE; +} + + +/* LandInit -- initialize land + * + * See + */ + +Res LandInit(Land land, LandClass class, Arena arena, Align alignment, void *owner, ArgList args) +{ + Res res; + + AVER(land != NULL); + AVERT(LandClass, class); + AVERT(Align, alignment); + + land->inLand = TRUE; + land->alignment = alignment; + land->arena = arena; + land->class = class; + land->sig = LandSig; + + AVERT(Land, land); + + res = (*class->init)(land, args); + if (res != ResOK) + goto failInit; + + EVENT2(LandInit, land, owner); + landLeave(land); + return ResOK; + + failInit: + land->sig = SigInvalid; + return res; +} + + +/* LandCreate -- allocate and initialize land + * + * See + */ + +Res LandCreate(Land *landReturn, Arena arena, LandClass class, Align alignment, void *owner, ArgList args) +{ + Res res; + Land land; + void *p; + + AVER(landReturn != NULL); + AVERT(Arena, arena); + AVERT(LandClass, class); + + res = ControlAlloc(&p, arena, class->size, + /* withReservoirPermit */ FALSE); + if (res != ResOK) + goto failAlloc; + land = p; + + res = LandInit(land, class, arena, alignment, owner, args); + if (res != ResOK) + goto failInit; + + *landReturn = land; + return ResOK; + +failInit: + ControlFree(arena, land, class->size); +failAlloc: + return res; +} + + +/* LandDestroy -- finish and deallocate land + * + * See + */ + +void LandDestroy(Land land) +{ + Arena arena; + LandClass class; + + AVERT(Land, land); + arena = land->arena; + class = land->class; + AVERT(LandClass, class); + LandFinish(land); + ControlFree(arena, land, class->size); +} + + +/* LandFinish -- finish land + * + * See + */ + +void LandFinish(Land land) +{ + AVERT(Land, land); + landEnter(land); + + (*land->class->finish)(land); + + land->sig = SigInvalid; +} + + +/* LandSize -- return the total size of ranges in land + * + * See + */ + +Size LandSize(Land land) +{ + /* .enter-leave.simple */ + AVERT(Land, land); + + return (*land->class->sizeMethod)(land); +} + + +/* LandInsert -- insert range of addresses into land + * + * See + */ + +Res LandInsert(Range rangeReturn, Land land, Range range) +{ + Res res; + + AVER(rangeReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + AVER(RangeIsAligned(range, land->alignment)); + landEnter(land); + + res = (*land->class->insert)(rangeReturn, land, range); + + landLeave(land); + return res; +} + + +/* LandDelete -- delete range of addresses from land + * + * See + */ + +Res LandDelete(Range rangeReturn, Land land, Range range) +{ + Res res; + + AVER(rangeReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + AVER(RangeIsAligned(range, land->alignment)); + landEnter(land); + + res = (*land->class->delete)(rangeReturn, land, range); + + landLeave(land); + return res; +} + + +/* LandIterate -- iterate over isolated ranges of addresses in land + * + * See + */ + +Bool LandIterate(Land land, LandVisitor visitor, void *closureP, Size closureS) +{ + Bool b; + AVERT(Land, land); + AVER(FUNCHECK(visitor)); + landEnter(land); + + b = (*land->class->iterate)(land, visitor, closureP, closureS); + + landLeave(land); + return b; +} + + +/* LandIterateAndDelete -- iterate over isolated ranges of addresses + * in land, deleting some of them + * + * See + */ + +Bool LandIterateAndDelete(Land land, 
LandDeleteVisitor visitor, void *closureP, Size closureS) +{ + Bool b; + AVERT(Land, land); + AVER(FUNCHECK(visitor)); + landEnter(land); + + b = (*land->class->iterateAndDelete)(land, visitor, closureP, closureS); + + landLeave(land); + return b; +} + + +/* LandFindFirst -- find first range of given size + * + * See + */ + +Bool LandFindFirst(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Bool b; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + AVER(FindDeleteCheck(findDelete)); + landEnter(land); + + b = (*land->class->findFirst)(rangeReturn, oldRangeReturn, land, size, + findDelete); + + landLeave(land); + return b; +} + + +/* LandFindLast -- find last range of given size + * + * See + */ + +Bool LandFindLast(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Bool b; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + AVER(FindDeleteCheck(findDelete)); + landEnter(land); + + b = (*land->class->findLast)(rangeReturn, oldRangeReturn, land, size, + findDelete); + + landLeave(land); + return b; +} + + +/* LandFindLargest -- find largest range of at least given size + * + * See + */ + +Bool LandFindLargest(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + Bool b; + + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + AVER(FindDeleteCheck(findDelete)); + landEnter(land); + + b = (*land->class->findLargest)(rangeReturn, oldRangeReturn, land, size, + findDelete); + + landLeave(land); + return b; +} + + +/* LandFindInSize -- find range of given size in set of zones + * + * See + */ + +Res LandFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high) +{ + Res res; + + AVER(foundReturn != NULL); + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + AVER(SizeIsAligned(size, land->alignment)); + /* AVER(ZoneSet, zoneSet); */ + AVERT(Bool, high); + landEnter(land); + + res = (*land->class->findInZones)(foundReturn, rangeReturn, oldRangeReturn, + land, size, zoneSet, high); + + landLeave(land); + return res; +} + + +/* LandDescribe -- describe land for debugging + * + * See + */ + +Res LandDescribe(Land land, mps_lib_FILE *stream, Count depth) +{ + Res res; + + if (!TESTT(Land, land)) return ResFAIL; + if (stream == NULL) return ResFAIL; + + res = WriteF(stream, depth, + "Land $P {\n", (WriteFP)land, + " class $P", (WriteFP)land->class, + " (\"$S\")\n", land->class->name, + " arena $P\n", (WriteFP)land->arena, + " align $U\n", (WriteFU)land->alignment, + " inLand: $U\n", (WriteFU)land->inLand, + NULL); + if (res != ResOK) + return res; + + res = (*land->class->describe)(land, stream, depth + 2); + if (res != ResOK) + return res; + + res = WriteF(stream, depth, "} Land $P\n", (WriteFP)land, NULL); + return ResOK; +} + + +/* landFlushVisitor -- visitor for LandFlush. + * + * closureP argument is the destination Land. Attempt to insert the + * range into the destination. 
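
LandFlush (below) drives this visitor over the source land. A hedged usage sketch, reusing the illustrative names cbs and fl from the tests later in this diff, where a CBS is the preferred structure and a Freelist is the emergency fallback:

    /* Try to move any ranges that spilled into the emergency free list
     * back into the CBS.  TRUE means the iteration completed, so every
     * range was inserted into cbs and deleted from fl. */
    if (LandFlush(cbs, fl)) {
      /* fl is now empty */
    }
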
+ */ +static Bool landFlushVisitor(Bool *deleteReturn, Land land, Range range, + void *closureP, Size closureS) +{ + Res res; + RangeStruct newRange; + Land dest; + + AVER(deleteReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + AVER(closureP != NULL); + AVER(closureS == UNUSED_SIZE); + UNUSED(closureS); + + dest = closureP; + res = LandInsert(&newRange, dest, range); + if (res == ResOK) { + *deleteReturn = TRUE; + return TRUE; + } else { + *deleteReturn = FALSE; + return FALSE; + } +} + + +/* LandFlush -- move ranges from src to dest + * + * See + */ + +Bool LandFlush(Land dest, Land src) +{ + AVERT(Land, dest); + AVERT(Land, src); + + return LandIterateAndDelete(src, landFlushVisitor, dest, UNUSED_SIZE); +} + + +/* LandClassCheck -- check land class */ + +Bool LandClassCheck(LandClass class) +{ + CHECKL(ProtocolClassCheck(&class->protocol)); + CHECKL(class->name != NULL); /* Should be <=6 char C identifier */ + CHECKL(class->size >= sizeof(LandStruct)); + CHECKL(FUNCHECK(class->init)); + CHECKL(FUNCHECK(class->finish)); + CHECKL(FUNCHECK(class->insert)); + CHECKL(FUNCHECK(class->delete)); + CHECKL(FUNCHECK(class->findFirst)); + CHECKL(FUNCHECK(class->findLast)); + CHECKL(FUNCHECK(class->findLargest)); + CHECKL(FUNCHECK(class->findInZones)); + CHECKL(FUNCHECK(class->describe)); + CHECKS(LandClass, class); + return TRUE; +} + + +static Res landTrivInit(Land land, ArgList args) +{ + AVERT(Land, land); + AVER(ArgListCheck(args)); + UNUSED(args); + return ResOK; +} + +static void landTrivFinish(Land land) +{ + AVERT(Land, land); + NOOP; +} + +static Size landNoSize(Land land) +{ + UNUSED(land); + NOTREACHED; + return 0; +} + +/* LandSlowSize -- generic size method but slow */ + +static Bool landSizeVisitor(Land land, Range range, + void *closureP, Size closureS) +{ + Size *size; + + AVERT(Land, land); + AVERT(Range, range); + AVER(closureP != NULL); + AVER(closureS == UNUSED_SIZE); + UNUSED(closureS); + + size = closureP; + *size += RangeSize(range); + + return TRUE; +} + +Size LandSlowSize(Land land) +{ + Size size = 0; + Bool b = LandIterate(land, landSizeVisitor, &size, UNUSED_SIZE); + AVER(b); + return size; +} + +static Res landNoInsert(Range rangeReturn, Land land, Range range) +{ + AVER(rangeReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + return ResUNIMPL; +} + +static Res landNoDelete(Range rangeReturn, Land land, Range range) +{ + AVER(rangeReturn != NULL); + AVERT(Land, land); + AVERT(Range, range); + return ResUNIMPL; +} + +static Bool landNoIterate(Land land, LandVisitor visitor, void *closureP, Size closureS) +{ + AVERT(Land, land); + AVER(visitor != NULL); + UNUSED(closureP); + UNUSED(closureS); + return FALSE; +} + +static Bool landNoIterateAndDelete(Land land, LandDeleteVisitor visitor, void *closureP, Size closureS) +{ + AVERT(Land, land); + AVER(visitor != NULL); + UNUSED(closureP); + UNUSED(closureS); + return FALSE; +} + +static Bool landNoFind(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete) +{ + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + UNUSED(size); + AVER(FindDeleteCheck(findDelete)); + return ResUNIMPL; +} + +static Res landNoFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high) +{ + AVER(foundReturn != NULL); + AVER(rangeReturn != NULL); + AVER(oldRangeReturn != NULL); + AVERT(Land, land); + UNUSED(size); + UNUSED(zoneSet); + AVER(BoolCheck(high)); + return ResUNIMPL; +} + +static Res 
landTrivDescribe(Land land, mps_lib_FILE *stream, Count depth) +{ + if (!TESTT(Land, land)) + return ResFAIL; + if (stream == NULL) + return ResFAIL; + UNUSED(depth); + /* dispatching function does it all */ + return ResOK; +} + +DEFINE_CLASS(LandClass, class) +{ + INHERIT_CLASS(&class->protocol, ProtocolClass); + class->name = "LAND"; + class->size = sizeof(LandStruct); + class->init = landTrivInit; + class->sizeMethod = landNoSize; + class->finish = landTrivFinish; + class->insert = landNoInsert; + class->delete = landNoDelete; + class->iterate = landNoIterate; + class->iterateAndDelete = landNoIterateAndDelete; + class->findFirst = landNoFind; + class->findLast = landNoFind; + class->findLargest = landNoFind; + class->findInZones = landNoFindInZones; + class->describe = landTrivDescribe; + class->sig = LandClassSig; + AVERT(LandClass, class); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014 Ravenbrook Limited . + * All rights reserved. This is an open source license. Contact + * Ravenbrook for commercial licensing options. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Redistributions in any form must be accompanied by information on how + * to obtain complete source code for this software and any accompanying + * software that uses this software. The source code must either be + * included in the distribution or be available for no more than the cost + * of distribution plus a nominal fee, and must be freely redistributable + * under reasonable conditions. For an executable file, complete source + * code means the source code for all modules it contains. It does not + * include source code for modules or files that typically accompany the + * major components of the operating system on which the executable file + * runs. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/landtest.c b/mps/code/landtest.c new file mode 100644 index 00000000000..e968e91ec0a --- /dev/null +++ b/mps/code/landtest.c @@ -0,0 +1,637 @@ +/* landtest.c: LAND TEST + * + * $Id$ + * Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license. + * + * The MPS contains three land implementations: + * + * 1. 
the CBS (Coalescing Block Structure) module maintains blocks in + * a splay tree for fast access with a cost in storage; + * + * 2. the Freelist module maintains blocks in an address-ordered + * singly linked list for zero storage overhead with a cost in + * performance. + * + * 3. the Failover module implements a mechanism for using CBS until + * it fails, then falling back to a Freelist. + */ + +#include "cbs.h" +#include "failover.h" +#include "freelist.h" +#include "mpm.h" +#include "mps.h" +#include "mpsavm.h" +#include "mpstd.h" +#include "poolmfs.h" +#include "testlib.h" + +#include /* printf */ + +SRCID(landtest, "$Id$"); + + +#define ArraySize ((Size)123456) + +/* CBS is much faster than Freelist, so we apply more operations to + * the former. */ +#define nCBSOperations ((Size)125000) +#define nFLOperations ((Size)12500) +#define nFOOperations ((Size)12500) + +static Count NAllocateTried, NAllocateSucceeded, NDeallocateTried, + NDeallocateSucceeded; + +static int verbose = 0; + +typedef struct TestStateStruct { + Align align; + BT allocTable; + Addr block; + Land land; +} TestStateStruct, *TestState; + +typedef struct CheckTestClosureStruct { + TestState state; + Addr limit; + Addr oldLimit; +} CheckTestClosureStruct, *CheckTestClosure; + + +static Addr (addrOfIndex)(TestState state, Index i) +{ + return AddrAdd(state->block, (i * state->align)); +} + + +static Index (indexOfAddr)(TestState state, Addr a) +{ + return (Index)(AddrOffset(state->block, a) / state->align); +} + + +static void describe(TestState state) { + die(LandDescribe(state->land, mps_lib_get_stdout(), 0), "LandDescribe"); +} + + +static Bool checkVisitor(Land land, Range range, void *closureP, Size closureS) +{ + Addr base, limit; + CheckTestClosure cl = closureP; + + testlib_unused(land); + Insist(closureS == UNUSED_SIZE); + Insist(cl != NULL); + + base = RangeBase(range); + limit = RangeLimit(range); + + if (base > cl->oldLimit) { + Insist(BTIsSetRange(cl->state->allocTable, + indexOfAddr(cl->state, cl->oldLimit), + indexOfAddr(cl->state, base))); + } else { /* must be at start of table */ + Insist(base == cl->oldLimit); + Insist(cl->oldLimit == cl->state->block); + } + + Insist(BTIsResRange(cl->state->allocTable, + indexOfAddr(cl->state, base), + indexOfAddr(cl->state, limit))); + + cl->oldLimit = limit; + + return TRUE; +} + +static void check(TestState state) +{ + CheckTestClosureStruct closure; + Bool b; + + closure.state = state; + closure.limit = addrOfIndex(state, ArraySize); + closure.oldLimit = state->block; + + b = LandIterate(state->land, checkVisitor, &closure, UNUSED_SIZE); + Insist(b); + + if (closure.oldLimit == state->block) + Insist(BTIsSetRange(state->allocTable, 0, + indexOfAddr(state, closure.limit))); + else if (closure.limit > closure.oldLimit) + Insist(BTIsSetRange(state->allocTable, + indexOfAddr(state, closure.oldLimit), + indexOfAddr(state, closure.limit))); + else + Insist(closure.oldLimit == closure.limit); +} + + +static Word fbmRnd(Word limit) +{ + /* Not very uniform, but never mind. */ + return (Word)rnd() % limit; +} + + +/* nextEdge -- Finds the next transition in the bit table + * + * Returns the index greater than such that the + * range [, ) has the same value in the bit table, + * and has a different value or does not exist. 
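 * (Illustrative worked example, not from the original comment: with a
 * bit table bt holding the bits 1 1 0 0 1 and size 5, nextEdge(bt, 5, 1)
 * returns 2, the first index after 1 whose bit differs; nextEdge(bt, 5, 2)
 * returns 4; and lastEdge(bt, 5, 3), defined below, returns 2, the start
 * of the run of equal bits containing index 3.)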
+ */ + +static Index nextEdge(BT bt, Size size, Index base) +{ + Index end; + Bool baseValue; + + Insist(bt != NULL); + Insist(base < size); + + baseValue = BTGet(bt, base); + + for(end = base + 1; end < size && BTGet(bt, end) == baseValue; end++) + NOOP; + + return end; +} + + +/* lastEdge -- Finds the previous transition in the bit table + * + * Returns the index less than such that the range + * [, ] has the same value in the bit table, + * and -1 has a different value or does not exist. + */ + +static Index lastEdge(BT bt, Size size, Index base) +{ + Index end; + Bool baseValue; + + Insist(bt != NULL); + Insist(base < size); + + baseValue = BTGet(bt, base); + + for(end = base; end > (Index)0 && BTGet(bt, end - 1) == baseValue; end--) + NOOP; + + return end; +} + + +/* randomRange -- picks random range within table + * + * The function first picks a uniformly distributed within the table. + * + * It then scans forward a binary exponentially distributed + * number of "edges" in the table (that is, transitions between set and + * reset) to get . Note that there is a 50% chance that will + * be the next edge, a 25% chance it will be the edge after, etc., until + * the end of the table. + * + * Finally it picks a uniformly distributed in the range + * [base+1, limit]. + * + * Hence there is a somewhat better than 50% chance that the range will be + * all either set or reset. + */ + +static void randomRange(Addr *baseReturn, Addr *limitReturn, TestState state) +{ + Index base; /* the start of our range */ + Index end; /* an edge (i.e. different from its predecessor) */ + /* after base */ + Index limit; /* a randomly chosen value in (base, limit]. */ + + base = fbmRnd(ArraySize); + + do { + end = nextEdge(state->allocTable, ArraySize, base); + } while(end < ArraySize && fbmRnd(2) == 0); /* p=0.5 exponential */ + + Insist(end > base); + + limit = base + 1 + fbmRnd(end - base); + + *baseReturn = addrOfIndex(state, base); + *limitReturn = addrOfIndex(state, limit); +} + + +static void allocate(TestState state, Addr base, Addr limit) +{ + Res res; + Index ib, il; /* Indexed for base and limit */ + Bool isFree; + RangeStruct range, oldRange; + Addr outerBase, outerLimit; /* interval containing [ib, il) */ + + ib = indexOfAddr(state, base); + il = indexOfAddr(state, limit); + + isFree = BTIsResRange(state->allocTable, ib, il); + + NAllocateTried++; + + if (isFree) { + Size left, right, total; /* Sizes of block and two fragments */ + + outerBase = + addrOfIndex(state, lastEdge(state->allocTable, ArraySize, ib)); + outerLimit = + addrOfIndex(state, nextEdge(state->allocTable, ArraySize, il - 1)); + + left = AddrOffset(outerBase, base); + right = AddrOffset(limit, outerLimit); + total = AddrOffset(outerBase, outerLimit); + + /* TODO: check these values */ + testlib_unused(left); + testlib_unused(right); + testlib_unused(total); + } else { + outerBase = outerLimit = NULL; + } + + RangeInit(&range, base, limit); + res = LandDelete(&oldRange, state->land, &range); + + if (verbose) { + printf("allocate: [%p,%p) -- %s\n", + (void *)base, (void *)limit, isFree ? 
"succeed" : "fail"); + describe(state); + } + + if (!isFree) { + die_expect((mps_res_t)res, MPS_RES_FAIL, + "Succeeded in deleting allocated block"); + } else { /* isFree */ + die_expect((mps_res_t)res, MPS_RES_OK, + "failed to delete free block"); + Insist(RangeBase(&oldRange) == outerBase); + Insist(RangeLimit(&oldRange) == outerLimit); + NAllocateSucceeded++; + BTSetRange(state->allocTable, ib, il); + } +} + + +static void deallocate(TestState state, Addr base, Addr limit) +{ + Res res; + Index ib, il; + Bool isAllocated; + Addr outerBase = base, outerLimit = limit; /* interval containing [ib, il) */ + RangeStruct range, freeRange; /* interval returned by the manager */ + + ib = indexOfAddr(state, base); + il = indexOfAddr(state, limit); + + isAllocated = BTIsSetRange(state->allocTable, ib, il); + + NDeallocateTried++; + + if (isAllocated) { + Size left, right, total; /* Sizes of block and two fragments */ + + /* Find the free blocks adjacent to the allocated block */ + if (ib > 0 && !BTGet(state->allocTable, ib - 1)) { + outerBase = + addrOfIndex(state, lastEdge(state->allocTable, ArraySize, ib - 1)); + } else { + outerBase = base; + } + + if (il < ArraySize && !BTGet(state->allocTable, il)) { + outerLimit = + addrOfIndex(state, nextEdge(state->allocTable, ArraySize, il)); + } else { + outerLimit = limit; + } + + left = AddrOffset(outerBase, base); + right = AddrOffset(limit, outerLimit); + total = AddrOffset(outerBase, outerLimit); + + /* TODO: check these values */ + testlib_unused(left); + testlib_unused(right); + testlib_unused(total); + } + + RangeInit(&range, base, limit); + res = LandInsert(&freeRange, state->land, &range); + + if (verbose) { + printf("deallocate: [%p,%p) -- %s\n", + (void *)base, (void *)limit, isAllocated ? "succeed" : "fail"); + describe(state); + } + + if (!isAllocated) { + die_expect((mps_res_t)res, MPS_RES_FAIL, + "succeeded in inserting non-allocated block"); + } else { /* isAllocated */ + die_expect((mps_res_t)res, MPS_RES_OK, + "failed to insert allocated block"); + + NDeallocateSucceeded++; + BTResRange(state->allocTable, ib, il); + Insist(RangeBase(&freeRange) == outerBase); + Insist(RangeLimit(&freeRange) == outerLimit); + } +} + + +static void find(TestState state, Size size, Bool high, FindDelete findDelete) +{ + Bool expected, found; + Index expectedBase, expectedLimit; + RangeStruct foundRange, oldRange; + Addr remainderBase, remainderLimit; + Addr origBase, origLimit; + Size oldSize, newSize; + + origBase = origLimit = NULL; + expected = (high ? BTFindLongResRangeHigh : BTFindLongResRange) + (&expectedBase, &expectedLimit, state->allocTable, + (Index)0, (Index)ArraySize, (Count)size); + + if (expected) { + oldSize = (expectedLimit - expectedBase) * state->align; + remainderBase = origBase = addrOfIndex(state, expectedBase); + remainderLimit = origLimit = addrOfIndex(state, expectedLimit); + + switch(findDelete) { + case FindDeleteNONE: + /* do nothing */ + break; + case FindDeleteENTIRE: + remainderBase = remainderLimit; + break; + case FindDeleteLOW: + expectedLimit = expectedBase + size; + remainderBase = addrOfIndex(state, expectedLimit); + break; + case FindDeleteHIGH: + expectedBase = expectedLimit - size; + remainderLimit = addrOfIndex(state, expectedBase); + break; + default: + cdie(0, "invalid findDelete"); + break; + } + + if (findDelete != FindDeleteNONE) { + newSize = AddrOffset(remainderBase, remainderLimit); + } + + /* TODO: check these values */ + testlib_unused(oldSize); + testlib_unused(newSize); + } + + found = (high ? 
LandFindLast : LandFindFirst) + (&foundRange, &oldRange, state->land, size * state->align, findDelete); + + if (verbose) { + printf("find %s %lu: ", high ? "last" : "first", + (unsigned long)(size * state->align)); + if (expected) { + printf("expecting [%p,%p)\n", + (void *)addrOfIndex(state, expectedBase), + (void *)addrOfIndex(state, expectedLimit)); + } else { + printf("expecting this not to be found\n"); + } + if (found) { + printf(" found [%p,%p)\n", (void *)RangeBase(&foundRange), + (void *)RangeLimit(&foundRange)); + } else { + printf(" not found\n"); + } + } + + Insist(found == expected); + + if (found) { + Insist(expectedBase == indexOfAddr(state, RangeBase(&foundRange))); + Insist(expectedLimit == indexOfAddr(state, RangeLimit(&foundRange))); + + if (findDelete != FindDeleteNONE) { + Insist(RangeBase(&oldRange) == origBase); + Insist(RangeLimit(&oldRange) == origLimit); + BTSetRange(state->allocTable, expectedBase, expectedLimit); + } + } + + return; +} + +static void test(TestState state, unsigned n) { + Addr base, limit; + unsigned i; + Size size; + Bool high; + FindDelete findDelete = FindDeleteNONE; + + BTSetRange(state->allocTable, 0, ArraySize); /* Initially all allocated */ + check(state); + for(i = 0; i < n; i++) { + switch(fbmRnd(3)) { + case 0: + randomRange(&base, &limit, state); + allocate(state, base, limit); + break; + case 1: + randomRange(&base, &limit, state); + deallocate(state, base, limit); + break; + case 2: + size = fbmRnd(ArraySize / 10) + 1; + high = fbmRnd(2) ? TRUE : FALSE; + switch(fbmRnd(6)) { + default: findDelete = FindDeleteNONE; break; + case 3: findDelete = FindDeleteLOW; break; + case 4: findDelete = FindDeleteHIGH; break; + case 5: findDelete = FindDeleteENTIRE; break; + } + find(state, size, high, findDelete); + break; + default: + cdie(0, "invalid rnd(3)"); + return; + } + if ((i + 1) % 1000 == 0) + check(state); + } +} + +#define testArenaSIZE (((size_t)4)<<20) + +extern int main(int argc, char *argv[]) +{ + mps_arena_t mpsArena; + Arena arena; + TestStateStruct state; + void *p; + Addr dummyBlock; + BT allocTable; + MFSStruct blockPool; + CBSStruct cbsStruct; + FreelistStruct flStruct; + FailoverStruct foStruct; + Land cbs = &cbsStruct.landStruct; + Land fl = &flStruct.landStruct; + Land fo = &foStruct.landStruct; + Pool mfs = &blockPool.poolStruct; + Align align; + int i; + + testlib_init(argc, argv); + align = (1 << rnd() % 4) * MPS_PF_ALIGN; + + NAllocateTried = NAllocateSucceeded = NDeallocateTried = + NDeallocateSucceeded = 0; + + die(mps_arena_create(&mpsArena, mps_arena_class_vm(), testArenaSIZE), + "mps_arena_create"); + arena = (Arena)mpsArena; /* avoid pun */ + + die((mps_res_t)BTCreate(&allocTable, arena, ArraySize), + "failed to create alloc table"); + + /* We're not going to use this block, but I feel unhappy just */ + /* inventing addresses. */ + die((mps_res_t)ControlAlloc(&p, arena, ArraySize * align, + /* withReservoirPermit */ FALSE), + "failed to allocate block"); + dummyBlock = p; /* avoid pun */ + + if (verbose) { + printf("Allocated block [%p,%p)\n", (void*)dummyBlock, + (char *)dummyBlock + ArraySize); + } + + /* 1. Test CBS */ + + MPS_ARGS_BEGIN(args) { + die((mps_res_t)LandInit(cbs, CBSFastLandClassGet(), arena, align, NULL, args), + "failed to initialise CBS"); + } MPS_ARGS_END(args); + state.align = align; + state.block = dummyBlock; + state.allocTable = allocTable; + state.land = cbs; + test(&state, nCBSOperations); + LandFinish(cbs); + + /* 2. 
Test Freelist */ + + die((mps_res_t)LandInit(fl, FreelistLandClassGet(), arena, align, NULL, + mps_args_none), + "failed to initialise Freelist"); + state.land = fl; + test(&state, nFLOperations); + LandFinish(fl); + + /* 3. Test CBS-failing-over-to-Freelist (always failing over on + * first iteration, never failing over on second; see fotest.c for a + * test case that randomly switches fail-over on and off) + */ + + for (i = 0; i < 2; ++i) { + MPS_ARGS_BEGIN(piArgs) { + MPS_ARGS_ADD(piArgs, MPS_KEY_MFS_UNIT_SIZE, sizeof(CBSFastBlockStruct)); + MPS_ARGS_ADD(piArgs, MPS_KEY_EXTEND_BY, ArenaAlign(arena)); + MPS_ARGS_ADD(piArgs, MFSExtendSelf, i); + MPS_ARGS_DONE(piArgs); + die(PoolInit(mfs, arena, PoolClassMFS(), piArgs), "PoolInit"); + } MPS_ARGS_END(piArgs); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, CBSBlockPool, mfs); + die((mps_res_t)LandInit(cbs, CBSFastLandClassGet(), arena, align, NULL, + args), + "failed to initialise CBS"); + } MPS_ARGS_END(args); + + die((mps_res_t)LandInit(fl, FreelistLandClassGet(), arena, align, NULL, + mps_args_none), + "failed to initialise Freelist"); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, FailoverPrimary, cbs); + MPS_ARGS_ADD(args, FailoverSecondary, fl); + die((mps_res_t)LandInit(fo, FailoverLandClassGet(), arena, align, NULL, + args), + "failed to initialise Failover"); + } MPS_ARGS_END(args); + + state.land = fo; + test(&state, nFOOperations); + LandFinish(fo); + LandFinish(fl); + LandFinish(cbs); + PoolFinish(mfs); + } + + mps_arena_destroy(arena); + + printf("\nNumber of allocations attempted: %"PRIuLONGEST"\n", + (ulongest_t)NAllocateTried); + printf("Number of allocations succeeded: %"PRIuLONGEST"\n", + (ulongest_t)NAllocateSucceeded); + printf("Number of deallocations attempted: %"PRIuLONGEST"\n", + (ulongest_t)NDeallocateTried); + printf("Number of deallocations succeeded: %"PRIuLONGEST"\n", + (ulongest_t)NDeallocateSucceeded); + printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (c) 2001-2014 Ravenbrook Limited . + * All rights reserved. This is an open source license. Contact + * Ravenbrook for commercial licensing options. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Redistributions in any form must be accompanied by information on how + * to obtain complete source code for this software and any accompanying + * software that uses this software. The source code must either be + * included in the distribution or be available for no more than the cost + * of distribution plus a nominal fee, and must be freely redistributable + * under reasonable conditions. For an executable file, complete source + * code means the source code for all modules it contains. It does not + * include source code for modules or files that typically accompany the + * major components of the operating system on which the executable file + * runs. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/code/ll.gmk b/mps/code/ll.gmk index 24dd32b9efe..787380fb3ed 100644 --- a/mps/code/ll.gmk +++ b/mps/code/ll.gmk @@ -10,7 +10,7 @@ # common makefile fragment () requires. CC = clang -CFLAGSDEBUG = -O -g3 +CFLAGSDEBUG = -O0 -g3 CFLAGSOPT = -O2 -g3 CFLAGSCOMPILER := \ -pedantic \ @@ -46,7 +46,7 @@ CFLAGSCOMPILERLAX := # If interrupted, this is liable to leave a zero-length file behind. define gendep - $(SHELL) -ec "$(CC) $(CFLAGS) -MM $< | \ + $(SHELL) -ec "$(CC) $(CFLAGSSTRICT) -MM $< | \ sed '/:/s!$*.o!$(@D)/& $(@D)/$*.d!' > $@" [ -s $@ ] || rm -f $@ endef diff --git a/mps/code/lock.h b/mps/code/lock.h index 1431bbacd85..4fcd591a0f2 100644 --- a/mps/code/lock.h +++ b/mps/code/lock.h @@ -85,9 +85,6 @@ #define LockSig ((Sig)0x51970CC9) /* SIGnature LOCK */ -#if defined(THREAD_MULTI) - - /* LockSize -- Return the size of a LockStruct * * Supports allocation of locks. @@ -198,9 +195,9 @@ extern void LockClaimGlobal(void); extern void LockReleaseGlobal(void); -#elif defined(THREAD_SINGLE) - - +#if defined(LOCK) +/* Nothing to do: functions declared in all lock configurations. */ +#elif defined(LOCK_NONE) #define LockSize() MPS_PF_ALIGN #define LockInit(lock) UNUSED(lock) #define LockFinish(lock) UNUSED(lock) @@ -213,13 +210,9 @@ extern void LockReleaseGlobal(void); #define LockReleaseGlobalRecursive() #define LockClaimGlobal() #define LockReleaseGlobal() - - #else - -#error "No threading defined." - -#endif +#error "No lock configuration." +#endif /* LOCK */ #endif /* lock_h */ diff --git a/mps/code/lockix.c b/mps/code/lockix.c index c32361e8560..2afd294246e 100644 --- a/mps/code/lockix.c +++ b/mps/code/lockix.c @@ -58,7 +58,7 @@ typedef struct LockStruct { /* LockSize -- size of a LockStruct */ -size_t LockSize(void) +size_t (LockSize)(void) { return sizeof(LockStruct); } @@ -66,7 +66,7 @@ size_t LockSize(void) /* LockCheck -- check a lock */ -Bool LockCheck(Lock lock) +Bool (LockCheck)(Lock lock) { CHECKS(Lock, lock); /* While claims can't be very large, I don't dare to put a limit on it. 
*/ @@ -77,7 +77,7 @@ Bool LockCheck(Lock lock) /* LockInit -- initialize a lock */ -void LockInit(Lock lock) +void (LockInit)(Lock lock) { pthread_mutexattr_t attr; int res; @@ -99,7 +99,7 @@ void LockInit(Lock lock) /* LockFinish -- finish a lock */ -void LockFinish(Lock lock) +void (LockFinish)(Lock lock) { int res; @@ -114,7 +114,7 @@ void LockFinish(Lock lock) /* LockClaim -- claim a lock (non-recursive) */ -void LockClaim(Lock lock) +void (LockClaim)(Lock lock) { int res; @@ -133,7 +133,7 @@ void LockClaim(Lock lock) /* LockReleaseMPM -- release a lock (non-recursive) */ -void LockReleaseMPM(Lock lock) +void (LockReleaseMPM)(Lock lock) { int res; @@ -148,7 +148,7 @@ void LockReleaseMPM(Lock lock) /* LockClaimRecursive -- claim a lock (recursive) */ -void LockClaimRecursive(Lock lock) +void (LockClaimRecursive)(Lock lock) { int res; @@ -168,7 +168,7 @@ void LockClaimRecursive(Lock lock) /* LockReleaseRecursive -- release a lock (recursive) */ -void LockReleaseRecursive(Lock lock) +void (LockReleaseRecursive)(Lock lock) { int res; @@ -203,7 +203,7 @@ static void globalLockInit(void) /* LockClaimGlobalRecursive -- claim the global recursive lock */ -void LockClaimGlobalRecursive(void) +void (LockClaimGlobalRecursive)(void) { int res; @@ -216,7 +216,7 @@ void LockClaimGlobalRecursive(void) /* LockReleaseGlobalRecursive -- release the global recursive lock */ -void LockReleaseGlobalRecursive(void) +void (LockReleaseGlobalRecursive)(void) { LockReleaseRecursive(globalRecLock); } @@ -224,7 +224,7 @@ void LockReleaseGlobalRecursive(void) /* LockClaimGlobal -- claim the global non-recursive lock */ -void LockClaimGlobal(void) +void (LockClaimGlobal)(void) { int res; @@ -237,7 +237,7 @@ void LockClaimGlobal(void) /* LockReleaseGlobal -- release the global non-recursive lock */ -void LockReleaseGlobal(void) +void (LockReleaseGlobal)(void) { LockReleaseMPM(globalLock); } diff --git a/mps/code/lockli.c b/mps/code/lockli.c index 06437b5b531..89e8f4f0653 100644 --- a/mps/code/lockli.c +++ b/mps/code/lockli.c @@ -72,7 +72,7 @@ typedef struct LockStruct { /* LockSize -- size of a LockStruct */ -size_t LockSize(void) +size_t (LockSize)(void) { return sizeof(LockStruct); } @@ -80,7 +80,7 @@ size_t LockSize(void) /* LockCheck -- check a lock */ -Bool LockCheck(Lock lock) +Bool (LockCheck)(Lock lock) { CHECKS(Lock, lock); /* While claims can't be very large, I don't dare to put a limit on it. 
*/ @@ -91,7 +91,7 @@ Bool LockCheck(Lock lock) /* LockInit -- initialize a lock */ -void LockInit(Lock lock) +void (LockInit)(Lock lock) { pthread_mutexattr_t attr; int res; @@ -113,7 +113,7 @@ void LockInit(Lock lock) /* LockFinish -- finish a lock */ -void LockFinish(Lock lock) +void (LockFinish)(Lock lock) { int res; @@ -128,7 +128,7 @@ void LockFinish(Lock lock) /* LockClaim -- claim a lock (non-recursive) */ -void LockClaim(Lock lock) +void (LockClaim)(Lock lock) { int res; @@ -147,7 +147,7 @@ void LockClaim(Lock lock) /* LockReleaseMPM -- release a lock (non-recursive) */ -void LockReleaseMPM(Lock lock) +void (LockReleaseMPM)(Lock lock) { int res; @@ -162,7 +162,7 @@ void LockReleaseMPM(Lock lock) /* LockClaimRecursive -- claim a lock (recursive) */ -void LockClaimRecursive(Lock lock) +void (LockClaimRecursive)(Lock lock) { int res; @@ -182,7 +182,7 @@ void LockClaimRecursive(Lock lock) /* LockReleaseRecursive -- release a lock (recursive) */ -void LockReleaseRecursive(Lock lock) +void (LockReleaseRecursive)(Lock lock) { int res; @@ -217,7 +217,7 @@ static void globalLockInit(void) /* LockClaimGlobalRecursive -- claim the global recursive lock */ -void LockClaimGlobalRecursive(void) +void (LockClaimGlobalRecursive)(void) { int res; @@ -230,7 +230,7 @@ void LockClaimGlobalRecursive(void) /* LockReleaseGlobalRecursive -- release the global recursive lock */ -void LockReleaseGlobalRecursive(void) +void (LockReleaseGlobalRecursive)(void) { LockReleaseRecursive(globalRecLock); } @@ -238,7 +238,7 @@ void LockReleaseGlobalRecursive(void) /* LockClaimGlobal -- claim the global non-recursive lock */ -void LockClaimGlobal(void) +void (LockClaimGlobal)(void) { int res; @@ -251,7 +251,7 @@ void LockClaimGlobal(void) /* LockReleaseGlobal -- release the global non-recursive lock */ -void LockReleaseGlobal(void) +void (LockReleaseGlobal)(void) { LockReleaseMPM(globalLock); } diff --git a/mps/code/lockw3.c b/mps/code/lockw3.c index 258b31bff44..2fdc2800032 100644 --- a/mps/code/lockw3.c +++ b/mps/code/lockw3.c @@ -40,18 +40,18 @@ typedef struct LockStruct { } LockStruct; -size_t LockSize(void) +size_t (LockSize)(void) { return sizeof(LockStruct); } -Bool LockCheck(Lock lock) +Bool (LockCheck)(Lock lock) { CHECKS(Lock, lock); return TRUE; } -void LockInit(Lock lock) +void (LockInit)(Lock lock) { AVER(lock != NULL); lock->claims = 0; @@ -60,7 +60,7 @@ void LockInit(Lock lock) AVERT(Lock, lock); } -void LockFinish(Lock lock) +void (LockFinish)(Lock lock) { AVERT(Lock, lock); /* Lock should not be finished while held */ @@ -69,7 +69,7 @@ void LockFinish(Lock lock) lock->sig = SigInvalid; } -void LockClaim(Lock lock) +void (LockClaim)(Lock lock) { AVERT(Lock, lock); EnterCriticalSection(&lock->cs); @@ -79,7 +79,7 @@ void LockClaim(Lock lock) lock->claims = 1; } -void LockReleaseMPM(Lock lock) +void (LockReleaseMPM)(Lock lock) { AVERT(Lock, lock); AVER(lock->claims == 1); /* The lock should only be held once */ @@ -87,7 +87,7 @@ void LockReleaseMPM(Lock lock) LeaveCriticalSection(&lock->cs); } -void LockClaimRecursive(Lock lock) +void (LockClaimRecursive)(Lock lock) { AVERT(Lock, lock); EnterCriticalSection(&lock->cs); @@ -95,7 +95,7 @@ void LockClaimRecursive(Lock lock) AVER(lock->claims > 0); } -void LockReleaseRecursive(Lock lock) +void (LockReleaseRecursive)(Lock lock) { AVERT(Lock, lock); AVER(lock->claims > 0); @@ -129,27 +129,27 @@ static void lockEnsureGlobalLock(void) } } -void LockClaimGlobalRecursive(void) +void (LockClaimGlobalRecursive)(void) { lockEnsureGlobalLock(); AVER(globalLockInit); 
LockClaimRecursive(globalRecLock); } -void LockReleaseGlobalRecursive(void) +void (LockReleaseGlobalRecursive)(void) { AVER(globalLockInit); LockReleaseRecursive(globalRecLock); } -void LockClaimGlobal(void) +void (LockClaimGlobal)(void) { lockEnsureGlobalLock(); AVER(globalLockInit); LockClaim(globalLock); } -void LockReleaseGlobal(void) +void (LockReleaseGlobal)(void) { AVER(globalLockInit); LockReleaseMPM(globalLock); diff --git a/mps/code/locus.c b/mps/code/locus.c index 06fedac0473..aea81751725 100644 --- a/mps/code/locus.c +++ b/mps/code/locus.c @@ -6,7 +6,8 @@ * DESIGN * * See and for basic locus stuff. - * See for chains. + * See for chains. See for the + * collection strategy. */ #include "chain.h" @@ -88,8 +89,6 @@ static Bool GenDescCheck(GenDesc gen) /* nothing to check for capacity */ CHECKL(gen->mortality >= 0.0); CHECKL(gen->mortality <= 1.0); - CHECKL(gen->proflow >= 0.0); - CHECKL(gen->proflow <= 1.0); CHECKD_NOSIG(Ring, &gen->locusRing); return TRUE; } @@ -142,7 +141,6 @@ Res GenDescDescribe(GenDesc gen, mps_lib_FILE *stream, Count depth) " zones $B\n", (WriteFB)gen->zones, " capacity $U\n", (WriteFU)gen->capacity, " mortality $D\n", (WriteFD)gen->mortality, - " proflow $D\n", (WriteFD)gen->proflow, NULL); if (res != ResOK) return res; @@ -187,9 +185,9 @@ Res ChainCreate(Chain *chainReturn, Arena arena, size_t genCount, gens[i].zones = ZoneSetEMPTY; gens[i].capacity = params[i].capacity; gens[i].mortality = params[i].mortality; - gens[i].proflow = 1.0; /* @@@@ temporary */ RingInit(&gens[i].locusRing); gens[i].sig = GenDescSig; + AVERT(GenDesc, &gens[i]); } res = ControlAlloc(&p, arena, sizeof(ChainStruct), FALSE); @@ -242,8 +240,10 @@ void ChainDestroy(Chain chain) size_t i; AVERT(Chain, chain); + AVER(chain->activeTraces == TraceSetEMPTY); - arena = chain->arena; genCount = chain->genCount; + arena = chain->arena; + genCount = chain->genCount; RingRemove(&chain->chainRing); chain->sig = SigInvalid; for (i = 0; i < genCount; ++i) { @@ -265,55 +265,75 @@ size_t ChainGens(Chain chain) } -/* ChainAlloc -- allocate tracts in a generation */ +/* ChainGen -- return a generation in a chain, or the arena top generation */ -Res ChainAlloc(Seg *segReturn, Chain chain, Serial genNr, SegClass class, - Size size, Pool pool, Bool withReservoirPermit, - ArgList args) +GenDesc ChainGen(Chain chain, Index gen) +{ + AVERT(Chain, chain); + AVER(gen <= chain->genCount); + + if (gen < chain->genCount) + return &chain->gens[gen]; + else + return &chain->arena->topGen; +} + + +/* PoolGenAlloc -- allocate a segment in a pool generation and update + * accounting + */ + +Res PoolGenAlloc(Seg *segReturn, PoolGen pgen, SegClass class, Size size, + Bool withReservoirPermit, ArgList args) { SegPrefStruct pref; Res res; Seg seg; ZoneSet zones, moreZones; Arena arena; + GenDesc gen; - AVERT(Chain, chain); - AVER(genNr <= chain->genCount); + AVER(segReturn != NULL); + AVERT(PoolGen, pgen); + AVERT(SegClass, class); + AVER(size > 0); + AVERT(Bool, withReservoirPermit); + AVERT(ArgList, args); - arena = chain->arena; - if (genNr < chain->genCount) - zones = chain->gens[genNr].zones; - else - zones = arena->topGen.zones; + arena = PoolArena(pgen->pool); + gen = pgen->gen; + zones = gen->zones; SegPrefInit(&pref); pref.high = FALSE; pref.zones = zones; pref.avoid = ZoneSetBlacklist(arena); - res = SegAlloc(&seg, class, &pref, size, pool, withReservoirPermit, args); + res = SegAlloc(&seg, class, &pref, size, pgen->pool, withReservoirPermit, + args); if (res != ResOK) return res; moreZones = 
ZoneSetUnion(zones, ZoneSetOfSeg(arena, seg)); + gen->zones = moreZones; if (!ZoneSetSuper(zones, moreZones)) { - /* Tracking the whole zoneset for each generation number gives - * more understandable telemetry than just reporting the added + /* Tracking the whole zoneset for each generation gives more + * understandable telemetry than just reporting the added * zones. */ - EVENT3(ArenaGenZoneAdd, arena, genNr, moreZones); + EVENT3(ArenaGenZoneAdd, arena, gen, moreZones); } - if (genNr < chain->genCount) - chain->gens[genNr].zones = moreZones; - else - chain->arena->topGen.zones = moreZones; - + size = SegSize(seg); + pgen->totalSize += size; + STATISTIC_STAT ({ + ++ pgen->segs; + pgen->freeSize += size; + }); *segReturn = seg; return ResOK; } - /* ChainDeferral -- time until next ephemeral GC for this chain */ double ChainDeferral(Chain chain) @@ -452,59 +472,257 @@ Res ChainDescribe(Chain chain, mps_lib_FILE *stream, Count depth) /* PoolGenInit -- initialize a PoolGen */ -Res PoolGenInit(PoolGen gen, Chain chain, Serial nr, Pool pool) +Res PoolGenInit(PoolGen pgen, GenDesc gen, Pool pool) { - /* Can't check gen, because it's not been initialized. */ - AVER(gen != NULL); - AVERT(Chain, chain); - AVER(nr <= chain->genCount); + /* Can't check pgen, because it's not been initialized. */ + AVER(pgen != NULL); + AVERT(GenDesc, gen); AVERT(Pool, pool); AVER(PoolHasAttr(pool, AttrGC)); - gen->nr = nr; - gen->pool = pool; - gen->chain = chain; - RingInit(&gen->genRing); - gen->totalSize = (Size)0; - gen->newSize = (Size)0; - gen->sig = PoolGenSig; + pgen->pool = pool; + pgen->gen = gen; + RingInit(&pgen->genRing); + STATISTIC(pgen->segs = 0); + pgen->totalSize = 0; + STATISTIC(pgen->freeSize = 0); + pgen->newSize = 0; + STATISTIC(pgen->oldSize = 0); + pgen->newDeferredSize = 0; + STATISTIC(pgen->oldDeferredSize = 0); + pgen->sig = PoolGenSig; + AVERT(PoolGen, pgen); - if(nr != chain->genCount) { - RingAppend(&chain->gens[nr].locusRing, &gen->genRing); - } else { - /* Dynamic generation is linked to the arena, not the chain. 
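
A hedged sketch of how a collected pool class might now set up its per-generation accounting, given the revised PoolGenInit signature and ChainGen above (the names pool, chain, pgens, i and res are illustrative; the extra iteration covers the arena's top generation, which ChainGen returns when the index equals the generation count):

    for (i = 0; i <= ChainGens(chain); ++i) {
      res = PoolGenInit(&pgens[i], ChainGen(chain, i), pool);
      if (res != ResOK)
        goto failGenInit;
    }
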
*/ - RingAppend(&chain->arena->topGen.locusRing, &gen->genRing); - } - AVERT(PoolGen, gen); + RingAppend(&gen->locusRing, &pgen->genRing); return ResOK; } /* PoolGenFinish -- finish a PoolGen */ -void PoolGenFinish(PoolGen gen) +void PoolGenFinish(PoolGen pgen) { - AVERT(PoolGen, gen); + AVERT(PoolGen, pgen); + AVER(pgen->totalSize == 0); + AVER(pgen->newSize == 0); + AVER(pgen->newDeferredSize == 0); + STATISTIC_STAT ({ + AVER(pgen->segs == 0); + AVER(pgen->freeSize == 0); + AVER(pgen->oldSize == 0); + AVER(pgen->oldDeferredSize == 0); + }); - gen->sig = SigInvalid; - RingRemove(&gen->genRing); + pgen->sig = SigInvalid; + RingRemove(&pgen->genRing); } /* PoolGenCheck -- check a PoolGen */ -Bool PoolGenCheck(PoolGen gen) +Bool PoolGenCheck(PoolGen pgen) { - CHECKS(PoolGen, gen); + CHECKS(PoolGen, pgen); /* nothing to check about serial */ - CHECKU(Pool, gen->pool); - CHECKU(Chain, gen->chain); - CHECKD_NOSIG(Ring, &gen->genRing); - CHECKL(gen->newSize <= gen->totalSize); + CHECKU(Pool, pgen->pool); + CHECKU(GenDesc, pgen->gen); + CHECKD_NOSIG(Ring, &pgen->genRing); + STATISTIC_STAT ({ + CHECKL((pgen->totalSize == 0) == (pgen->segs == 0)); + CHECKL(pgen->totalSize >= pgen->segs * ArenaAlign(PoolArena(pgen->pool))); + CHECKL(pgen->totalSize == pgen->freeSize + pgen->newSize + pgen->oldSize + + pgen->newDeferredSize + pgen->oldDeferredSize); + }); return TRUE; } +/* PoolGenAccountForFill -- accounting for allocation + * + * Call this when the pool allocates memory to the client program via + * BufferFill. The deferred flag indicates whether the accounting of + * this memory (for the purpose of scheduling collections) should be + * deferred until later. + * + * See + */ + +void PoolGenAccountForFill(PoolGen pgen, Size size, Bool deferred) +{ + AVERT(PoolGen, pgen); + AVERT(Bool, deferred); + + STATISTIC_STAT ({ + AVER(pgen->freeSize >= size); + pgen->freeSize -= size; + }); + if (deferred) + pgen->newDeferredSize += size; + else + pgen->newSize += size; +} + + +/* PoolGenAccountForEmpty -- accounting for emptying a buffer + * + * Call this when the client program returns memory (that was never + * condemned) to the pool via BufferEmpty. The deferred flag is as for + * PoolGenAccountForFill. + * + * See + */ + +void PoolGenAccountForEmpty(PoolGen pgen, Size unused, Bool deferred) +{ + AVERT(PoolGen, pgen); + AVERT(Bool, deferred); + + if (deferred) { + AVER(pgen->newDeferredSize >= unused); + pgen->newDeferredSize -= unused; + } else { + AVER(pgen->newSize >= unused); + pgen->newSize -= unused; + } + STATISTIC(pgen->freeSize += unused); +} + + +/* PoolGenAccountForAge -- accounting for condemning + * + * Call this when memory is condemned via PoolWhiten. The size + * parameter should be the amount of memory that is being condemned + * for the first time. The deferred flag is as for PoolGenAccountForFill. + * + * See + */ + +void PoolGenAccountForAge(PoolGen pgen, Size size, Bool deferred) +{ + AVERT(PoolGen, pgen); + + if (deferred) { + AVER(pgen->newDeferredSize >= size); + pgen->newDeferredSize -= size; + STATISTIC(pgen->oldDeferredSize += size); + } else { + AVER(pgen->newSize >= size); + pgen->newSize -= size; + STATISTIC(pgen->oldSize += size); + } +} + + +/* PoolGenAccountForReclaim -- accounting for reclaiming + * + * Call this when reclaiming memory, passing the amount of memory that + * was reclaimed. The deferred flag is as for PoolGenAccountForFill. 
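 * (Taken together, a pool that does not defer accounting would make
 * these calls in roughly this order over the life of some memory --
 * the size arguments filled, unused, condemned and dead are
 * illustrative names, not MPS identifiers:
 *   PoolGenAccountForFill(pgen, filled, FALSE);      on BufferFill
 *   PoolGenAccountForEmpty(pgen, unused, FALSE);     on BufferEmpty
 *   PoolGenAccountForAge(pgen, condemned, FALSE);    on first condemnation
 *   PoolGenAccountForReclaim(pgen, dead, FALSE);     when memory is reclaimed
 * )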
+ * + * See + */ + +void PoolGenAccountForReclaim(PoolGen pgen, Size reclaimed, Bool deferred) +{ + AVERT(PoolGen, pgen); + AVERT(Bool, deferred); + + STATISTIC_STAT ({ + if (deferred) { + AVER(pgen->oldDeferredSize >= reclaimed); + pgen->oldDeferredSize -= reclaimed; + } else { + AVER(pgen->oldSize >= reclaimed); + pgen->oldSize -= reclaimed; + } + pgen->freeSize += reclaimed; + }); +} + + +/* PoolGenUndefer -- finish deferring accounting + * + * Call this when exiting ramp mode, passing the amount of old + * (condemned at least once) and new (never condemned) memory whose + * accounting was deferred (for example, during a ramp). + * + * See + */ + +void PoolGenUndefer(PoolGen pgen, Size oldSize, Size newSize) +{ + AVERT(PoolGen, pgen); + STATISTIC_STAT ({ + AVER(pgen->oldDeferredSize >= oldSize); + pgen->oldDeferredSize -= oldSize; + pgen->oldSize += oldSize; + }); + AVER(pgen->newDeferredSize >= newSize); + pgen->newDeferredSize -= newSize; + pgen->newSize += newSize; +} + + +/* PoolGenAccountForSegSplit -- accounting for splitting a segment */ + +void PoolGenAccountForSegSplit(PoolGen pgen) +{ + AVERT(PoolGen, pgen); + STATISTIC_STAT ({ + AVER(pgen->segs >= 1); /* must be at least one segment to split */ + ++ pgen->segs; + }); +} + + +/* PoolGenAccountForSegMerge -- accounting for merging a segment */ + +void PoolGenAccountForSegMerge(PoolGen pgen) +{ + AVERT(PoolGen, pgen); + STATISTIC_STAT ({ + AVER(pgen->segs >= 2); /* must be at least two segments to merge */ + -- pgen->segs; + }); +} + + +/* PoolGenFree -- free a segment and update accounting + * + * Pass the amount of memory in the segment that is accounted as free, + * old, or new, respectively. The deferred flag is as for + * PoolGenAccountForFill. + * + * See + */ + +void PoolGenFree(PoolGen pgen, Seg seg, Size freeSize, Size oldSize, + Size newSize, Bool deferred) +{ + Size size; + + AVERT(PoolGen, pgen); + AVERT(Seg, seg); + + size = SegSize(seg); + AVER(freeSize + oldSize + newSize == size); + + /* Pretend to age and reclaim the contents of the segment to ensure + * that the entire segment is accounted as free. 
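 * (Illustrative example, not from the original source: a segment whose
 * contents are entirely new -- never condemned -- would be freed with
 * PoolGenFree(pgen, seg, 0, 0, SegSize(seg), deferred); the age and
 * reclaim calls below then account the whole of SegSize(seg) as free
 * before it is subtracted from the totals.)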
*/ + PoolGenAccountForAge(pgen, newSize, deferred); + PoolGenAccountForReclaim(pgen, oldSize + newSize, deferred); + + AVER(pgen->totalSize >= size); + pgen->totalSize -= size; + STATISTIC_STAT ({ + AVER(pgen->segs > 0); + -- pgen->segs; + AVER(pgen->freeSize >= size); + pgen->freeSize -= size; + }); + SegFree(seg); +} + + /* PoolGenDescribe -- describe a PoolGen */ Res PoolGenDescribe(PoolGen pgen, mps_lib_FILE *stream, Count depth) @@ -515,14 +733,17 @@ Res PoolGenDescribe(PoolGen pgen, mps_lib_FILE *stream, Count depth) if (stream == NULL) return ResFAIL; res = WriteF(stream, depth, - "PoolGen $P ($U) {\n", (WriteFP)pgen, (WriteFU)pgen->nr, + "PoolGen $P {\n", (WriteFP)pgen, " pool $P ($U) \"$S\"\n", (WriteFP)pgen->pool, (WriteFU)pgen->pool->serial, (WriteFS)pgen->pool->class->name, - " chain $P\n", (WriteFP)pgen->chain, + " segs $U\n", (WriteFU)pgen->segs, " totalSize $U\n", (WriteFU)pgen->totalSize, + " freeSize $U\n", (WriteFU)pgen->freeSize, + " oldSize $U\n", (WriteFU)pgen->oldSize, + " oldDeferredSize $U\n", (WriteFU)pgen->oldDeferredSize, " newSize $U\n", (WriteFU)pgen->newSize, - " newSizeAtCreate $U\n", (WriteFU)pgen->newSizeAtCreate, + " newDeferredSize $U\n", (WriteFU)pgen->newDeferredSize, "} PoolGen $P\n", (WriteFP)pgen, NULL); return res; @@ -542,9 +763,9 @@ void LocusInit(Arena arena) gen->zones = ZoneSetEMPTY; gen->capacity = 0; /* unused */ gen->mortality = 0.51; - gen->proflow = 0.0; RingInit(&gen->locusRing); gen->sig = GenDescSig; + AVERT(GenDesc, gen); } diff --git a/mps/code/misc.h b/mps/code/misc.h index 7380421d5c5..3d0f259ff72 100644 --- a/mps/code/misc.h +++ b/mps/code/misc.h @@ -152,6 +152,19 @@ typedef const struct SrcIdStruct { #define UNUSED(param) ((void)param) +/* UNUSED_POINTER, UNUSED_SIZE -- values for unused arguments + * + * Use these values for unused pointer, size closure arguments and + * check them in the callback or visitor. + * + * We use PointerAdd rather than a cast to avoid "warning C4306: 'type + * cast' : conversion from 'unsigned int' to 'Pointer' of greater + * size" on platform w3i6mv. + */ +#define UNUSED_POINTER PointerAdd(0, 0xB60405ED) /* PointeR UNUSED */ +#define UNUSED_SIZE ((Size)0x520405ED) /* SiZe UNUSED */ + + /* PARENT -- parent structure * * Given a pointer to a field of a structure this returns a pointer to @@ -169,6 +182,19 @@ typedef const struct SrcIdStruct { ((type *)(void *)((char *)(p) - offsetof(type, field))) + +/* BOOLFIELD -- declare a Boolean bitfield + * + * A Boolean bitfield needs to be unsigned (not Bool), so that its + * values are 0 and 1 (not 0 and -1), in order to avoid a sign + * conversion (which would be a compiler error) when assigning TRUE to + * the field. 
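 * (A minimal illustration, with ExampleStruct as a made-up name:
 *   typedef struct ExampleStruct {
 *     BOOLFIELD(inLand);       which expands to: unsigned inLand : 1;
 *     BOOLFIELD(deferred);
 *   } ExampleStruct;
 * after which assigning TRUE to either field stores 1 without any
 * sign conversion.)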
+ * + * See + */ +#define BOOLFIELD(name) unsigned name : 1 + + /* BITFIELD -- coerce a value into a bitfield * * This coerces value to the given width and type in a way that avoids diff --git a/mps/code/mpm.h b/mps/code/mpm.h index c243a4386c4..8ff053ddb12 100644 --- a/mps/code/mpm.h +++ b/mps/code/mpm.h @@ -87,6 +87,9 @@ extern Addr (AddrAlignDown)(Addr addr, Align align); #define AddrIsAligned(p, a) WordIsAligned((Word)(p), a) #define AddrAlignUp(p, a) ((Addr)WordAlignUp((Word)(p), a)) +#define AddrRoundUp(p, r) ((Addr)WordRoundUp((Word)(p), r)) + +#define ReadonlyAddrAdd(p, s) ((ReadonlyAddr)((const char *)(p) + (s))) #define SizeIsAligned(s, a) WordIsAligned((Word)(s), a) #define SizeAlignUp(s, a) ((Size)WordAlignUp((Word)(s), a)) @@ -281,13 +284,11 @@ extern BufferClass PoolNoBufferClass(void); /* Abstract Pool Classes Interface -- see */ -extern void PoolClassMixInAllocFree(PoolClass class); extern void PoolClassMixInBuffer(PoolClass class); extern void PoolClassMixInScan(PoolClass class); extern void PoolClassMixInFormat(PoolClass class); extern void PoolClassMixInCollect(PoolClass class); extern AbstractPoolClass AbstractPoolClassGet(void); -extern AbstractAllocFreePoolClass AbstractAllocFreePoolClassGet(void); extern AbstractBufferPoolClass AbstractBufferPoolClassGet(void); extern AbstractBufferPoolClass AbstractSegBufPoolClassGet(void); extern AbstractScanPoolClass AbstractScanPoolClassGet(void); @@ -496,8 +497,8 @@ extern void ArenaFinish(Arena arena); extern Res ArenaDescribe(Arena arena, mps_lib_FILE *stream, Count depth); extern Res ArenaDescribeTracts(Arena arena, mps_lib_FILE *stream, Count depth); extern Bool ArenaAccess(Addr addr, AccessSet mode, MutatorFaultContext context); -extern Res ArenaFreeCBSInsert(Arena arena, Addr base, Addr limit); -extern void ArenaFreeCBSDelete(Arena arena, Addr base, Addr limit); +extern Res ArenaFreeLandInsert(Arena arena, Addr base, Addr limit); +extern void ArenaFreeLandDelete(Arena arena, Addr base, Addr limit); extern Bool GlobalsCheck(Globals arena); @@ -520,24 +521,27 @@ extern Ring GlobalsRememberedSummaryRing(Globals); #define ArenaGreyRing(arena, rank) (&(arena)->greyRing[rank]) #define ArenaPoolRing(arena) (&ArenaGlobals(arena)->poolRing) +extern void ArenaEnterLock(Arena arena, Bool recursive); +extern void ArenaLeaveLock(Arena arena, Bool recursive); + extern void (ArenaEnter)(Arena arena); extern void (ArenaLeave)(Arena arena); +extern void (ArenaPoll)(Globals globals); -#if defined(THREAD_SINGLE) && defined(PROTECTION_NONE) +#if defined(SHIELD) +#define ArenaEnter(arena) ArenaEnterLock(arena, FALSE) +#define ArenaLeave(arena) ArenaLeaveLock(arena, FALSE) +#elif defined(SHIELD_NONE) #define ArenaEnter(arena) UNUSED(arena) -#define ArenaLeave(arena) UNUSED(arena) -#endif +#define ArenaLeave(arena) AVER(arena->busyTraces == TraceSetEMPTY) +#define ArenaPoll(globals) UNUSED(globals) +#else +#error "No shield configuration." +#endif /* SHIELD */ extern void ArenaEnterRecursive(Arena arena); extern void ArenaLeaveRecursive(Arena arena); -extern void (ArenaPoll)(Globals globals); -#ifdef MPS_PROD_EPCORE -#define ArenaPoll(globals) UNUSED(globals) -#endif -/* .nogc.why: ScriptWorks doesn't use MM-provided incremental GC, so */ -/* doesn't need to poll when allocating. 
*/ - extern Bool (ArenaStep)(Globals globals, double interval, double multiplier); extern void ArenaClamp(Globals globals); extern void ArenaRelease(Globals globals); @@ -813,7 +817,7 @@ extern AllocPattern AllocPatternRamp(void); extern AllocPattern AllocPatternRampCollectAll(void); -/* FindDelete -- see and */ +/* FindDelete -- see */ extern Bool FindDeleteCheck(FindDelete findDelete); @@ -894,7 +898,9 @@ extern void (ShieldSuspend)(Arena arena); extern void (ShieldResume)(Arena arena); extern void (ShieldFlush)(Arena arena); -#if defined(THREAD_SINGLE) && defined(PROTECTION_NONE) +#if defined(SHIELD) +/* Nothing to do: functions declared in all shield configurations. */ +#elif defined(SHIELD_NONE) #define ShieldRaise(arena, seg, mode) \ BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END #define ShieldLower(arena, seg, mode) \ @@ -908,7 +914,9 @@ extern void (ShieldFlush)(Arena arena); #define ShieldSuspend(arena) BEGIN UNUSED(arena); END #define ShieldResume(arena) BEGIN UNUSED(arena); END #define ShieldFlush(arena) BEGIN UNUSED(arena); END -#endif +#else +#error "No shield configuration." +#endif /* SHIELD */ /* Protection Interface @@ -996,6 +1004,37 @@ extern Size VMReserved(VM vm); extern Size VMMapped(VM vm); +/* Land Interface -- see */ + +extern Bool LandCheck(Land land); +#define LandArena(land) ((land)->arena) +#define LandAlignment(land) ((land)->alignment) +extern Size LandSize(Land land); +extern Res LandInit(Land land, LandClass class, Arena arena, Align alignment, void *owner, ArgList args); +extern Res LandCreate(Land *landReturn, Arena arena, LandClass class, Align alignment, void *owner, ArgList args); +extern void LandDestroy(Land land); +extern void LandFinish(Land land); +extern Res LandInsert(Range rangeReturn, Land land, Range range); +extern Res LandDelete(Range rangeReturn, Land land, Range range); +extern Bool LandIterate(Land land, LandVisitor visitor, void *closureP, Size closureS); +extern Bool LandIterateAndDelete(Land land, LandDeleteVisitor visitor, void *closureP, Size closureS); +extern Bool LandFindFirst(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete); +extern Bool LandFindLast(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete); +extern Bool LandFindLargest(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete); +extern Res LandFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high); +extern Res LandDescribe(Land land, mps_lib_FILE *stream, Count depth); +extern Bool LandFlush(Land dest, Land src); + +extern Size LandSlowSize(Land land); +extern Bool LandClassCheck(LandClass class); +extern LandClass LandClassGet(void); +#define LAND_SUPERCLASS(className) ((LandClass)SUPERCLASS(className)) +#define DEFINE_LAND_CLASS(className, var) \ + DEFINE_ALIAS_CLASS(className, LandClass, var) +#define IsLandSubclass(land, className) \ + IsSubclassPoly((land)->class, className ## Get()) + + /* Stack Probe */ extern void StackProbe(Size depth); diff --git a/mps/code/mpmss.c b/mps/code/mpmss.c index b46043ef153..f881459ba2e 100644 --- a/mps/code/mpmss.c +++ b/mps/code/mpmss.c @@ -26,19 +26,19 @@ /* stress -- create a pool of the requested type and allocate in it */ -static mps_res_t stress(mps_class_t class, size_t (*size)(size_t i), - mps_arena_t arena, ...) 
+static mps_res_t stress(mps_arena_t arena, size_t (*size)(size_t i), + const char *name, mps_class_t pool_class, + mps_arg_s *args) { mps_res_t res; mps_pool_t pool; - va_list arg; size_t i, k; int *ps[testSetSIZE]; size_t ss[testSetSIZE]; - va_start(arg, arena); - res = mps_pool_create_v(&pool, arena, class, arg); - va_end(arg); + printf("%s\n", name); + + res = mps_pool_create_k(&pool, arena, pool_class, args); if (res != MPS_RES_OK) return res; @@ -89,7 +89,7 @@ static mps_res_t stress(mps_class_t class, size_t (*size)(size_t i), } -/* randomSize -- produce sizes both latge and small */ +/* randomSize -- produce sizes both large and small */ static size_t randomSize(size_t i) { @@ -101,7 +101,7 @@ static size_t randomSize(size_t i) } -/* randomSize8 -- produce sizes both latge and small, 8-byte aligned */ +/* randomSize8 -- produce sizes both large and small, 8-byte aligned */ static size_t randomSize8(size_t i) { @@ -123,61 +123,90 @@ static size_t fixedSize(size_t i) static mps_pool_debug_option_s bothOptions = { - /* .fence_template = */ (const void *)"postpostpostpost", - /* .fence_size = */ MPS_PF_ALIGN, - /* .free_template = */ (const void *)"DEAD", + /* .fence_template = */ "post", + /* .fence_size = */ 4, + /* .free_template = */ "DEAD", /* .free_size = */ 4 }; static mps_pool_debug_option_s fenceOptions = { - /* .fence_template = */ (const void *)"\0XXX ''\"\"'' XXX\0", - /* .fence_size = */ 16, + /* .fence_template = */ "123456789abcdef", + /* .fence_size = */ 15, /* .free_template = */ NULL, /* .free_size = */ 0 }; /* testInArena -- test all the pool classes in the given arena */ -static void testInArena(mps_arena_t arena, mps_pool_debug_option_s *options) +static void testInArena(mps_arena_class_t arena_class, mps_arg_s *arena_args, + mps_pool_debug_option_s *options) { - /* IWBN to test MVFFDebug, but the MPS doesn't support debugging */ - /* cross-segment allocation (possibly MVFF ought not to). 
*/ - printf("MVFF\n"); - die(stress(mps_class_mvff(), randomSize8, arena, - (size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE), - "stress MVFF"); - printf("MV debug\n"); - die(stress(mps_class_mv_debug(), randomSize, arena, - options, (size_t)65536, (size_t)32, (size_t)65536), - "stress MV debug"); + mps_arena_t arena; - printf("MFS\n"); - fixedSizeSize = 13; - die(stress(mps_class_mfs(), fixedSize, arena, (size_t)100000, fixedSizeSize), + die(mps_arena_create_k(&arena, arena_class, arena_args), + "mps_arena_create"); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = sizeof(void *) << (rnd() % 4); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + die(stress(arena, randomSize8, "MVFF", mps_class_mvff(), args), + "stress MVFF"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = sizeof(void *) << (rnd() % 4); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options); + die(stress(arena, randomSize8, "MVFF debug", mps_class_mvff_debug(), args), + "stress MVFF debug"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = (mps_align_t)1 << (rnd() % 6); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + die(stress(arena, randomSize, "MV", mps_class_mv(), args), + "stress MV"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = (mps_align_t)1 << (rnd() % 6); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, options); + die(stress(arena, randomSize, "MV debug", mps_class_mv_debug(), args), + "stress MV debug"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + fixedSizeSize = 1 + rnd() % 64; + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, fixedSizeSize); + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, 100000); + die(stress(arena, fixedSize, "MFS", mps_class_mfs(), args), "stress MFS"); + } MPS_ARGS_END(args); - printf("MV\n"); - die(stress(mps_class_mv(), randomSize, arena, - (size_t)65536, (size_t)32, (size_t)65536), - "stress MV"); + mps_arena_destroy(arena); } int main(int argc, char *argv[]) { - mps_arena_t arena; - testlib_init(argc, argv); - die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), - "mps_arena_create"); - testInArena(arena, &bothOptions); - mps_arena_destroy(arena); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + testInArena(mps_arena_class_vm(), args, &bothOptions); + } MPS_ARGS_END(args); - die(mps_arena_create(&arena, mps_arena_class_vm(), smallArenaSIZE), - "mps_arena_create"); - testInArena(arena, &fenceOptions); - mps_arena_destroy(arena); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, smallArenaSIZE); + testInArena(mps_arena_class_vm(), args, &fenceOptions); + } MPS_ARGS_END(args); printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); return 0; diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h index 5d38f3a3319..86087a31102 100644 --- a/mps/code/mpmst.h +++ b/mps/code/mpmst.h @@ -604,7 +604,53 @@ typedef struct GlobalsStruct { } GlobalsStruct; +/* LandClassStruct -- land class structure + * + * See . 
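Not part of the patch: the rewritten test above switches from the variadic mps_pool_create_v style to keyword arguments. For reference, a client-side sketch of the same pattern, creating an MVFF pool with mps_pool_create_k; the header name mpscmvff.h and the particular argument choices follow the usage in the test and are otherwise assumptions.

#include "mps.h"
#include "mpscmvff.h"

static mps_res_t make_mvff_pool(mps_pool_t *pool_o, mps_arena_t arena)
{
  mps_res_t res;
  MPS_ARGS_BEGIN(args) {
    MPS_ARGS_ADD(args, MPS_KEY_ALIGN, (mps_align_t)sizeof(void *));
    MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, 1);   /* mps_bool_t */
    MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, 1);
    MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, 1);
    res = mps_pool_create_k(pool_o, arena, mps_class_mvff(), args);
  } MPS_ARGS_END(args);
  return res;
}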
+ */ + +#define LandClassSig ((Sig)0x5197A4DC) /* SIGnature LAND Class */ + +typedef struct LandClassStruct { + ProtocolClassStruct protocol; + const char *name; /* class name string */ + size_t size; /* size of outer structure */ + LandSizeMethod sizeMethod; /* total size of ranges in land */ + LandInitMethod init; /* initialize the land */ + LandFinishMethod finish; /* finish the land */ + LandInsertMethod insert; /* insert a range into the land */ + LandDeleteMethod delete; /* delete a range from the land */ + LandIterateMethod iterate; /* iterate over ranges in the land */ + LandIterateAndDeleteMethod iterateAndDelete; /* iterate and maybe delete */ + LandFindMethod findFirst; /* find first range of given size */ + LandFindMethod findLast; /* find last range of given size */ + LandFindMethod findLargest; /* find largest range */ + LandFindInZonesMethod findInZones; /* find first range of given size in zone set */ + LandDescribeMethod describe; /* describe the land */ + Sig sig; /* .class.end-sig */ +} LandClassStruct; + + +/* LandStruct -- generic land structure + * + * See , + */ + +#define LandSig ((Sig)0x5197A4D9) /* SIGnature LAND */ + +typedef struct LandStruct { + Sig sig; /* */ + LandClass class; /* land class structure */ + Arena arena; /* owning arena */ + Align alignment; /* alignment of addresses */ + Bool inLand; /* prevent reentrance */ +} LandStruct; + + /* CBSStruct -- coalescing block structure + * + * CBS is a Land implementation that maintains a collection of + * disjoint ranges in a splay tree. * * See . */ @@ -612,21 +658,58 @@ typedef struct GlobalsStruct { #define CBSSig ((Sig)0x519CB599) /* SIGnature CBS */ typedef struct CBSStruct { + LandStruct landStruct; /* superclass fields come first */ SplayTreeStruct splayTreeStruct; STATISTIC_DECL(Count treeSize); - Arena arena; - Pool blockPool; - Align alignment; - Bool fastFind; /* maintain and use size property? */ - Bool zoned; /* maintain and use zone property? */ - Bool inCBS; /* prevent reentrance */ + Pool blockPool; /* pool that manages blocks */ + Size blockStructSize; /* size of block structure */ Bool ownPool; /* did we create blockPool? */ + Size size; /* total size of ranges in CBS */ /* meters for sizes of search structures at each op */ METER_DECL(treeSearch); - Sig sig; /* sig at end because embeded */ + Sig sig; /* .class.end-sig */ } CBSStruct; +/* FailoverStruct -- fail over from one land to another + * + * Failover is a Land implementation that combines two other Lands, + * using primary until it fails, and then using secondary. + * + * See . + */ + +#define FailoverSig ((Sig)0x519FA170) /* SIGnature FAILOver */ + +typedef struct FailoverStruct { + LandStruct landStruct; /* superclass fields come first */ + Land primary; /* use this land normally */ + Land secondary; /* but use this one if primary fails */ + Sig sig; /* .class.end-sig */ +} FailoverStruct; + + +/* FreelistStruct -- address-ordered freelist + * + * Freelist is a subclass of Land that maintains a collection of + * disjoint ranges in an address-ordered freelist. + * + * See . 
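Not part of the patch: a sketch of what a concrete Land class definition looks like, given the LandClassStruct method table above and the DEFINE_LAND_CLASS and LandClassGet declarations in mpm.h. Everything named Example* is hypothetical (ExampleLandStruct would be an outer structure embedding LandStruct as its first field, like CBSStruct above), and it is assumed that the base LandClass supplies defaults and the class signature for anything not overridden, following the INHERIT_CLASS idiom used elsewhere in this patch.

DEFINE_LAND_CLASS(ExampleLandClass, class)
{
  INHERIT_CLASS(class, LandClass);      /* defaults and sig from the base class */
  class->name = "EXAMPLE";
  class->size = sizeof(ExampleLandStruct);  /* outer structure embedding LandStruct */
  class->init = exampleInit;
  class->finish = exampleFinish;
  class->sizeMethod = exampleSize;
  class->insert = exampleInsert;
  class->delete = exampleDelete;
  class->iterate = exampleIterate;
  class->findFirst = exampleFindFirst;
  /* ...and so on for the remaining LandClassStruct methods... */
}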
+ */ + +#define FreelistSig ((Sig)0x519F6331) /* SIGnature FREEL */ + +typedef union FreelistBlockUnion *FreelistBlock; + +typedef struct FreelistStruct { + LandStruct landStruct; /* superclass fields come first */ + FreelistBlock list; /* first block in list or NULL if empty */ + Count listSize; /* number of blocks in list */ + Size size; /* total size of ranges in list */ + Sig sig; /* .class.end-sig */ +} FreelistStruct; + + /* ArenaStruct -- generic arena * * See . */ @@ -661,9 +744,9 @@ typedef struct mps_arena_s { Serial chunkSerial; /* next chunk number */ ChunkCacheEntryStruct chunkCache; /* just one entry */ - Bool hasFreeCBS; /* Is freeCBS available? */ + Bool hasFreeLand; /* Is freeLand available? */ MFSStruct freeCBSBlockPoolStruct; - CBSStruct freeCBSStruct; + CBSStruct freeLandStruct; ZoneSet freeZones; /* zones not yet allocated */ Bool zoned; /* use zoned allocation? */ diff --git a/mps/code/mpmtypes.h b/mps/code/mpmtypes.h index 5b871a73c5b..cc3b756c989 100644 --- a/mps/code/mpmtypes.h +++ b/mps/code/mpmtypes.h @@ -33,6 +33,7 @@ typedef void (*Fun)(void); /* */ typedef MPS_T_WORD Word; /* */ typedef unsigned char Byte; /* */ typedef struct AddrStruct *Addr; /* */ +typedef const struct AddrStruct *ReadonlyAddr; /* */ typedef Word Size; /* */ typedef Word Count; /* */ typedef Word Index; /* */ @@ -76,7 +77,6 @@ typedef struct LockStruct *Lock; /* * */ typedef struct mps_pool_s *Pool; /* */ typedef struct mps_class_s *PoolClass; /* */ typedef PoolClass AbstractPoolClass; /* */ -typedef PoolClass AbstractAllocFreePoolClass; /* */ typedef PoolClass AbstractBufferPoolClass; /* */ typedef PoolClass AbstractSegBufPoolClass; /* */ typedef PoolClass AbstractScanPoolClass; /* */ @@ -108,7 +108,10 @@ typedef struct AllocPatternStruct *AllocPattern; typedef struct AllocFrameStruct *AllocFrame; /* */ typedef struct ReservoirStruct *Reservoir; /* */ typedef struct StackContextStruct *StackContext; -typedef unsigned FindDelete; /* */ +typedef struct RangeStruct *Range; /* */ +typedef struct LandStruct *Land; /* */ +typedef struct LandClassStruct *LandClass; /* */ +typedef unsigned FindDelete; /* */ /* Arena*Method -- see */ @@ -261,6 +264,22 @@ typedef struct TraceStartMessageStruct *TraceStartMessage; typedef struct TraceMessageStruct *TraceMessage; /* trace end */ +/* Land*Method -- see */ + +typedef Res (*LandInitMethod)(Land land, ArgList args); +typedef void (*LandFinishMethod)(Land land); +typedef Size (*LandSizeMethod)(Land land); +typedef Res (*LandInsertMethod)(Range rangeReturn, Land land, Range range); +typedef Res (*LandDeleteMethod)(Range rangeReturn, Land land, Range range); +typedef Bool (*LandVisitor)(Land land, Range range, void *closureP, Size closureS); +typedef Bool (*LandDeleteVisitor)(Bool *deleteReturn, Land land, Range range, void *closureP, Size closureS); +typedef Bool (*LandIterateMethod)(Land land, LandVisitor visitor, void *closureP, Size closureS); +typedef Bool (*LandIterateAndDeleteMethod)(Land land, LandDeleteVisitor visitor, void *closureP, Size closureS); +typedef Bool (*LandFindMethod)(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete); +typedef Res (*LandFindInZonesMethod)(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high); +typedef Res (*LandDescribeMethod)(Land land, mps_lib_FILE *stream, Count depth); + + /* CONSTANTS */ @@ -281,22 +300,9 @@ typedef struct TraceMessageStruct *TraceMessage; /* trace end */ #define RankSetEMPTY BS_EMPTY(RankSet) #define 
RankSetUNIV ((RankSet)((1u << RankLIMIT) - 1)) #define AttrFMT ((Attr)(1<<0)) /* */ -#define AttrSCAN ((Attr)(1<<1)) -#define AttrPM_NO_READ ((Attr)(1<<2)) -#define AttrPM_NO_WRITE ((Attr)(1<<3)) -#define AttrALLOC ((Attr)(1<<4)) -#define AttrFREE ((Attr)(1<<5)) -#define AttrBUF ((Attr)(1<<6)) -#define AttrBUF_RESERVE ((Attr)(1<<7)) -#define AttrBUF_ALLOC ((Attr)(1<<8)) -#define AttrGC ((Attr)(1<<9)) -#define AttrINCR_RB ((Attr)(1<<10)) -#define AttrINCR_WB ((Attr)(1<<11)) -#define AttrMOVINGGC ((Attr)(1<<12)) -#define AttrMASK (AttrFMT | AttrSCAN | AttrPM_NO_READ | \ - AttrPM_NO_WRITE | AttrALLOC | AttrFREE | \ - AttrBUF | AttrBUF_RESERVE | AttrBUF_ALLOC | \ - AttrGC | AttrINCR_RB | AttrINCR_WB | AttrMOVINGGC) +#define AttrGC ((Attr)(1<<1)) +#define AttrMOVINGGC ((Attr)(1<<2)) +#define AttrMASK (AttrFMT | AttrGC | AttrMOVINGGC) /* Segment preferences */ @@ -407,7 +413,7 @@ enum { }; -/* FindDelete operations -- see and */ +/* FindDelete operations -- see */ enum { FindDeleteNONE = 1, /* don't delete after finding */ diff --git a/mps/code/mps.c b/mps/code/mps.c index f8c886ac997..115996ba7a0 100644 --- a/mps/code/mps.c +++ b/mps/code/mps.c @@ -76,6 +76,8 @@ #include "freelist.c" #include "sa.c" #include "nailboard.c" +#include "land.c" +#include "failover.c" /* Additional pool classes */ @@ -85,20 +87,31 @@ #include "poolawl.c" #include "poollo.c" #include "poolsnc.c" -#include "pooln.c" #include "poolmv2.c" #include "poolmvff.c" /* ANSI Plinth */ -#if !defined(PLINTH_NONE) /* see CONFIG_PLINTH_NONE in config.h */ +#if defined(PLINTH) /* see CONFIG_PLINTH_NONE in config.h */ #include "mpsliban.c" #include "mpsioan.c" #endif +/* Generic ("ANSI") platform */ + +#if defined(PLATFORM_ANSI) + +#include "lockan.c" /* generic locks */ +#include "than.c" /* generic threads manager */ +#include "vman.c" /* malloc-based pseudo memory mapping */ +#include "protan.c" /* generic memory protection */ +#include "prmcan.c" /* generic protection mutator context */ +#include "span.c" /* generic stack probe */ +#include "ssan.c" /* generic stack scanner */ + /* Mac OS X on 32-bit Intel built with Clang or GCC */ -#if defined(MPS_PF_XCI3LL) || defined(MPS_PF_XCI3GC) +#elif defined(MPS_PF_XCI3LL) || defined(MPS_PF_XCI3GC) #include "lockix.c" /* Posix locks */ #include "thxc.c" /* OS X Mach threading */ diff --git a/mps/code/mps.h b/mps/code/mps.h index 18767cec60f..050696e19d0 100644 --- a/mps/code/mps.h +++ b/mps/code/mps.h @@ -13,7 +13,7 @@ * `MPS_` or `_mps_` and may use any identifiers with these prefixes in * future. * - * .naming.internal: Any idenfitier beginning with underscore is for + * .naming.internal: Any identifier beginning with an underscore is for * internal use within the interface and may change or be withdrawn without * warning. * @@ -188,9 +188,6 @@ extern const struct mps_key_s _mps_key_max_size; extern const struct mps_key_s _mps_key_align; #define MPS_KEY_ALIGN (&_mps_key_align) #define MPS_KEY_ALIGN_FIELD align -extern const struct mps_key_s _mps_key_cbs_extend_by; -#define MPS_KEY_CBS_EXTEND_BY (&_mps_key_cbs_extend_by) -#define MPS_KEY_CBS_EXTEND_BY_FIELD size extern const struct mps_key_s _mps_key_interior; #define MPS_KEY_INTERIOR (&_mps_key_interior) #define MPS_KEY_INTERIOR_FIELD b @@ -326,9 +323,9 @@ typedef struct _mps_sac_s { /* .sacc: Keep in sync with . 
*/ typedef struct mps_sac_class_s { - size_t _block_size; - size_t _cached_count; - unsigned _frequency; + size_t mps_block_size; + size_t mps_cached_count; + unsigned mps_frequency; } mps_sac_class_s; #define mps_sac_classes_s mps_sac_class_s diff --git a/mps/code/mps.xcodeproj/project.pbxproj b/mps/code/mps.xcodeproj/project.pbxproj index 29e47139571..39a2d76f8ea 100644 --- a/mps/code/mps.xcodeproj/project.pbxproj +++ b/mps/code/mps.xcodeproj/project.pbxproj @@ -7,6 +7,54 @@ objects = { /* Begin PBXAggregateTarget section */ + 2215A9A9192A47BB00E9E2CE /* testci */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9AD192A47BB00E9E2CE /* Build configuration list for PBXAggregateTarget "testci" */; + buildPhases = ( + 2215A9AC192A47BB00E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9AA192A47BB00E9E2CE /* PBXTargetDependency */, + ); + name = testci; + productName = testrun; + }; + 2215A9B1192A47C500E9E2CE /* testansi */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9B5192A47C500E9E2CE /* Build configuration list for PBXAggregateTarget "testansi" */; + buildPhases = ( + 2215A9B4192A47C500E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9B2192A47C500E9E2CE /* PBXTargetDependency */, + ); + name = testansi; + productName = testrun; + }; + 2215A9B9192A47CE00E9E2CE /* testall */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9BD192A47CE00E9E2CE /* Build configuration list for PBXAggregateTarget "testall" */; + buildPhases = ( + 2215A9BC192A47CE00E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9BA192A47CE00E9E2CE /* PBXTargetDependency */, + ); + name = testall; + productName = testrun; + }; + 2215A9C1192A47D500E9E2CE /* testpoll */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 2215A9C5192A47D500E9E2CE /* Build configuration list for PBXAggregateTarget "testpoll" */; + buildPhases = ( + 2215A9C4192A47D500E9E2CE /* ShellScript */, + ); + dependencies = ( + 2215A9C2192A47D500E9E2CE /* PBXTargetDependency */, + ); + name = testpoll; + productName = testrun; + }; 22CDE8EF16E9E97D00366D0A /* testrun */ = { isa = PBXAggregateTarget; buildConfigurationList = 22CDE8F016E9E97E00366D0A /* Build configuration list for PBXAggregateTarget "testrun" */; @@ -43,11 +91,11 @@ 22B2BC3D18B643B300C33E63 /* PBXTargetDependency */, 2291A5E6175CB207001D4920 /* PBXTargetDependency */, 2291A5E8175CB20E001D4920 /* PBXTargetDependency */, - 3114A65B156E95B4001E0AA3 /* PBXTargetDependency */, 3114A5CC156E932C001E0AA3 /* PBXTargetDependency */, 3114A5EA156E93C4001E0AA3 /* PBXTargetDependency */, 224CC79D175E187C002FF81B /* PBXTargetDependency */, 22B2BC3F18B643B700C33E63 /* PBXTargetDependency */, + 3114A65B156E95B4001E0AA3 /* PBXTargetDependency */, 2231BB6D18CA986B002D6322 /* PBXTargetDependency */, 31D60034156D3D5A00337B26 /* PBXTargetDependency */, 2286E4C918F4389E004111E2 /* PBXTargetDependency */, @@ -79,6 +127,7 @@ /* End PBXAggregateTarget section */ /* Begin PBXBuildFile section */ + 2215A9C9192A495F00E9E2CE /* pooln.c in Sources */ = {isa = PBXBuildFile; fileRef = 22FACEDE18880933000FDBC1 /* pooln.c */; }; 2231BB5118CA97D8002D6322 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; 2231BB5318CA97D8002D6322 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; 2231BB5F18CA97DC002D6322 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; @@ -112,7 +161,7 @@ 
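Not part of the patch: the mps_sac_class_s members above gain an mps_ prefix, so client code that initialises allocation-cache classes now refers to them as below. The values are illustrative, block sizes are listed in increasing order as the SAC interface expects, and the mps_sac_create signature is assumed from the public interface rather than shown in this patch.

static mps_sac_class_s sac_classes[] = {
  /* mps_block_size, mps_cached_count, mps_frequency */
  {   16, 64, 8 },
  {  128, 16, 2 },
  { 1024,  4, 1 }
};

static mps_res_t make_sac(mps_sac_t *sac_o, mps_pool_t pool)
{
  return mps_sac_create(sac_o, pool,
                        sizeof(sac_classes) / sizeof(sac_classes[0]),
                        sac_classes);
}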
2291A5DB175CB05F001D4920 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; 2291A5DD175CB05F001D4920 /* libmps.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 31EEABFB156AAF9D00714D05 /* libmps.a */; }; 2291A5E4175CB076001D4920 /* exposet0.c in Sources */ = {isa = PBXBuildFile; fileRef = 2291A5AA175CAA9B001D4920 /* exposet0.c */; }; - 2291A5ED175CB5E2001D4920 /* fbmtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 2291A5E9175CB4EC001D4920 /* fbmtest.c */; }; + 2291A5ED175CB5E2001D4920 /* landtest.c in Sources */ = {isa = PBXBuildFile; fileRef = 2291A5E9175CB4EC001D4920 /* landtest.c */; }; 22B2BC2E18B6434F00C33E63 /* mps.c in Sources */ = {isa = PBXBuildFile; fileRef = 31A47BA3156C1E130039B1C2 /* mps.c */; }; 22B2BC3718B6437C00C33E63 /* scheme-advanced.c in Sources */ = {isa = PBXBuildFile; fileRef = 22B2BC2B18B6434000C33E63 /* scheme-advanced.c */; }; 22C2ACA718BE400A006B3677 /* testlib.c in Sources */ = {isa = PBXBuildFile; fileRef = 31EEAC9E156AB73400714D05 /* testlib.c */; }; @@ -287,6 +336,34 @@ /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ + 2215A9AB192A47BB00E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 2215A9B3192A47C500E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 2215A9BB192A47CE00E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; + 2215A9C3192A47D500E9E2CE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3104AFF1156D37A0000A585A; + remoteInfo = all; + }; 2231BB4E18CA97D8002D6322 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; @@ -719,7 +796,7 @@ containerPortal = 31EEABDA156AAE9E00714D05 /* Project object */; proxyType = 1; remoteGlobalIDString = 3114A64B156E9596001E0AA3; - remoteInfo = fbmtest; + remoteInfo = landtest; }; 3114A674156E9619001E0AA3 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; @@ -1338,7 +1415,7 @@ 2291A5BD175CAB2F001D4920 /* awlutth */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = awlutth; sourceTree = BUILT_PRODUCTS_DIR; }; 2291A5D1175CAFCA001D4920 /* expt825 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = expt825; sourceTree = BUILT_PRODUCTS_DIR; }; 2291A5E3175CB05F001D4920 /* exposet0 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = exposet0; sourceTree = BUILT_PRODUCTS_DIR; }; - 2291A5E9175CB4EC001D4920 /* fbmtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = fbmtest.c; sourceTree = ""; }; + 2291A5E9175CB4EC001D4920 /* landtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = landtest.c; sourceTree = ""; }; 2291A5EA175CB503001D4920 /* abq.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = abq.h; sourceTree = ""; }; 2291A5EB175CB53E001D4920 /* range.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = range.c; sourceTree = ""; }; 2291A5EC175CB53E001D4920 /* range.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = range.h; sourceTree = ""; }; @@ -1353,6 +1430,11 @@ 22E30E831886FF1400D98EA9 /* nailboard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nailboard.h; sourceTree = ""; }; 22F846AF18F4379C00982BA7 /* lockut.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lockut.c; sourceTree = ""; }; 22F846BD18F437B900982BA7 /* lockut */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = lockut; sourceTree = BUILT_PRODUCTS_DIR; }; + 22C5C99A18EC6AEC004C63D4 /* failover.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = failover.c; sourceTree = ""; }; + 22C5C99B18EC6AEC004C63D4 /* failover.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = failover.h; sourceTree = ""; }; + 22C5C99C18EC6AEC004C63D4 /* land.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = land.c; sourceTree = ""; }; + 22DD93E118ED815F00240DD2 /* failover.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = failover.txt; path = ../design/failover.txt; sourceTree = ""; }; + 22DD93E218ED815F00240DD2 /* land.txt */ = {isa = PBXFileReference; lastKnownFileType = text; name = land.txt; path = ../design/land.txt; sourceTree = ""; }; 22FA177516E8D6FC0098B23F /* amcssth */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = amcssth; sourceTree = BUILT_PRODUCTS_DIR; }; 22FA177616E8D7A80098B23F /* amcssth.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = amcssth.c; sourceTree = ""; }; 22FACED1188807FF000FDBC1 /* airtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = airtest.c; sourceTree = ""; }; @@ -1408,7 +1490,7 @@ 3114A633156E94DB001E0AA3 /* abqtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = abqtest; sourceTree = BUILT_PRODUCTS_DIR; }; 3114A63D156E94EA001E0AA3 /* abqtest.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = abqtest.c; sourceTree = ""; }; 3114A645156E9525001E0AA3 /* abq.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = abq.c; sourceTree = ""; }; - 3114A64C156E9596001E0AA3 /* fbmtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = fbmtest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3114A64C156E9596001E0AA3 /* landtest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = landtest; sourceTree = BUILT_PRODUCTS_DIR; }; 3114A662156E95D9001E0AA3 /* btcv */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = btcv; sourceTree = BUILT_PRODUCTS_DIR; }; 3114A66C156E95EB001E0AA3 /* btcv.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = btcv.c; sourceTree = ""; }; 3114A67C156E9668001E0AA3 /* mv2test */ = {isa = 
PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = mv2test; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -2060,6 +2142,7 @@ 31160D9C1899540D0071EB17 /* config.txt */, 31160D9D1899540D0071EB17 /* critical-path.txt */, 31160D9E1899540D0071EB17 /* diag.txt */, + 22DD93E118ED815F00240DD2 /* failover.txt */, 31160D9F1899540D0071EB17 /* finalize.txt */, 31160DA01899540D0071EB17 /* fix.txt */, 31160DA11899540D0071EB17 /* freelist.txt */, @@ -2069,6 +2152,7 @@ 31160DA51899540D0071EB17 /* interface-c.txt */, 31160DA61899540D0071EB17 /* io.txt */, 31160DA71899540D0071EB17 /* keyword-arguments.txt */, + 22DD93E218ED815F00240DD2 /* land.txt */, 31160DA81899540D0071EB17 /* lib.txt */, 31160DA91899540D0071EB17 /* lock.txt */, 31160DAA1899540D0071EB17 /* locus.txt */, @@ -2139,7 +2223,6 @@ 3114A613156E944A001E0AA3 /* bttest.c */, 2291A5AA175CAA9B001D4920 /* exposet0.c */, 2291A5AB175CAA9B001D4920 /* expt825.c */, - 2291A5E9175CB4EC001D4920 /* fbmtest.c */, 3114A5CD156E9369001E0AA3 /* finalcv.c */, 3114A5E5156E93B9001E0AA3 /* finaltest.c */, 3124CAC6156BE48D00753214 /* fmtdy.c */, @@ -2153,6 +2236,7 @@ 22FACED6188807FF000FDBC1 /* fmtscheme.c */, 22FACED7188807FF000FDBC1 /* fmtscheme.h */, 224CC79E175E3202002FF81B /* fotest.c */, + 2291A5E9175CB4EC001D4920 /* landtest.c */, 2231BB6818CA9834002D6322 /* locbwcss.c */, 31D60036156D3E0200337B26 /* lockcov.c */, 2231BB6918CA983C002D6322 /* locusss.c */, @@ -2244,7 +2328,7 @@ 3114A605156E9430001E0AA3 /* bttest */, 3114A61C156E9485001E0AA3 /* teletest */, 3114A633156E94DB001E0AA3 /* abqtest */, - 3114A64C156E9596001E0AA3 /* fbmtest */, + 3114A64C156E9596001E0AA3 /* landtest */, 3114A662156E95D9001E0AA3 /* btcv */, 3114A67C156E9668001E0AA3 /* mv2test */, 3114A695156E971B001E0AA3 /* messtest */, @@ -2299,10 +2383,13 @@ 311F2F5917398AE900C15B6A /* eventcom.h */, 311F2F5A17398AE900C15B6A /* eventdef.h */, 311F2F5C17398AE900C15B6A /* eventrep.h */, + 22C5C99A18EC6AEC004C63D4 /* failover.c */, + 22C5C99B18EC6AEC004C63D4 /* failover.h */, 31EEAC1A156AB2B200714D05 /* format.c */, 2291A5EE175CB768001D4920 /* freelist.c */, 2291A5EF175CB768001D4920 /* freelist.h */, 31EEAC07156AB27B00714D05 /* global.c */, + 22C5C99C18EC6AEC004C63D4 /* land.c */, 31EEAC2B156AB2F200714D05 /* ld.c */, 311F2F5E17398B0E00C15B6A /* lock.h */, 31EEAC08156AB27B00714D05 /* locus.c */, @@ -2934,9 +3021,9 @@ productReference = 3114A633156E94DB001E0AA3 /* abqtest */; productType = "com.apple.product-type.tool"; }; - 3114A64B156E9596001E0AA3 /* fbmtest */ = { + 3114A64B156E9596001E0AA3 /* landtest */ = { isa = PBXNativeTarget; - buildConfigurationList = 3114A653156E9596001E0AA3 /* Build configuration list for PBXNativeTarget "fbmtest" */; + buildConfigurationList = 3114A653156E9596001E0AA3 /* Build configuration list for PBXNativeTarget "landtest" */; buildPhases = ( 3114A648156E9596001E0AA3 /* Sources */, 3114A649156E9596001E0AA3 /* Frameworks */, @@ -2947,9 +3034,9 @@ dependencies = ( 3114A659156E95B1001E0AA3 /* PBXTargetDependency */, ); - name = fbmtest; - productName = fbmtest; - productReference = 3114A64C156E9596001E0AA3 /* fbmtest */; + name = landtest; + productName = landtest; + productReference = 3114A64C156E9596001E0AA3 /* landtest */; productType = "com.apple.product-type.tool"; }; 3114A661156E95D9001E0AA3 /* btcv */ = { @@ -3311,6 +3398,10 @@ projectRoot = ""; targets = ( 3104AFF1156D37A0000A585A /* all */, + 2215A9B9192A47CE00E9E2CE /* testall */, + 2215A9B1192A47C500E9E2CE /* testansi */, + 2215A9A9192A47BB00E9E2CE /* testci */, + 
2215A9C1192A47D500E9E2CE /* testpoll */, 22CDE8EF16E9E97D00366D0A /* testrun */, 31EEABFA156AAF9D00714D05 /* mps */, 3114A632156E94DB001E0AA3 /* abqtest */, @@ -3330,11 +3421,11 @@ 318DA8C31892B0F30089718C /* djbench */, 2291A5D3175CB05F001D4920 /* exposet0 */, 2291A5C1175CAFCA001D4920 /* expt825 */, - 3114A64B156E9596001E0AA3 /* fbmtest */, 3114A5BC156E9315001E0AA3 /* finalcv */, 3114A5D5156E93A0001E0AA3 /* finaltest */, 224CC78C175E1821002FF81B /* fotest */, 6313D46718A400B200EB03EF /* gcbench */, + 3114A64B156E9596001E0AA3 /* landtest */, 2231BB4C18CA97D8002D6322 /* locbwcss */, 31D60026156D3D3E00337B26 /* lockcov */, 2231BB5A18CA97DC002D6322 /* locusss */, @@ -3364,6 +3455,62 @@ /* End PBXProject section */ /* Begin PBXShellScriptBuildPhase section */ + 2215A9AC192A47BB00E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh \"$TARGET_BUILD_DIR\" \"$TARGET_NAME\"\n"; + showEnvVarsInLog = 0; + }; + 2215A9B4192A47C500E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh \"$TARGET_BUILD_DIR\" \"$TARGET_NAME\"\n"; + showEnvVarsInLog = 0; + }; + 2215A9BC192A47CE00E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh \"$TARGET_BUILD_DIR\" \"$TARGET_NAME\"\n"; + showEnvVarsInLog = 0; + }; + 2215A9C4192A47D500E9E2CE /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "../tool/testrun.sh \"$TARGET_BUILD_DIR\" \"$TARGET_NAME\"\n"; + showEnvVarsInLog = 0; + }; 22CDE8F416E9E9D400366D0A /* ShellScript */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; @@ -3375,7 +3522,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "../tool/testrun.sh \"$TARGET_BUILD_DIR\"\n"; + shellScript = "../tool/testrun.sh \"$TARGET_BUILD_DIR\" \"$TARGET_NAME\"\n"; showEnvVarsInLog = 0; }; /* End PBXShellScriptBuildPhase section */ @@ -3664,7 +3811,7 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 2291A5ED175CB5E2001D4920 /* fbmtest.c in Sources */, + 2291A5ED175CB5E2001D4920 /* landtest.c in Sources */, 3114A672156E95F6001E0AA3 /* testlib.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -3791,8 +3938,9 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 31D60048156D3ECF00337B26 /* testlib.c in Sources */, + 2215A9C9192A495F00E9E2CE /* pooln.c in Sources */, 31D6004B156D3EE600337B26 /* poolncv.c in Sources */, + 31D60048156D3ECF00337B26 /* testlib.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -3876,6 +4024,26 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ + 2215A9AA192A47BB00E9E2CE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9AB192A47BB00E9E2CE /* PBXContainerItemProxy */; + }; + 
2215A9B2192A47C500E9E2CE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9B3192A47C500E9E2CE /* PBXContainerItemProxy */; + }; + 2215A9BA192A47CE00E9E2CE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9BB192A47CE00E9E2CE /* PBXContainerItemProxy */; + }; + 2215A9C2192A47D500E9E2CE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3104AFF1156D37A0000A585A /* all */; + targetProxy = 2215A9C3192A47D500E9E2CE /* PBXContainerItemProxy */; + }; 2231BB4D18CA97D8002D6322 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = 31EEABFA156AAF9D00714D05 /* mps */; @@ -4183,7 +4351,7 @@ }; 3114A65B156E95B4001E0AA3 /* PBXTargetDependency */ = { isa = PBXTargetDependency; - target = 3114A64B156E9596001E0AA3 /* fbmtest */; + target = 3114A64B156E9596001E0AA3 /* landtest */; targetProxy = 3114A65A156E95B4001E0AA3 /* PBXContainerItemProxy */; }; 3114A675156E9619001E0AA3 /* PBXTargetDependency */ = { @@ -4319,6 +4487,90 @@ /* End PBXTargetDependency section */ /* Begin XCBuildConfiguration section */ + 2215A9AE192A47BB00E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testrun copy"; + }; + name = Debug; + }; + 2215A9AF192A47BB00E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testrun copy"; + }; + name = Release; + }; + 2215A9B0192A47BB00E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testrun copy"; + }; + name = RASH; + }; + 2215A9B6192A47C500E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testci copy"; + }; + name = Debug; + }; + 2215A9B7192A47C500E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testci copy"; + }; + name = Release; + }; + 2215A9B8192A47C500E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testci copy"; + }; + name = RASH; + }; + 2215A9BE192A47CE00E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testansi copy"; + }; + name = Debug; + }; + 2215A9BF192A47CE00E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testansi copy"; + }; + name = Release; + }; + 2215A9C0192A47CE00E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testansi copy"; + }; + name = RASH; + }; + 2215A9C6192A47D500E9E2CE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testall copy"; + }; + name = Debug; + }; + 2215A9C7192A47D500E9E2CE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testall copy"; + }; + name = Release; + }; + 2215A9C8192A47D500E9E2CE /* RASH */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "testall copy"; + }; + name = RASH; + }; 2231BB5618CA97D8002D6322 /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { @@ -5532,6 +5784,46 @@ /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ + 2215A9AD192A47BB00E9E2CE /* Build configuration list for PBXAggregateTarget "testci" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2215A9AE192A47BB00E9E2CE /* Debug */, + 2215A9AF192A47BB00E9E2CE /* Release */, + 2215A9B0192A47BB00E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = 
Release; + }; + 2215A9B5192A47C500E9E2CE /* Build configuration list for PBXAggregateTarget "testansi" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2215A9B6192A47C500E9E2CE /* Debug */, + 2215A9B7192A47C500E9E2CE /* Release */, + 2215A9B8192A47C500E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2215A9BD192A47CE00E9E2CE /* Build configuration list for PBXAggregateTarget "testall" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2215A9BE192A47CE00E9E2CE /* Debug */, + 2215A9BF192A47CE00E9E2CE /* Release */, + 2215A9C0192A47CE00E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 2215A9C5192A47D500E9E2CE /* Build configuration list for PBXAggregateTarget "testpoll" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 2215A9C6192A47D500E9E2CE /* Debug */, + 2215A9C7192A47D500E9E2CE /* Release */, + 2215A9C8192A47D500E9E2CE /* RASH */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; 2231BB5518CA97D8002D6322 /* Build configuration list for PBXNativeTarget "locbwcss" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -5822,7 +6114,7 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - 3114A653156E9596001E0AA3 /* Build configuration list for PBXNativeTarget "fbmtest" */ = { + 3114A653156E9596001E0AA3 /* Build configuration list for PBXNativeTarget "landtest" */ = { isa = XCConfigurationList; buildConfigurations = ( 3114A654156E9596001E0AA3 /* Debug */, diff --git a/mps/code/mpscmv2.h b/mps/code/mpscmv2.h index 8490b8f311a..8586a639901 100644 --- a/mps/code/mpscmv2.h +++ b/mps/code/mpscmv2.h @@ -9,22 +9,6 @@ #include "mps.h" -/* The mvt pool class has five extra parameters to mps_pool_create: - * mps_res_t mps_pool_create(mps_pool_t * pool, mps_arena_t arena, - * mps_class_t mvt_class, - * size_t minimum_size, - * size_t mean_size, - * size_t maximum_size, - * mps_count_t reserve_depth - * mps_count_t fragmentation_limit); - * minimum_, mean_, and maximum_size are the mimimum, mean, and - * maximum (typical) size of objects expected to be allocated in the - * pool. reserve_depth is a measure of the expected hysteresis of the - * object population. fragmentation_limit is a percentage (between 0 - * and 100): if the free space managed by the pool exceeds the - * specified percentage, the pool will resort to a "first fit" - * allocation policy. - */ extern mps_class_t mps_class_mvt(void); /* The mvt pool class supports two extensions to the pool protocol: diff --git a/mps/code/mpsicv.c b/mps/code/mpsicv.c index 55396aee3fe..6a241561a22 100644 --- a/mps/code/mpsicv.c +++ b/mps/code/mpsicv.c @@ -21,7 +21,7 @@ #define exactRootsCOUNT 49 #define ambigRootsCOUNT 49 -#define OBJECTS 200000 +#define OBJECTS 100000 #define patternFREQ 100 /* objNULL needs to be odd so that it's ignored in exactRoots. 
*/ @@ -552,6 +552,8 @@ static void *test(void *arg, size_t s) mps_free(mv, alloced_obj, 32); alloc_v_test(mv); + + mps_arena_park(arena); mps_pool_destroy(mv); mps_ap_destroy(ap); mps_root_destroy(fmtRoot); @@ -589,7 +591,6 @@ int main(int argc, char *argv[]) marker, (size_t)0), "root_create_reg"); - (mps_tramp)(&r, test, arena, 0); /* non-inlined trampoline */ mps_tramp(&r, test, arena, 0); mps_root_destroy(reg_root); mps_thread_dereg(thread); diff --git a/mps/code/mpsliban.c b/mps/code/mpsliban.c index 75a3d48d518..5e7cfdddc8b 100644 --- a/mps/code/mpsliban.c +++ b/mps/code/mpsliban.c @@ -61,13 +61,19 @@ int mps_lib_fputs(const char *s, mps_lib_FILE *stream) } -static void mps_lib_assert_fail_default(const char *file, - unsigned line, +static void mps_lib_assert_fail_default(const char *file, unsigned line, const char *condition) { - (void)fflush(stdout); /* synchronize */ - (void)fprintf(stderr, "%s:%u: MPS ASSERTION FAILED: %s\n", file, line, condition); - (void)fflush(stderr); /* make sure the message is output */ + /* Synchronize with stdout. */ + (void)fflush(stdout); + (void)fprintf(stderr, + "The MPS detected a problem!\n" + "%s:%u: MPS ASSERTION FAILED: %s\n" + "See the \"Assertions\" section in the reference manual:\n" + "http://ravenbrook.com/project/mps/master/manual/html/topic/error.html#assertions\n", + file, line, condition); + /* Ensure the message is output even if stderr is buffered. */ + (void)fflush(stderr); ASSERT_ABORT(); /* see config.h */ } diff --git a/mps/code/mv2test.c b/mps/code/mv2test.c index ce711b013d4..a3c5e807a04 100644 --- a/mps/code/mv2test.c +++ b/mps/code/mv2test.c @@ -68,11 +68,11 @@ static size_t randomSize(unsigned long i) #define TEST_SET_SIZE 1234 #define TEST_LOOPS 27 -static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size) +static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size, mps_align_t align) { mps_res_t res; - size = alignUp(size, MPS_PF_ALIGN); + size = alignUp(size, align); do { MPS_RESERVE_BLOCK(res, *p, ap, size); @@ -84,8 +84,9 @@ static mps_res_t make(mps_addr_t *p, mps_ap_t ap, size_t size) } -static mps_res_t stress(mps_class_t class, mps_arena_t arena, - size_t (*size)(unsigned long i), mps_arg_s args[]) +static mps_res_t stress(mps_arena_t arena, mps_align_t align, + size_t (*size)(unsigned long i), + mps_class_t class, mps_arg_s args[]) { mps_res_t res; mps_ap_t ap; @@ -102,7 +103,7 @@ static mps_res_t stress(mps_class_t class, mps_arena_t arena, for(i=0; i 0); AVERT(Bool, withReservoirPermit); @@ -315,7 +314,6 @@ Res PoolAlloc(Addr *pReturn, Pool pool, Size size, void PoolFree(Pool pool, Addr old, Size size) { AVERT(Pool, pool); - AVER(PoolHasAttr(pool, AttrFREE)); AVER(old != NULL); /* The pool methods should check that old is in pool. 
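Not part of the patch: the friendlier message above is only the default assertion behaviour. A client can substitute its own handler; the sketch below assumes the installable handler interface (mps_lib_assert_fail_install, declared in mpslib.h) is available at this revision, so treat it as illustrative rather than definitive.

#include <stdio.h>
#include <stdlib.h>
#include "mpslib.h"

/* Handler signature matches mps_lib_assert_fail_default above. */
static void my_assert_handler(const char *file, unsigned line,
                              const char *condition)
{
  (void)fprintf(stderr, "MPS assertion %s:%u: %s\n", file, line, condition);
  abort();
}

/* At startup; the return value (the previous handler) is ignored here: */
/* (void)mps_lib_assert_fail_install(my_assert_handler); */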
*/ AVER(size > 0); @@ -380,7 +378,6 @@ Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg) AVER(totalReturn != NULL); AVERT(ScanState, ss); AVERT(Pool, pool); - AVER(PoolHasAttr(pool, AttrSCAN)); AVERT(Seg, seg); AVER(ss->arena == pool->arena); diff --git a/mps/code/poolabs.c b/mps/code/poolabs.c index cc0eb6e838c..0d47e5c19f9 100644 --- a/mps/code/poolabs.c +++ b/mps/code/poolabs.c @@ -18,7 +18,6 @@ * * .hierarchy: define the following hierarchy of abstract pool classes: * AbstractPoolClass - implements init, finish, describe - * AbstractAllocFreePoolClass - implements alloc & free * AbstractBufferPoolClass - implements the buffer protocol * AbstractSegBufPoolClass - uses SegBuf buffer class * AbstractScanPoolClass - implements basic scanning @@ -31,7 +30,6 @@ SRCID(poolabs, "$Id$"); typedef PoolClassStruct AbstractPoolClassStruct; -typedef PoolClassStruct AbstractAllocFreePoolClassStruct; typedef PoolClassStruct AbstractBufferPoolClassStruct; typedef PoolClassStruct AbstractSegBufPoolClassStruct; typedef PoolClassStruct AbstractScanPoolClassStruct; @@ -49,23 +47,11 @@ typedef PoolClassStruct AbstractCollectPoolClassStruct; */ -/* PoolClassMixInAllocFree -- mix in the protocol for Alloc / Free */ - -void PoolClassMixInAllocFree(PoolClass class) -{ - /* Can't check class because it's not initialized yet */ - class->attr |= (AttrALLOC | AttrFREE); - class->alloc = PoolTrivAlloc; - class->free = PoolTrivFree; -} - - /* PoolClassMixInBuffer -- mix in the protocol for buffer reserve / commit */ void PoolClassMixInBuffer(PoolClass class) { /* Can't check class because it's not initialized yet */ - class->attr |= AttrBUF; class->bufferFill = PoolTrivBufferFill; class->bufferEmpty = PoolTrivBufferEmpty; /* By default, buffered pools treat frame operations as NOOPs */ @@ -81,7 +67,6 @@ void PoolClassMixInBuffer(PoolClass class) void PoolClassMixInScan(PoolClass class) { /* Can't check class because it's not initialized yet */ - class->attr |= AttrSCAN; class->access = PoolSegAccess; class->blacken = PoolTrivBlacken; class->grey = PoolTrivGrey; @@ -164,12 +149,6 @@ DEFINE_CLASS(AbstractPoolClass, class) class->sig = PoolClassSig; } -DEFINE_CLASS(AbstractAllocFreePoolClass, class) -{ - INHERIT_CLASS(class, AbstractPoolClass); - PoolClassMixInAllocFree(class); -} - DEFINE_CLASS(AbstractBufferPoolClass, class) { INHERIT_CLASS(class, AbstractPoolClass); diff --git a/mps/code/poolamc.c b/mps/code/poolamc.c index b19fcde45ae..890454ebbb8 100644 --- a/mps/code/poolamc.c +++ b/mps/code/poolamc.c @@ -45,7 +45,6 @@ typedef struct amcGenStruct { PoolGenStruct pgen; RingStruct amcRing; /* link in list of gens in pool */ Buffer forward; /* forwarding buffer */ - Count segs; /* number of segs in gen */ Sig sig; /* */ } amcGenStruct; @@ -72,12 +71,19 @@ enum { /* amcSegStruct -- AMC-specific fields appended to GCSegStruct * - * .seg-ramp-new: The "new" flag is usually true, and indicates that the - * segment has been counted towards the pool generation's newSize. It is - * set to FALSE otherwise. This is used by both ramping and hash array - * allocations. TODO: The code for this is scrappy and needs refactoring, - * and the *reasons* for setting these flags need properly documenting. - * RB 2013-07-17 + * .seg.old: The "old" flag is FALSE if the segment has never been + * collected, and so its size is accounted against the pool + * generation's newSize; it is TRUE if the segment has been collected + * at least once, and so its size is accounted against the pool + * generation's oldSize.
+ * + * .seg.deferred: The "deferred" flag is TRUE if its size accounting + * in the pool generation has been deferred. This is set if the + * segment was created in ramping mode (and so we don't want it to + * contribute to the pool generation's newSize and so provoke a + * collection via TracePoll), and by hash array allocations (where we + * don't want the allocation to provoke a collection that makes the + * location dependency stale immediately). */ typedef struct amcSegStruct *amcSeg; @@ -88,7 +94,8 @@ typedef struct amcSegStruct { GCSegStruct gcSegStruct; /* superclass fields must come first */ amcGen gen; /* generation this segment belongs to */ Nailboard board; /* nailboard for this segment or NULL if none */ - Bool new; /* .seg-ramp-new */ + BOOLFIELD(old); /* .seg.old */ + BOOLFIELD(deferred); /* .seg.deferred */ Sig sig; /* */ } amcSegStruct; @@ -106,7 +113,8 @@ static Bool amcSegCheck(amcSeg amcseg) CHECKD(Nailboard, amcseg->board); CHECKL(SegNailed(amcSeg2Seg(amcseg)) != TraceSetEMPTY); } - CHECKL(BoolCheck(amcseg->new)); + /* CHECKL(BoolCheck(amcseg->old)); */ + /* CHECKL(BoolCheck(amcseg->deferred)); */ return TRUE; } @@ -141,7 +149,8 @@ static Res AMCSegInit(Seg seg, Pool pool, Addr base, Size size, amcseg->gen = amcgen; amcseg->board = NULL; - amcseg->new = TRUE; + amcseg->old = FALSE; + amcseg->deferred = FALSE; amcseg->sig = amcSegSig; AVERT(amcSeg, amcseg); @@ -455,7 +464,6 @@ typedef struct AMCStruct { /* */ RankSet rankSet; /* rankSet for entire pool */ RingStruct genRing; /* ring of generations */ Bool gensBooted; /* used during boot (init) */ - Chain chain; /* chain used by this pool */ size_t gens; /* number of generations */ amcGen *gen; /* (pointer to) array of generations */ amcGen nursery; /* the default mutator generation */ @@ -480,7 +488,6 @@ typedef struct AMCStruct { /* */ ATTRIBUTE_UNUSED static Bool amcGenCheck(amcGen gen) { - Arena arena; AMC amc; CHECKS(amcGen, gen); @@ -489,9 +496,7 @@ static Bool amcGenCheck(amcGen gen) CHECKU(AMC, amc); CHECKD(Buffer, gen->forward); CHECKD_NOSIG(Ring, &gen->amcRing); - CHECKL((gen->pgen.totalSize == 0) == (gen->segs == 0)); - arena = amc->poolStruct.arena; - CHECKL(gen->pgen.totalSize >= gen->segs * ArenaAlign(arena)); + return TRUE; } @@ -643,12 +648,12 @@ DEFINE_BUFFER_CLASS(amcBufClass, class) /* amcGenCreate -- create a generation */ -static Res amcGenCreate(amcGen *genReturn, AMC amc, Serial genNr) +static Res amcGenCreate(amcGen *genReturn, AMC amc, GenDesc gen) { Arena arena; Buffer buffer; Pool pool; - amcGen gen; + amcGen amcgen; Res res; void *p; @@ -658,25 +663,24 @@ static Res amcGenCreate(amcGen *genReturn, AMC amc, Serial genNr) res = ControlAlloc(&p, arena, sizeof(amcGenStruct), FALSE); if(res != ResOK) goto failControlAlloc; - gen = (amcGen)p; + amcgen = (amcGen)p; res = BufferCreate(&buffer, EnsureamcBufClass(), pool, FALSE, argsNone); if(res != ResOK) goto failBufferCreate; - res = PoolGenInit(&gen->pgen, amc->chain, genNr, pool); + res = PoolGenInit(&amcgen->pgen, gen, pool); if(res != ResOK) goto failGenInit; - RingInit(&gen->amcRing); - gen->segs = 0; - gen->forward = buffer; - gen->sig = amcGenSig; + RingInit(&amcgen->amcRing); + amcgen->forward = buffer; + amcgen->sig = amcGenSig; - AVERT(amcGen, gen); + AVERT(amcGen, amcgen); - RingAppend(&amc->genRing, &gen->amcRing); - EVENT2(AMCGenCreate, amc, gen); - *genReturn = gen; + RingAppend(&amc->genRing, &amcgen->amcRing); + EVENT2(AMCGenCreate, amc, amcgen); + *genReturn = amcgen; return ResOK; failGenInit: @@ -695,8 +699,6 @@ static void 
amcGenDestroy(amcGen gen) Arena arena; AVERT(amcGen, gen); - AVER(gen->segs == 0); - AVER(gen->pgen.totalSize == 0); EVENT1(AMCGenDestroy, gen); arena = PoolArena(amcGenPool(gen)); @@ -717,16 +719,20 @@ static Res amcGenDescribe(amcGen gen, mps_lib_FILE *stream, Count depth) if(!TESTT(amcGen, gen)) return ResFAIL; + if (stream == NULL) + return ResFAIL; res = WriteF(stream, depth, - "amcGen $P ($U) {\n", - (WriteFP)gen, (WriteFU)amcGenNr(gen), - " buffer $P\n", gen->forward, - " segs $U, totalSize $U, newSize $U\n", - (WriteFU)gen->segs, - (WriteFU)gen->pgen.totalSize, - (WriteFU)gen->pgen.newSize, - "} amcGen $P\n", (WriteFP)gen, NULL); + "amcGen $P {\n", (WriteFP)gen, + " buffer $P\n", gen->forward, NULL); + if (res != ResOK) + return res; + + res = PoolGenDescribe(&gen->pgen, stream, depth + 2); + if (res != ResOK) + return res; + + res = WriteF(stream, depth, "} amcGen $P\n", (WriteFP)gen, NULL); return res; } @@ -802,6 +808,7 @@ static Res amcInitComm(Pool pool, RankSet rankSet, ArgList args) size_t genArraySize; size_t genCount; Bool interior = AMC_INTERIOR_DEFAULT; + Chain chain; ArgStruct arg; /* Suppress a warning about this structure not being used when there @@ -822,14 +829,14 @@ static Res amcInitComm(Pool pool, RankSet rankSet, ArgList args) ArgRequire(&arg, args, MPS_KEY_FORMAT); pool->format = arg.val.format; if (ArgPick(&arg, args, MPS_KEY_CHAIN)) - amc->chain = arg.val.chain; + chain = arg.val.chain; else - amc->chain = ArenaGlobals(arena)->defaultChain; + chain = ArenaGlobals(arena)->defaultChain; if (ArgPick(&arg, args, MPS_KEY_INTERIOR)) interior = arg.val.b; AVERT(Format, pool->format); - AVERT(Chain, amc->chain); + AVERT(Chain, chain); pool->alignment = pool->format->alignment; amc->rankSet = rankSet; @@ -865,7 +872,7 @@ static Res amcInitComm(Pool pool, RankSet rankSet, ArgList args) AVERT(AMC, amc); /* Init generations. */ - genCount = ChainGens(amc->chain); + genCount = ChainGens(chain); { void *p; @@ -875,11 +882,10 @@ static Res amcInitComm(Pool pool, RankSet rankSet, ArgList args) if(res != ResOK) goto failGensAlloc; amc->gen = p; - for(i = 0; i < genCount + 1; ++i) { - res = amcGenCreate(&amc->gen[i], amc, (Serial)i); - if(res != ResOK) { + for (i = 0; i <= genCount; ++i) { + res = amcGenCreate(&amc->gen[i], amc, ChainGen(chain, i)); + if (res != ResOK) goto failGenAlloc; - } } /* Set up forwarding buffers. */ for(i = 0; i < genCount; ++i) { @@ -945,21 +951,19 @@ static void AMCFinish(Pool pool) RING_FOR(node, &amc->genRing, nextNode) { amcGen gen = RING_ELT(amcGen, amcRing, node); BufferDetach(gen->forward, pool); - /* Maintain invariant < totalSize. */ - gen->pgen.newSize = (Size)0; } ring = PoolSegRing(pool); RING_FOR(node, ring, nextNode) { Seg seg = SegOfPoolRing(node); - Size size; amcGen gen = amcSegGen(seg); - - --gen->segs; - size = SegSize(seg); - gen->pgen.totalSize -= size; - - SegFree(seg); + amcSeg amcseg = Seg2amcSeg(seg); + AVERT(amcSeg, amcseg); + PoolGenFree(&gen->pgen, seg, + 0, + amcseg->old ? SegSize(seg) : 0, + amcseg->old ? 0 : SegSize(seg), + amcseg->deferred); } /* Disassociate forwarding buffers from gens before they are */ @@ -995,7 +999,6 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn, amcGen gen; PoolGen pgen; amcBuf amcbuf; - Bool isRamping; AVERT(Pool, pool); amc = Pool2AMC(pool); @@ -1016,13 +1019,13 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn, pgen = &gen->pgen; /* Create and attach segment. The location of this segment is */ - /* expressed as a generation number. 
We rely on the arena to */ + /* expressed via the pool generation. We rely on the arena to */ /* organize locations appropriately. */ alignedSize = SizeAlignUp(size, ArenaAlign(arena)); MPS_ARGS_BEGIN(args) { MPS_ARGS_ADD_FIELD(args, amcKeySegGen, p, gen); - res = ChainAlloc(&seg, amc->chain, PoolGenNr(pgen), amcSegClassGet(), - alignedSize, pool, withReservoirPermit, args); + res = PoolGenAlloc(&seg, pgen, amcSegClassGet(), alignedSize, + withReservoirPermit, args); } MPS_ARGS_END(args); if(res != ResOK) return res; @@ -1034,23 +1037,17 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn, else SegSetRankAndSummary(seg, BufferRankSet(buffer), RefSetUNIV); - /* Put the segment in the generation indicated by the buffer. */ - ++gen->segs; - pgen->totalSize += alignedSize; - - /* If ramping, or if the buffer is intended for allocating - hash table arrays, don't count it towards newSize. */ - isRamping = (amc->rampMode == RampRAMPING && - buffer == amc->rampGen->forward && - gen == amc->rampGen); - if (isRamping || amcbuf->forHashArrays) { - Seg2amcSeg(seg)->new = FALSE; - } else { - pgen->newSize += alignedSize; + /* If ramping, or if the buffer is intended for allocating hash + * table arrays, defer the size accounting. */ + if ((amc->rampMode == RampRAMPING + && buffer == amc->rampGen->forward + && gen == amc->rampGen) + || amcbuf->forHashArrays) + { + Seg2amcSeg(seg)->deferred = TRUE; } base = SegBase(seg); - *baseReturn = base; if(alignedSize < AMCLargeSegPAGES * ArenaAlign(arena)) { /* Small or Medium segment: give the buffer the entire seg. */ limit = AddrAdd(base, alignedSize); @@ -1072,6 +1069,9 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn, ShieldCover(arena, seg); } } + + PoolGenAccountForFill(pgen, SegSize(seg), Seg2amcSeg(seg)->deferred); + *baseReturn = base; *limitReturn = limit; return ResOK; } @@ -1114,6 +1114,11 @@ static void AMCBufferEmpty(Pool pool, Buffer buffer, (*pool->format->pad)(init, size); ShieldCover(arena, seg); } + + /* The unused part of the buffer is not reused by AMC, so we pass 0 + * for the unused argument. This call therefore has no effect on the + * accounting, but we call it anyway for consistency. */ + PoolGenAccountForEmpty(&amcSegGen(seg)->pgen, 0, Seg2amcSeg(seg)->deferred); } @@ -1177,16 +1182,19 @@ static void AMCRampEnd(Pool pool, Buffer buf) NOTREACHED; } - /* Adjust amc->rampGen->pgen.newSize: Now count all the segments */ - /* in the ramp generation as new (except if they're white). */ + /* Now all the segments in the ramp generation contribute to the + * pool generation's sizes. */ RING_FOR(node, PoolSegRing(pool), nextNode) { Seg seg = SegOfPoolRing(node); - - if(amcSegGen(seg) == amc->rampGen && !Seg2amcSeg(seg)->new + amcSeg amcseg = Seg2amcSeg(seg); + if(amcSegGen(seg) == amc->rampGen + && amcseg->deferred && SegWhite(seg) == TraceSetEMPTY) { - pgen->newSize += SegSize(seg); - Seg2amcSeg(seg)->new = TRUE; + PoolGenUndefer(pgen, + amcseg->old ? SegSize(seg) : 0, + amcseg->old ? 0 : SegSize(seg)); + amcseg->deferred = FALSE; } } } @@ -1200,14 +1208,17 @@ static void AMCRampEnd(Pool pool, Buffer buf) */ static Res AMCWhiten(Pool pool, Trace trace, Seg seg) { + Size condemned = 0; amcGen gen; AMC amc; Buffer buffer; + amcSeg amcseg; Res res; AVERT(Pool, pool); AVERT(Trace, trace); AVERT(Seg, seg); + amcseg = Seg2amcSeg(seg); buffer = SegBuffer(seg); if(buffer != NULL) { @@ -1263,14 +1274,14 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg) /* @@@@ We could subtract all the nailed grains. 
*/ /* Relies on unsigned arithmetic wrapping round */ /* on under- and overflow (which it does). */ - trace->condemned -= AddrOffset(BufferScanLimit(buffer), - BufferLimit(buffer)); + condemned -= AddrOffset(BufferScanLimit(buffer), BufferLimit(buffer)); } } } SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); - trace->condemned += SegSize(seg); + condemned += SegSize(seg); + trace->condemned += condemned; amc = Pool2AMC(pool); AVERT(AMC, amc); @@ -1294,9 +1305,9 @@ static Res AMCWhiten(Pool pool, Trace trace, Seg seg) gen = amcSegGen(seg); AVERT(amcGen, gen); - if(Seg2amcSeg(seg)->new) { - gen->pgen.newSize -= SegSize(seg); - Seg2amcSeg(seg)->new = FALSE; + if (!amcseg->old) { + PoolGenAccountForAge(&gen->pgen, SegSize(seg), amcseg->deferred); + amcseg->old = TRUE; } /* Ensure we are forwarding into the right generation. */ @@ -1723,10 +1734,13 @@ static Res AMCFix(Pool pool, ScanState ss, Seg seg, Ref *refIO) /* Since we're moving an object from one segment to another, */ /* union the greyness and the summaries together. */ grey = SegGrey(seg); - if(SegRankSet(seg) != RankSetEMPTY) /* not for AMCZ */ + if(SegRankSet(seg) != RankSetEMPTY) { /* not for AMCZ */ grey = TraceSetUnion(grey, ss->traces); + SegSetSummary(toSeg, RefSetUnion(SegSummary(toSeg), SegSummary(seg))); + } else { + AVER(SegRankSet(toSeg) == RankSetEMPTY); + } SegSetGrey(toSeg, TraceSetUnion(SegGrey(toSeg), grey)); - SegSetSummary(toSeg, RefSetUnion(SegSummary(toSeg), SegSummary(seg))); /* */ (void)AddrCopy(newRef, ref, length); /* .exposed.seg */ @@ -1871,10 +1885,13 @@ static Res AMCHeaderFix(Pool pool, ScanState ss, Seg seg, Ref *refIO) /* Since we're moving an object from one segment to another, */ /* union the greyness and the summaries together. */ grey = SegGrey(seg); - if(SegRankSet(seg) != RankSetEMPTY) /* not for AMCZ */ + if(SegRankSet(seg) != RankSetEMPTY) { /* not for AMCZ */ grey = TraceSetUnion(grey, ss->traces); + SegSetSummary(toSeg, RefSetUnion(SegSummary(toSeg), SegSummary(seg))); + } else { + AVER(SegRankSet(toSeg) == RankSetEMPTY); + } SegSetGrey(toSeg, TraceSetUnion(SegGrey(toSeg), grey)); - SegSetSummary(toSeg, RefSetUnion(SegSummary(toSeg), SegSummary(seg))); /* */ (void)AddrCopy(newBase, AddrSub(ref, headerSize), length); /* .exposed.seg */ @@ -1991,9 +2008,7 @@ static void amcReclaimNailed(Pool pool, Trace trace, Seg seg) /* We may not free a buffered seg. */ AVER(SegBuffer(seg) == NULL); - --gen->segs; - gen->pgen.totalSize -= SegSize(seg); - SegFree(seg); + PoolGenFree(&gen->pgen, seg, 0, SegSize(seg), 0, Seg2amcSeg(seg)->deferred); } else { /* Seg retained */ STATISTIC_STAT( { @@ -2037,7 +2052,6 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg) { AMC amc; amcGen gen; - Size size; AVERT_CRITICAL(Pool, pool); amc = Pool2AMC(pool); @@ -2070,13 +2084,9 @@ static void AMCReclaim(Pool pool, Trace trace, Seg seg) /* segs should have been nailed anyway). 
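Not part of the patch: a schematic of the pool-generation accounting calls that AMC now makes over a segment's lifetime, gathered from the call sites above into one place. The parameter interpretation (free, old and new sizes in PoolGenFree, and the deferred flag matching .seg.deferred) is inferred from those call sites, so treat this as a sketch rather than the definitive contract.

static void poolGenAccountingSketch(PoolGen pgen, SegClass segClass, Size size)
{
  Seg seg;
  Res res;

  /* Allocate a segment within the generation ... */
  res = PoolGenAlloc(&seg, pgen, segClass, size, FALSE, argsNone);
  if (res != ResOK)
    return;
  /* ... and account for filling it; FALSE = not deferred, so the size
   * counts towards the generation's newSize straight away. */
  PoolGenAccountForFill(pgen, SegSize(seg), FALSE);

  /* On first condemnation the size ages from newSize to oldSize. */
  PoolGenAccountForAge(pgen, SegSize(seg), FALSE);

  /* When the segment dies, hand it back, splitting its size into
   * free, old and new portions for the accounting. */
  PoolGenFree(pgen, seg, 0, SegSize(seg), 0, FALSE);
}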
*/ AVER(SegBuffer(seg) == NULL); - --gen->segs; - size = SegSize(seg); - gen->pgen.totalSize -= size; + trace->reclaimSize += SegSize(seg); - trace->reclaimSize += size; - - SegFree(seg); + PoolGenFree(&gen->pgen, seg, 0, SegSize(seg), 0, Seg2amcSeg(seg)->deferred); } diff --git a/mps/code/poolams.c b/mps/code/poolams.c index d74a06caca8..5be47386301 100644 --- a/mps/code/poolams.c +++ b/mps/code/poolams.c @@ -55,7 +55,7 @@ Bool AMSSegCheck(AMSSeg amsseg) CHECKL(amsseg->grains == AMSGrains(amsseg->ams, SegSize(seg))); CHECKL(amsseg->grains > 0); - CHECKL(amsseg->grains >= amsseg->free + amsseg->newAlloc); + CHECKL(amsseg->grains == amsseg->freeGrains + amsseg->oldGrains + amsseg->newGrains); CHECKL(BoolCheck(amsseg->allocTableInUse)); if (!amsseg->allocTableInUse) @@ -94,7 +94,7 @@ void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockStepMethod f, void *p) pool = SegPool(AMSSeg2Seg(amsseg)); seg = AMSSeg2Seg(amsseg); - if (amsseg->free == 0) + if (amsseg->freeGrains == 0) return; if (amsseg->allocTableInUse) { Index base, limit, next; @@ -107,10 +107,8 @@ void AMSSegFreeWalk(AMSSeg amsseg, FreeBlockStepMethod f, void *p) (*f)(AMS_INDEX_ADDR(seg, base), AMS_INDEX_ADDR(seg, limit), pool, p); next = limit + 1; } - } else { - if ( amsseg->firstFree < amsseg->grains ) - (*f)(AMS_INDEX_ADDR(seg, amsseg->firstFree), SegLimit(seg), pool, p); - } + } else if (amsseg->firstFree < amsseg->grains) + (*f)(AMS_INDEX_ADDR(seg, amsseg->firstFree), SegLimit(seg), pool, p); } @@ -129,7 +127,7 @@ void AMSSegFreeCheck(AMSSeg amsseg) AVERT(AMSSeg, amsseg); - if (amsseg->free == 0) + if (amsseg->freeGrains == 0) return; /* If it's not a debug class, don't bother walking. */ @@ -241,8 +239,9 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size, goto failNextMethod; amsseg->grains = size >> ams->grainShift; - amsseg->free = amsseg->grains; - amsseg->newAlloc = (Count)0; + amsseg->freeGrains = amsseg->grains; + amsseg->oldGrains = (Count)0; + amsseg->newGrains = (Count)0; amsseg->marksChanged = FALSE; /* */ amsseg->ambiguousFixes = FALSE; @@ -263,7 +262,6 @@ static Res AMSSegInit(Seg seg, Pool pool, Addr base, Size size, &amsseg->segRing); amsseg->sig = AMSSegSig; - ams->size += size; AVERT(AMSSeg, amsseg); return ResOK; @@ -299,8 +297,6 @@ static void AMSSegFinish(Seg seg) RingRemove(&amsseg->segRing); RingFinish(&amsseg->segRing); - AVER(ams->size >= SegSize(seg)); - ams->size -= SegSize(seg); amsseg->sig = SigInvalid; /* finish the superclass fields last */ @@ -359,7 +355,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi, /* checks for .grain-align */ AVER(allGrains == AddrOffset(base, limit) >> ams->grainShift); /* checks for .empty */ - AVER(amssegHi->free == hiGrains); + AVER(amssegHi->freeGrains == hiGrains); AVER(!amssegHi->marksChanged); /* .alloc-early */ @@ -393,8 +389,9 @@ static Res AMSSegMerge(Seg seg, Seg segHi, MERGE_TABLES(nonwhiteTable, BTSetRange); amsseg->grains = allGrains; - amsseg->free = amsseg->free + amssegHi->free; - amsseg->newAlloc = amsseg->newAlloc + amssegHi->newAlloc; + amsseg->freeGrains = amsseg->freeGrains + amssegHi->freeGrains; + amsseg->oldGrains = amsseg->oldGrains + amssegHi->oldGrains; + amsseg->newGrains = amsseg->newGrains + amssegHi->newGrains; /* other fields in amsseg are unaffected */ RingRemove(&amssegHi->segRing); @@ -402,6 +399,7 @@ static Res AMSSegMerge(Seg seg, Seg segHi, amssegHi->sig = SigInvalid; AVERT(AMSSeg, amsseg); + PoolGenAccountForSegMerge(&ams->pgen); return ResOK; failSuper: @@ -443,7 +441,7 @@ static Res AMSSegSplit(Seg seg, Seg segHi, /* checks 
for .grain-align */ AVER(allGrains == amsseg->grains); /* checks for .empty */ - AVER(amsseg->free >= hiGrains); + AVER(amsseg->freeGrains >= hiGrains); if (amsseg->allocTableInUse) { AVER(BTIsResRange(amsseg->allocTable, loGrains, allGrains)); } else { @@ -485,9 +483,11 @@ static Res AMSSegSplit(Seg seg, Seg segHi, amsseg->grains = loGrains; amssegHi->grains = hiGrains; - amsseg->free -= hiGrains; - amssegHi->free = hiGrains; - amssegHi->newAlloc = (Count)0; + AVER(amsseg->freeGrains >= hiGrains); + amsseg->freeGrains -= hiGrains; + amssegHi->freeGrains = hiGrains; + amssegHi->oldGrains = (Count)0; + amssegHi->newGrains = (Count)0; amssegHi->marksChanged = FALSE; /* */ amssegHi->ambiguousFixes = FALSE; @@ -505,6 +505,7 @@ static Res AMSSegSplit(Seg seg, Seg segHi, amssegHi->sig = AMSSegSig; AVERT(AMSSeg, amsseg); AVERT(AMSSeg, amssegHi); + PoolGenAccountForSegSplit(&ams->pgen); return ResOK; failSuper: @@ -551,8 +552,11 @@ static Res AMSSegDescribe(Seg seg, mps_lib_FILE *stream, Count depth) buffer = SegBuffer(seg); res = WriteF(stream, depth, - "AMS $P\n", (WriteFP)amsseg->ams, - "grains $W\n", (WriteFW)amsseg->grains, + " AMS $P\n", (WriteFP)amsseg->ams, + " grains $W\n", (WriteFW)amsseg->grains, + " freeGrains $W\n", (WriteFW)amsseg->freeGrains, + " oldGrains $W\n", (WriteFW)amsseg->oldGrains, + " newGrains $W\n", (WriteFW)amsseg->newGrains, NULL); if (res != ResOK) return res; if (amsseg->allocTableInUse) @@ -690,14 +694,14 @@ static Res AMSSegCreate(Seg *segReturn, Pool pool, Size size, if (res != ResOK) goto failSize; - res = ChainAlloc(&seg, ams->chain, ams->pgen.nr, (*ams->segClass)(), - prefSize, pool, withReservoirPermit, argsNone); + res = PoolGenAlloc(&seg, &ams->pgen, (*ams->segClass)(), prefSize, + withReservoirPermit, argsNone); if (res != ResOK) { /* try to allocate one that's just large enough */ Size minSize = SizeAlignUp(size, ArenaAlign(arena)); if (minSize == prefSize) goto failSeg; - res = ChainAlloc(&seg, ams->chain, ams->pgen.nr, (*ams->segClass)(), - prefSize, pool, withReservoirPermit, argsNone); + res = PoolGenAlloc(&seg, &ams->pgen, (*ams->segClass)(), prefSize, + withReservoirPermit, argsNone); if (res != ResOK) goto failSeg; } @@ -730,9 +734,15 @@ static void AMSSegsDestroy(AMS ams) ring = PoolSegRing(AMS2Pool(ams)); RING_FOR(node, ring, next) { Seg seg = SegOfPoolRing(node); - AVER(Seg2AMSSeg(seg)->ams == ams); - AMSSegFreeCheck(Seg2AMSSeg(seg)); - SegFree(seg); + AMSSeg amsseg = Seg2AMSSeg(seg); + AVERT(AMSSeg, amsseg); + AVER(amsseg->ams == ams); + AMSSegFreeCheck(amsseg); + PoolGenFree(&ams->pgen, seg, + AMSGrainsSize(ams, amsseg->freeGrains), + AMSGrainsSize(ams, amsseg->oldGrains), + AMSGrainsSize(ams, amsseg->newGrains), + FALSE); } } @@ -821,8 +831,7 @@ Res AMSInitInternal(AMS ams, Format format, Chain chain, unsigned gen, pool->alignment = pool->format->alignment; ams->grainShift = SizeLog2(PoolAlignment(pool)); - ams->chain = chain; - res = PoolGenInit(&ams->pgen, ams->chain, gen, pool); + res = PoolGenInit(&ams->pgen, ChainGen(chain, gen), pool); if (res != ResOK) return res; @@ -836,8 +845,6 @@ Res AMSInitInternal(AMS ams, Format format, Chain chain, unsigned gen, ams->segsDestroy = AMSSegsDestroy; ams->segClass = AMSSegClassGet; - ams->size = 0; - ams->sig = AMSSig; AVERT(AMS, ams); return ResOK; @@ -904,15 +911,17 @@ static Bool amsSegAlloc(Index *baseReturn, Index *limitReturn, } else { if (amsseg->firstFree > amsseg->grains - grains) return FALSE; - base = amsseg->firstFree; limit = amsseg->grains; + base = amsseg->firstFree; + limit = 
amsseg->grains; amsseg->firstFree = limit; } /* We don't place buffers on white segments, so no need to adjust colour. */ AVER(!amsseg->colourTablesInUse); - amsseg->free -= limit - base; - amsseg->newAlloc += limit - base; + AVER(amsseg->freeGrains >= limit - base); + amsseg->freeGrains -= limit - base; + amsseg->newGrains += limit - base; *baseReturn = base; *limitReturn = limit; return TRUE; @@ -958,12 +967,15 @@ static Res AMSBufferFill(Addr *baseReturn, Addr *limitReturn, RING_FOR(node, ring, nextNode) { AMSSeg amsseg = RING_ELT(AMSSeg, segRing, node); AVERT_CRITICAL(AMSSeg, amsseg); - if (amsseg->free >= AMSGrains(ams, size)) { + if (amsseg->freeGrains >= AMSGrains(ams, size)) { seg = AMSSeg2Seg(amsseg); - if (SegRankSet(seg) == rankSet && SegBuffer(seg) == NULL + if (SegRankSet(seg) == rankSet + && SegBuffer(seg) == NULL /* Can't use a white or grey segment, see d.m.p.fill.colour. */ - && SegWhite(seg) == TraceSetEMPTY && SegGrey(seg) == TraceSetEMPTY) { + && SegWhite(seg) == TraceSetEMPTY + && SegGrey(seg) == TraceSetEMPTY) + { b = amsSegAlloc(&base, &limit, seg, size); if (b) goto found; @@ -983,10 +995,10 @@ found: baseAddr = AMS_INDEX_ADDR(seg, base); limitAddr = AMS_INDEX_ADDR(seg, limit); DebugPoolFreeCheck(pool, baseAddr, limitAddr); allocatedSize = AddrOffset(baseAddr, limitAddr); - ams->pgen.totalSize += allocatedSize; - ams->pgen.newSize += allocatedSize; - *baseReturn = baseAddr; *limitReturn = limitAddr; + PoolGenAccountForFill(&ams->pgen, allocatedSize, FALSE); + *baseReturn = baseAddr; + *limitReturn = limitAddr; return ResOK; } @@ -1040,9 +1052,9 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit) /* The nonwhiteTable is shared with allocTable and in use, so we * mustn't start using allocTable. In this case we know: 1. the * segment has been condemned (because colour tables are turned - * on in AMSCondemn); 2. the segment has not yet been reclaimed + * on in AMSWhiten); 2. the segment has not yet been reclaimed * (because colour tables are turned off in AMSReclaim); 3. the - * unused portion of the buffer is black (see AMSCondemn). So we + * unused portion of the buffer is black (see AMSWhiten). So we * need to whiten the unused portion of the buffer. The * allocTable will be turned back on (if necessary) in * AMSReclaim, when we know that the nonwhite grains are exactly @@ -1061,20 +1073,19 @@ static void AMSBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit) if (amsseg->colourTablesInUse) AMS_RANGE_WHITEN(seg, initIndex, limitIndex); - amsseg->free += limitIndex - initIndex; - /* The unused portion of the buffer must be new, since it's not condemned. */ - AVER(amsseg->newAlloc >= limitIndex - initIndex); - amsseg->newAlloc -= limitIndex - initIndex; + amsseg->freeGrains += limitIndex - initIndex; + /* Unused portion of the buffer must be new, since it's not condemned. */ + AVER(amsseg->newGrains >= limitIndex - initIndex); + amsseg->newGrains -= limitIndex - initIndex; size = AddrOffset(init, limit); - ams->pgen.totalSize -= size; - ams->pgen.newSize -= size; + PoolGenAccountForEmpty(&ams->pgen, size, FALSE); } -/* amsRangeCondemn -- Condemn a part of an AMS segment +/* amsRangeWhiten -- Condemn a part of an AMS segment * Allow calling it with base = limit, to simplify the callers. 
*/ -static void amsRangeCondemn(Seg seg, Index base, Index limit) +static void amsRangeWhiten(Seg seg, Index base, Index limit) { if (base != limit) { AMSSeg amsseg = Seg2AMSSeg(seg); @@ -1087,9 +1098,9 @@ static void amsRangeCondemn(Seg seg, Index base, Index limit) } -/* AMSCondemn -- the pool class segment condemning method */ +/* AMSWhiten -- the pool class segment condemning method */ -static Res AMSCondemn(Pool pool, Trace trace, Seg seg) +static Res AMSWhiten(Pool pool, Trace trace, Seg seg) { AMS ams; AMSSeg amsseg; @@ -1139,23 +1150,24 @@ static Res AMSCondemn(Pool pool, Trace trace, Seg seg) scanLimitIndex = AMS_ADDR_INDEX(seg, BufferScanLimit(buffer)); limitIndex = AMS_ADDR_INDEX(seg, BufferLimit(buffer)); - amsRangeCondemn(seg, 0, scanLimitIndex); + amsRangeWhiten(seg, 0, scanLimitIndex); if (scanLimitIndex < limitIndex) AMS_RANGE_BLACKEN(seg, scanLimitIndex, limitIndex); - amsRangeCondemn(seg, limitIndex, amsseg->grains); + amsRangeWhiten(seg, limitIndex, amsseg->grains); /* We didn't condemn the buffer, subtract it from the count. */ uncondemned = limitIndex - scanLimitIndex; } else { /* condemn whole seg */ - amsRangeCondemn(seg, 0, amsseg->grains); + amsRangeWhiten(seg, 0, amsseg->grains); uncondemned = (Count)0; } - trace->condemned += SegSize(seg) - AMSGrainsSize(ams, uncondemned); - /* The unused part of the buffer is new allocation by definition. */ - ams->pgen.newSize -= AMSGrainsSize(ams, amsseg->newAlloc - uncondemned); - amsseg->newAlloc = uncondemned; + /* The unused part of the buffer remains new: the rest becomes old. */ + PoolGenAccountForAge(&ams->pgen, AMSGrainsSize(ams, amsseg->newGrains - uncondemned), FALSE); + amsseg->oldGrains += amsseg->newGrains - uncondemned; + amsseg->newGrains = uncondemned; amsseg->marksChanged = FALSE; /* */ amsseg->ambiguousFixes = FALSE; + trace->condemned += AMSGrainsSize(ams, amsseg->oldGrains); SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); @@ -1561,8 +1573,7 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg) { AMS ams; AMSSeg amsseg; - Count nowFree, grains; - Size reclaimedSize; + Count nowFree, grains, reclaimedGrains; PoolDebugMixin debug; AVERT(Pool, pool); @@ -1607,21 +1618,26 @@ static void AMSReclaim(Pool pool, Trace trace, Seg seg) } } - reclaimedSize = (nowFree - amsseg->free) << ams->grainShift; - amsseg->free = nowFree; - trace->reclaimSize += reclaimedSize; - ams->pgen.totalSize -= reclaimedSize; + reclaimedGrains = nowFree - amsseg->freeGrains; + AVER(amsseg->oldGrains >= reclaimedGrains); + amsseg->oldGrains -= reclaimedGrains; + amsseg->freeGrains += reclaimedGrains; + PoolGenAccountForReclaim(&ams->pgen, AMSGrainsSize(ams, reclaimedGrains), FALSE); + trace->reclaimSize += AMSGrainsSize(ams, reclaimedGrains); /* preservedInPlaceCount is updated on fix */ - trace->preservedInPlaceSize += (grains - amsseg->free) << ams->grainShift; + trace->preservedInPlaceSize += AMSGrainsSize(ams, amsseg->oldGrains); /* Ensure consistency of segment even if are just about to free it */ amsseg->colourTablesInUse = FALSE; SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); - if (amsseg->free == grains && SegBuffer(seg) == NULL) { + if (amsseg->freeGrains == grains && SegBuffer(seg) == NULL) /* No survivors */ - SegFree(seg); - } + PoolGenFree(&ams->pgen, seg, + AMSGrainsSize(ams, amsseg->freeGrains), + AMSGrainsSize(ams, amsseg->oldGrains), + AMSGrainsSize(ams, amsseg->newGrains), + FALSE); } @@ -1662,11 +1678,7 @@ static Res AMSDescribe(Pool pool, mps_lib_FILE *stream, Count depth) "AMS $P {\n", (WriteFP)ams, " 
pool $P ($U)\n", (WriteFP)pool, (WriteFU)pool->serial, - " size $W\n", - (WriteFW)ams->size, " grain shift $U\n", (WriteFU)ams->grainShift, - " chain $P\n", - (WriteFP)ams->chain, NULL); if (res != ResOK) return res; @@ -1708,7 +1720,7 @@ DEFINE_CLASS(AMSPoolClass, this) this->bufferClass = RankBufClassGet; this->bufferFill = AMSBufferFill; this->bufferEmpty = AMSBufferEmpty; - this->whiten = AMSCondemn; + this->whiten = AMSWhiten; this->blacken = AMSBlacken; this->scan = AMSScan; this->fix = AMSFix; @@ -1756,11 +1768,9 @@ Bool AMSCheck(AMS ams) CHECKS(AMS, ams); CHECKD(Pool, AMS2Pool(ams)); CHECKL(IsSubclassPoly(AMS2Pool(ams)->class, AMSPoolClassGet())); - CHECKL(PoolAlignment(AMS2Pool(ams)) == ((Size)1 << ams->grainShift)); + CHECKL(PoolAlignment(AMS2Pool(ams)) == AMSGrainsSize(ams, (Size)1)); CHECKL(PoolAlignment(AMS2Pool(ams)) == AMS2Pool(ams)->format->alignment); - CHECKD(Chain, ams->chain); CHECKD(PoolGen, &ams->pgen); - CHECKL(SizeIsAligned(ams->size, ArenaAlign(PoolArena(AMS2Pool(ams))))); CHECKL(FUNCHECK(ams->segSize)); CHECKD_NOSIG(Ring, &ams->segRing); CHECKL(FUNCHECK(ams->allocRing)); diff --git a/mps/code/poolams.h b/mps/code/poolams.h index 96cec6c6c7b..8c567910b77 100644 --- a/mps/code/poolams.h +++ b/mps/code/poolams.h @@ -41,7 +41,6 @@ typedef Res (*AMSSegSizePolicyFunction)(Size *sizeReturn, typedef struct AMSStruct { PoolStruct poolStruct; /* generic pool structure */ Shift grainShift; /* log2 of grain size */ - Chain chain; /* chain used by this pool */ PoolGenStruct pgen; /* generation representing the pool */ Size size; /* total segment size of the pool */ AMSSegSizePolicyFunction segSize; /* SegSize policy */ @@ -58,9 +57,10 @@ typedef struct AMSSegStruct { GCSegStruct gcSegStruct; /* superclass fields must come first */ AMS ams; /* owning ams */ RingStruct segRing; /* ring that this seg belongs to */ - Count grains; /* number of grains */ - Count free; /* number of free grains */ - Count newAlloc; /* number of grains allocated since last GC */ + Count grains; /* total grains */ + Count freeGrains; /* free grains */ + Count oldGrains; /* grains allocated prior to last collection */ + Count newGrains; /* grains allocated since last collection */ Bool allocTableInUse; /* allocTable is used */ Index firstFree; /* 1st free grain, if allocTable is not used */ BT allocTable; /* set if grain is allocated */ diff --git a/mps/code/poolawl.c b/mps/code/poolawl.c index 21edaaec1cb..9f751a92059 100644 --- a/mps/code/poolawl.c +++ b/mps/code/poolawl.c @@ -84,9 +84,7 @@ typedef Addr (*FindDependentMethod)(Addr object); typedef struct AWLStruct { PoolStruct poolStruct; Shift alignShift; - Chain chain; /* dummy chain */ PoolGenStruct pgen; /* generation representing the pool */ - Size size; /* allocated size in bytes */ Count succAccesses; /* number of successive single accesses */ FindDependentMethod findDependent; /* to find a dependent object */ awlStatTotalStruct stats; @@ -94,6 +92,7 @@ typedef struct AWLStruct { } AWLStruct, *AWL; #define Pool2AWL(pool) PARENT(AWLStruct, poolStruct, pool) +#define AWLGrainsSize(awl, grains) ((grains) << (awl)->alignShift) static Bool AWLCheck(AWL awl); @@ -102,6 +101,8 @@ static Bool AWLCheck(AWL awl); /* Conversion between indexes and Addrs */ #define awlIndexOfAddr(base, awl, p) \ (AddrOffset((base), (p)) >> (awl)->alignShift) +#define awlAddrOfIndex(base, awl, i) \ + AddrAdd(base, AWLGrainsSize(awl, i)) /* AWLSegStruct -- AWL segment subclass @@ -118,8 +119,10 @@ typedef struct AWLSegStruct { BT scanned; BT alloc; Count grains; - Count 
free; /* number of free grains */ - Count singleAccesses; /* number of accesses processed singly */ + Count freeGrains; /* free grains */ + Count oldGrains; /* grains allocated prior to last collection */ + Count newGrains; /* grains allocated since last collection */ + Count singleAccesses; /* number of accesses processed singly */ awlStatSegStruct stats; Sig sig; } AWLSegStruct, *AWLSeg; @@ -139,9 +142,8 @@ static Bool AWLSegCheck(AWLSeg awlseg) CHECKL(awlseg->mark != NULL); CHECKL(awlseg->scanned != NULL); CHECKL(awlseg->alloc != NULL); - /* Can't do any real check on ->grains */ CHECKL(awlseg->grains > 0); - CHECKL(awlseg->free <= awlseg->grains); + CHECKL(awlseg->grains == awlseg->freeGrains + awlseg->oldGrains + awlseg->newGrains); return TRUE; } @@ -224,10 +226,12 @@ static Res AWLSegInit(Seg seg, Pool pool, Addr base, Size size, BTResRange(awlseg->scanned, 0, bits); BTResRange(awlseg->alloc, 0, bits); SegSetRankAndSummary(seg, rankSet, RefSetUNIV); - awlseg->free = bits; - awlseg->sig = AWLSegSig; + awlseg->freeGrains = bits; + awlseg->oldGrains = (Count)0; + awlseg->newGrains = (Count)0; awlseg->singleAccesses = 0; awlStatSegInit(awlseg); + awlseg->sig = AWLSegSig; AVERT(AWLSeg, awlseg); return ResOK; @@ -473,8 +477,8 @@ static Res AWLSegCreate(AWLSeg *awlsegReturn, return ResMEMORY; MPS_ARGS_BEGIN(args) { MPS_ARGS_ADD_FIELD(args, awlKeySegRankSet, u, rankSet); - res = ChainAlloc(&seg, awl->chain, awl->pgen.nr, AWLSegClassGet(), - size, pool, reservoirPermit, args); + res = PoolGenAlloc(&seg, &awl->pgen, AWLSegClassGet(), size, + reservoirPermit, args); } MPS_ARGS_END(args); if (res != ResOK) return res; @@ -501,7 +505,7 @@ static Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn, AVERT(AWLSeg, awlseg); AVERT(AWL, awl); AVER(size > 0); - AVER(size << awl->alignShift >= size); + AVER(AWLGrainsSize(awl, size) >= size); seg = AWLSeg2Seg(awlseg); if (size > SegSize(seg)) @@ -509,9 +513,8 @@ static Bool AWLSegAlloc(Addr *baseReturn, Addr *limitReturn, n = size >> awl->alignShift; if (!BTFindLongResRange(&i, &j, awlseg->alloc, 0, awlseg->grains, n)) return FALSE; - awl->size += size; - *baseReturn = AddrAdd(SegBase(seg), i << awl->alignShift); - *limitReturn = AddrAdd(SegBase(seg), j << awl->alignShift); + *baseReturn = awlAddrOfIndex(SegBase(seg), awl, i); + *limitReturn = awlAddrOfIndex(SegBase(seg),awl, j); return TRUE; } @@ -570,15 +573,12 @@ static Res AWLInit(Pool pool, ArgList args) AVERT(Chain, chain); AVER(gen <= ChainGens(chain)); - awl->chain = chain; - res = PoolGenInit(&awl->pgen, chain, gen, pool); + res = PoolGenInit(&awl->pgen, ChainGen(chain, gen), pool); if (res != ResOK) goto failGenInit; awl->alignShift = SizeLog2(PoolAlignment(pool)); - awl->size = (Size)0; - awl->succAccesses = 0; awlStatTotalInit(awl); awl->sig = AWLSig; @@ -608,8 +608,13 @@ static void AWLFinish(Pool pool) ring = &pool->segRing; RING_FOR(node, ring, nextNode) { Seg seg = SegOfPoolRing(node); - AVERT(Seg, seg); - SegFree(seg); + AWLSeg awlseg = Seg2AWLSeg(seg); + AVERT(AWLSeg, awlseg); + PoolGenFree(&awl->pgen, seg, + AWLGrainsSize(awl, awlseg->freeGrains), + AWLGrainsSize(awl, awlseg->oldGrains), + AWLGrainsSize(awl, awlseg->newGrains), + FALSE); } awl->sig = SigInvalid; PoolGenFinish(&awl->pgen); @@ -648,10 +653,11 @@ static Res AWLBufferFill(Addr *baseReturn, Addr *limitReturn, /* Only try to allocate in the segment if it is not already */ /* buffered, and has the same ranks as the buffer. 
*/ - if (SegBuffer(seg) == NULL && SegRankSet(seg) == BufferRankSet(buffer)) - if (awlseg->free << awl->alignShift >= size - && AWLSegAlloc(&base, &limit, awlseg, awl, size)) - goto found; + if (SegBuffer(seg) == NULL + && SegRankSet(seg) == BufferRankSet(buffer) + && AWLGrainsSize(awl, awlseg->freeGrains) >= size + && AWLSegAlloc(&base, &limit, awlseg, awl, size)) + goto found; } /* No free space in existing awlsegs, so create new awlseg */ @@ -675,7 +681,10 @@ found: /* Shouldn't this depend on trace phase? @@@@ */ BTSetRange(awlseg->mark, i, j); BTSetRange(awlseg->scanned, i, j); - awlseg->free -= j - i; + AVER(awlseg->freeGrains >= j - i); + awlseg->freeGrains -= j - i; + awlseg->newGrains += j - i; + PoolGenAccountForFill(&awl->pgen, AddrOffset(base, limit), FALSE); } *baseReturn = base; *limitReturn = limit; @@ -711,7 +720,10 @@ static void AWLBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit) AVER(i <= j); if (i < j) { BTResRange(awlseg->alloc, i, j); - awlseg->free += j - i; + AVER(awlseg->newGrains >= j - i); + awlseg->newGrains -= j - i; + awlseg->freeGrains += j - i; + PoolGenAccountForEmpty(&awl->pgen, AddrOffset(init, limit), FALSE); } } @@ -737,6 +749,7 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg) AWL awl; AWLSeg awlseg; Buffer buffer; + Count uncondemned; /* All parameters checked by generic PoolWhiten. */ @@ -752,15 +765,13 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg) if(buffer == NULL) { awlRangeWhiten(awlseg, 0, awlseg->grains); - trace->condemned += SegSize(seg); + uncondemned = (Count)0; } else { /* Whiten everything except the buffer. */ Addr base = SegBase(seg); - Index scanLimitIndex = awlIndexOfAddr(base, awl, - BufferScanLimit(buffer)); - Index limitIndex = awlIndexOfAddr(base, awl, - BufferLimit(buffer)); - + Index scanLimitIndex = awlIndexOfAddr(base, awl, BufferScanLimit(buffer)); + Index limitIndex = awlIndexOfAddr(base, awl, BufferLimit(buffer)); + uncondemned = limitIndex - scanLimitIndex; awlRangeWhiten(awlseg, 0, scanLimitIndex); awlRangeWhiten(awlseg, limitIndex, awlseg->grains); @@ -771,14 +782,12 @@ static Res AWLWhiten(Pool pool, Trace trace, Seg seg) AVER(BTIsSetRange(awlseg->mark, scanLimitIndex, limitIndex)); AVER(BTIsSetRange(awlseg->scanned, scanLimitIndex, limitIndex)); } - - /* We didn't condemn the buffer, subtract it from the count. */ - /* @@@@ We could subtract all the free grains. 
*/ - trace->condemned += SegSize(seg) - - AddrOffset(BufferScanLimit(buffer), - BufferLimit(buffer)); } + PoolGenAccountForAge(&awl->pgen, AWLGrainsSize(awl, awlseg->newGrains - uncondemned), FALSE); + awlseg->oldGrains += awlseg->newGrains - uncondemned; + awlseg->newGrains = uncondemned; + trace->condemned += AWLGrainsSize(awl, awlseg->oldGrains); SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); return ResOK; } @@ -1090,12 +1099,12 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg) Addr base; AWL awl; AWLSeg awlseg; + Buffer buffer; Index i; - Count oldFree; Format format; + Count reclaimedGrains = (Count)0; Count preservedInPlaceCount = (Count)0; Size preservedInPlaceSize = (Size)0; - Size freed; /* amount reclaimed, in bytes */ AVERT(Pool, pool); AVERT(Trace, trace); @@ -1109,8 +1118,9 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg) format = pool->format; base = SegBase(seg); + buffer = SegBuffer(seg); - i = 0; oldFree = awlseg->free; + i = 0; while(i < awlseg->grains) { Addr p, q; Index j; @@ -1119,16 +1129,13 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg) ++i; continue; } - p = AddrAdd(base, i << awl->alignShift); - if(SegBuffer(seg) != NULL) { - Buffer buffer = SegBuffer(seg); - - if(p == BufferScanLimit(buffer) - && BufferScanLimit(buffer) != BufferLimit(buffer)) - { - i = awlIndexOfAddr(base, awl, BufferLimit(buffer)); - continue; - } + p = awlAddrOfIndex(base, awl, i); + if (buffer != NULL + && p == BufferScanLimit(buffer) + && BufferScanLimit(buffer) != BufferLimit(buffer)) + { + i = awlIndexOfAddr(base, awl, BufferLimit(buffer)); + continue; } q = format->skip(AddrAdd(p, format->headerSize)); q = AddrSub(q, format->headerSize); @@ -1145,20 +1152,30 @@ static void AWLReclaim(Pool pool, Trace trace, Seg seg) BTResRange(awlseg->mark, i, j); BTSetRange(awlseg->scanned, i, j); BTResRange(awlseg->alloc, i, j); - awlseg->free += j - i; + reclaimedGrains += j - i; } i = j; } AVER(i == awlseg->grains); - freed = (awlseg->free - oldFree) << awl->alignShift; - awl->size -= freed; - trace->reclaimSize += freed; + AVER(reclaimedGrains <= awlseg->grains); + AVER(awlseg->oldGrains >= reclaimedGrains); + awlseg->oldGrains -= reclaimedGrains; + awlseg->freeGrains += reclaimedGrains; + PoolGenAccountForReclaim(&awl->pgen, AWLGrainsSize(awl, reclaimedGrains), FALSE); + + trace->reclaimSize += AWLGrainsSize(awl, reclaimedGrains); trace->preservedInPlaceCount += preservedInPlaceCount; trace->preservedInPlaceSize += preservedInPlaceSize; SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); - /* @@@@ never frees a segment, see job001687. */ - return; + + if (awlseg->freeGrains == awlseg->grains && buffer == NULL) + /* No survivors */ + PoolGenFree(&awl->pgen, seg, + AWLGrainsSize(awl, awlseg->freeGrains), + AWLGrainsSize(awl, awlseg->oldGrains), + AWLGrainsSize(awl, awlseg->newGrains), + FALSE); } @@ -1306,8 +1323,7 @@ static Bool AWLCheck(AWL awl) CHECKS(AWL, awl); CHECKD(Pool, &awl->poolStruct); CHECKL(awl->poolStruct.class == AWLPoolClassGet()); - CHECKL((Align)1 << awl->alignShift == awl->poolStruct.alignment); - CHECKD(Chain, awl->chain); + CHECKL(AWLGrainsSize(awl, (Count)1) == awl->poolStruct.alignment); /* Nothing to check about succAccesses. */ CHECKL(FUNCHECK(awl->findDependent)); /* Don't bother to check stats. 
*/ diff --git a/mps/code/poollo.c b/mps/code/poollo.c index 3420e22b9ab..c561f351fc1 100644 --- a/mps/code/poollo.c +++ b/mps/code/poollo.c @@ -24,13 +24,13 @@ typedef struct LOStruct *LO; typedef struct LOStruct { PoolStruct poolStruct; /* generic pool structure */ Shift alignShift; /* log_2 of pool alignment */ - Chain chain; /* chain used by this pool */ PoolGenStruct pgen; /* generation representing the pool */ Sig sig; } LOStruct; #define PoolPoolLO(pool) PARENT(LOStruct, poolStruct, pool) #define LOPool(lo) (&(lo)->poolStruct) +#define LOGrainsSize(lo, grains) ((grains) << (lo)->alignShift) /* forward declaration */ @@ -48,8 +48,9 @@ typedef struct LOSegStruct { LO lo; /* owning LO */ BT mark; /* mark bit table */ BT alloc; /* alloc bit table */ - Count free; /* number of free grains */ - Count newAlloc; /* number of grains allocated since last GC */ + Count freeGrains; /* free grains */ + Count oldGrains; /* grains allocated prior to last collection */ + Count newGrains; /* grains allocated since last collection */ Sig sig; /* */ } LOSegStruct; @@ -61,6 +62,7 @@ typedef struct LOSegStruct { static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, Bool reservoirPermit, ArgList args); static void loSegFinish(Seg seg); +static Count loSegGrains(LOSeg loseg); /* LOSegClass -- Class definition for LO segments */ @@ -88,8 +90,8 @@ static Bool LOSegCheck(LOSeg loseg) CHECKL(loseg->mark != NULL); CHECKL(loseg->alloc != NULL); /* Could check exactly how many bits are set in the alloc table. */ - CHECKL(loseg->free + loseg->newAlloc - <= SegSize(LOSegSeg(loseg)) >> loseg->lo->alignShift); + CHECKL(loseg->freeGrains + loseg->oldGrains + loseg->newGrains + == SegSize(LOSegSeg(loseg)) >> loseg->lo->alignShift); return TRUE; } @@ -106,7 +108,7 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, Size tablebytes; /* # bytes in each control array */ Arena arena; /* number of bits needed in each control array */ - Count bits; + Count grains; void *p; AVERT(Seg, seg); @@ -126,8 +128,8 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, AVER(SegWhite(seg) == TraceSetEMPTY); - bits = size >> lo->alignShift; - tablebytes = BTSize(bits); + grains = size >> lo->alignShift; + tablebytes = BTSize(grains); res = ControlAlloc(&p, arena, tablebytes, reservoirPermit); if(res != ResOK) goto failMarkTable; @@ -136,11 +138,12 @@ static Res loSegInit(Seg seg, Pool pool, Addr base, Size size, if(res != ResOK) goto failAllocTable; loseg->alloc = p; - BTResRange(loseg->alloc, 0, bits); - BTSetRange(loseg->mark, 0, bits); + BTResRange(loseg->alloc, 0, grains); + BTSetRange(loseg->mark, 0, grains); loseg->lo = lo; - loseg->free = bits; - loseg->newAlloc = (Count)0; + loseg->freeGrains = grains; + loseg->oldGrains = (Count)0; + loseg->newGrains = (Count)0; loseg->sig = LOSegSig; AVERT(LOSeg, loseg); return ResOK; @@ -163,7 +166,7 @@ static void loSegFinish(Seg seg) Pool pool; Arena arena; Size tablesize; - Count bits; + Count grains; AVERT(Seg, seg); loseg = SegLOSeg(seg); @@ -173,8 +176,8 @@ static void loSegFinish(Seg seg) AVERT(LO, lo); arena = PoolArena(pool); - bits = SegSize(seg) >> lo->alignShift; - tablesize = BTSize(bits); + grains = loSegGrains(loseg); + tablesize = BTSize(grains); ControlFree(arena, (Addr)loseg->alloc, tablesize); ControlFree(arena, (Addr)loseg->mark, tablesize); loseg->sig = SigInvalid; @@ -186,7 +189,7 @@ static void loSegFinish(Seg seg) ATTRIBUTE_UNUSED -static Count loSegBits(LOSeg loseg) +static Count loSegGrains(LOSeg loseg) { LO lo; Size size; @@ -205,7 
+208,7 @@ static Count loSegBits(LOSeg loseg) (AddrOffset((base), (p)) >> (lo)->alignShift) #define loAddrOfIndex(base, lo, i) \ - (AddrAdd((base), (i) << (lo)->alignShift)) + (AddrAdd((base), LOGrainsSize((lo), (i)))) /* loSegFree -- mark block from baseIndex to limitIndex free */ @@ -214,12 +217,11 @@ static void loSegFree(LOSeg loseg, Index baseIndex, Index limitIndex) { AVERT(LOSeg, loseg); AVER(baseIndex < limitIndex); - AVER(limitIndex <= loSegBits(loseg)); + AVER(limitIndex <= loSegGrains(loseg)); AVER(BTIsSetRange(loseg->alloc, baseIndex, limitIndex)); BTResRange(loseg->alloc, baseIndex, limitIndex); BTSetRange(loseg->mark, baseIndex, limitIndex); - loseg->free += limitIndex - baseIndex; } @@ -234,7 +236,7 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn, LO lo; Seg seg; Count agrains; - Count bits; + Count grains; Addr segBase; AVER(bReturn != NULL); @@ -249,23 +251,22 @@ static Bool loSegFindFree(Addr *bReturn, Addr *lReturn, /* of the allocation request */ agrains = size >> lo->alignShift; AVER(agrains >= 1); - AVER(agrains <= loseg->free); + AVER(agrains <= loseg->freeGrains); AVER(size <= SegSize(seg)); - if(SegBuffer(seg) != NULL) { + if(SegBuffer(seg) != NULL) /* Don't bother trying to allocate from a buffered segment */ return FALSE; - } - bits = SegSize(seg) >> lo->alignShift; + grains = loSegGrains(loseg); if(!BTFindLongResRange(&baseIndex, &limitIndex, loseg->alloc, - 0, bits, agrains)) { + 0, grains, agrains)) { return FALSE; } /* check that BTFindLongResRange really did find enough space */ AVER(baseIndex < limitIndex); - AVER((limitIndex-baseIndex) << lo->alignShift >= size); + AVER(LOGrainsSize(lo, limitIndex - baseIndex) >= size); segBase = SegBase(seg); *bReturn = loAddrOfIndex(segBase, lo, baseIndex); *lReturn = loAddrOfIndex(segBase, lo, limitIndex); @@ -293,9 +294,9 @@ static Res loSegCreate(LOSeg *loSegReturn, Pool pool, Size size, lo = PoolPoolLO(pool); AVERT(LO, lo); - res = ChainAlloc(&seg, lo->chain, lo->pgen.nr, EnsureLOSegClass(), - SizeAlignUp(size, ArenaAlign(PoolArena(pool))), - pool, withReservoirPermit, argsNone); + res = PoolGenAlloc(&seg, &lo->pgen, EnsureLOSegClass(), + SizeAlignUp(size, ArenaAlign(PoolArena(pool))), + withReservoirPermit, argsNone); if (res != ResOK) return res; @@ -313,7 +314,7 @@ static void loSegReclaim(LOSeg loseg, Trace trace) { Addr p, base, limit; Bool marked; - Count bytesReclaimed = (Count)0; + Count reclaimedGrains = (Count)0; Seg seg; LO lo; Format format; @@ -371,23 +372,30 @@ static void loSegReclaim(LOSeg loseg, Trace trace) Index j = loIndexOfAddr(base, lo, q); /* This object is not marked, so free it */ loSegFree(loseg, i, j); - bytesReclaimed += AddrOffset(p, q); + reclaimedGrains += j - i; } p = q; } AVER(p == limit); - AVER(bytesReclaimed <= SegSize(seg)); - trace->reclaimSize += bytesReclaimed; - lo->pgen.totalSize -= bytesReclaimed; + AVER(reclaimedGrains <= loSegGrains(loseg)); + AVER(loseg->oldGrains >= reclaimedGrains); + loseg->oldGrains -= reclaimedGrains; + loseg->freeGrains += reclaimedGrains; + PoolGenAccountForReclaim(&lo->pgen, LOGrainsSize(lo, reclaimedGrains), FALSE); + + trace->reclaimSize += LOGrainsSize(lo, reclaimedGrains); trace->preservedInPlaceCount += preservedInPlaceCount; trace->preservedInPlaceSize += preservedInPlaceSize; SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace)); - if(!marked) { - SegFree(seg); - } + if (!marked) + PoolGenFree(&lo->pgen, seg, + LOGrainsSize(lo, loseg->freeGrains), + LOGrainsSize(lo, loseg->oldGrains), + LOGrainsSize(lo, loseg->newGrains), + FALSE); 
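
Across poolams.c, poolawl.c and poollo.c this patch replaces the old free/newAlloc pair (and the hand-maintained pgen.totalSize/newSize counters) with a three-way split of each segment's grains into freeGrains, oldGrains and newGrains, kept in step with the pool generation through the PoolGenAccountForFill/Empty/Age/Reclaim calls: buffer fill moves grains from free to new, buffer empty returns unused new grains to free, whitening ages the condemned new grains to old, and reclaim returns dead old grains to free, so the segment checks can assert that the three counts always sum to the segment's total. The following sketch is a simplified standalone model of that state machine, not MPS code: every name in it is invented for illustration and the PoolGen side of the accounting is omitted.

/* Toy model of the per-segment grain accounting used in this patch.
 * NOT MPS code: names and the PoolGen side are simplified; it only
 * demonstrates the invariant grains == free + old + new and the
 * transitions fill (free->new), empty (new->free), whiten (new->old)
 * and reclaim (old->free). */

#include <assert.h>
#include <stdio.h>

typedef struct {
  unsigned grains;      /* total grains in the segment */
  unsigned freeGrains;  /* free grains */
  unsigned oldGrains;   /* grains allocated before the last collection */
  unsigned newGrains;   /* grains allocated since the last collection */
} SegModel;

static void check(const SegModel *seg)
{
  assert(seg->grains == seg->freeGrains + seg->oldGrains + seg->newGrains);
}

static void fill(SegModel *seg, unsigned n)       /* cf. AMSBufferFill etc. */
{
  assert(seg->freeGrains >= n);
  seg->freeGrains -= n; seg->newGrains += n; check(seg);
}

static void empty(SegModel *seg, unsigned n)      /* cf. AMSBufferEmpty etc. */
{
  assert(seg->newGrains >= n);
  seg->newGrains -= n; seg->freeGrains += n; check(seg);
}

static void whiten(SegModel *seg, unsigned uncondemned) /* cf. AMSWhiten etc. */
{
  unsigned aged;
  assert(seg->newGrains >= uncondemned);
  aged = seg->newGrains - uncondemned;
  seg->oldGrains += aged; seg->newGrains = uncondemned; check(seg);
}

static void reclaim(SegModel *seg, unsigned survivors)  /* cf. AMSReclaim etc. */
{
  unsigned reclaimed;
  assert(seg->oldGrains >= survivors);
  reclaimed = seg->oldGrains - survivors;
  seg->oldGrains -= reclaimed; seg->freeGrains += reclaimed; check(seg);
}

int main(void)
{
  SegModel seg = {64, 64, 0, 0};
  fill(&seg, 48);      /* buffer fill takes 48 grains */
  empty(&seg, 8);      /* 8 grains of the buffer were unused */
  whiten(&seg, 0);     /* condemn: the 40 remaining new grains become old */
  reclaim(&seg, 10);   /* 10 grains survive, 30 go back to free */
  printf("free=%u old=%u new=%u\n",
         seg.freeGrains, seg.oldGrains, seg.newGrains);
  return 0;
}
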
} /* This walks over _all_ objects in the heap, whether they are */ @@ -400,7 +408,7 @@ static void LOWalk(Pool pool, Seg seg, Addr base; LO lo; LOSeg loseg; - Index i, limit; + Index i, grains; Format format; AVERT(Pool, pool); @@ -417,10 +425,10 @@ static void LOWalk(Pool pool, Seg seg, AVERT(Format, format); base = SegBase(seg); - limit = SegSize(seg) >> lo->alignShift; + grains = loSegGrains(loseg); i = 0; - while(i < limit) { + while(i < grains) { /* object is a slight misnomer because it might point to a */ /* free grain */ Addr object = loAddrOfIndex(base, lo, i); @@ -475,9 +483,11 @@ static Res LOInit(Pool pool, ArgList args) Arena arena; Res res; ArgStruct arg; + Chain chain; unsigned gen = LO_GEN_DEFAULT; AVERT(Pool, pool); + AVERT(ArgList, args); arena = PoolArena(pool); @@ -486,22 +496,22 @@ static Res LOInit(Pool pool, ArgList args) ArgRequire(&arg, args, MPS_KEY_FORMAT); pool->format = arg.val.format; if (ArgPick(&arg, args, MPS_KEY_CHAIN)) - lo->chain = arg.val.chain; + chain = arg.val.chain; else { - lo->chain = ArenaGlobals(arena)->defaultChain; + chain = ArenaGlobals(arena)->defaultChain; gen = 1; /* avoid the nursery of the default chain by default */ } if (ArgPick(&arg, args, MPS_KEY_GEN)) gen = arg.val.u; AVERT(Format, pool->format); - AVERT(Chain, lo->chain); - AVER(gen <= ChainGens(lo->chain)); + AVERT(Chain, chain); + AVER(gen <= ChainGens(chain)); pool->alignment = pool->format->alignment; lo->alignShift = SizeLog2((Size)PoolAlignment(pool)); - res = PoolGenInit(&lo->pgen, lo->chain, gen, pool); + res = PoolGenInit(&lo->pgen, ChainGen(chain, gen), pool); if (res != ResOK) goto failGenInit; @@ -530,10 +540,12 @@ static void LOFinish(Pool pool) RING_FOR(node, &pool->segRing, nextNode) { Seg seg = SegOfPoolRing(node); LOSeg loseg = SegLOSeg(seg); - AVERT(LOSeg, loseg); - UNUSED(loseg); /* */ - SegFree(seg); + PoolGenFree(&lo->pgen, seg, + LOGrainsSize(lo, loseg->freeGrains), + LOGrainsSize(lo, loseg->oldGrains), + LOGrainsSize(lo, loseg->newGrains), + FALSE); } PoolGenFinish(&lo->pgen); @@ -568,7 +580,7 @@ static Res LOBufferFill(Addr *baseReturn, Addr *limitReturn, Seg seg = SegOfPoolRing(node); loseg = SegLOSeg(seg); AVERT(LOSeg, loseg); - if((loseg->free << lo->alignShift) >= size + if(LOGrainsSize(lo, loseg->freeGrains) >= size && loSegFindFree(&base, &limit, loseg, size)) goto found; } @@ -593,12 +605,12 @@ found: AVER(BTIsResRange(loseg->alloc, baseIndex, limitIndex)); AVER(BTIsSetRange(loseg->mark, baseIndex, limitIndex)); BTSetRange(loseg->alloc, baseIndex, limitIndex); - loseg->free -= limitIndex - baseIndex; - loseg->newAlloc += limitIndex - baseIndex; + AVER(loseg->freeGrains >= limitIndex - baseIndex); + loseg->freeGrains -= limitIndex - baseIndex; + loseg->newGrains += limitIndex - baseIndex; } - lo->pgen.totalSize += AddrOffset(base, limit); - lo->pgen.newSize += AddrOffset(base, limit); + PoolGenAccountForFill(&lo->pgen, AddrOffset(base, limit), FALSE); *baseReturn = base; *limitReturn = limit; @@ -617,7 +629,7 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit) Addr base, segBase; Seg seg; LOSeg loseg; - Index baseIndex, initIndex, limitIndex; + Index initIndex, limitIndex; AVERT(Pool, pool); lo = PARENT(LOStruct, poolStruct, pool); @@ -642,21 +654,17 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit) AVER(init <= SegLimit(seg)); /* convert base, init, and limit, to quantum positions */ - baseIndex = loIndexOfAddr(segBase, lo, base); initIndex = loIndexOfAddr(segBase, lo, init); limitIndex = 
loIndexOfAddr(segBase, lo, limit); - /* Record the unused portion at the end of the buffer */ - /* as being free. */ - AVER(baseIndex == limitIndex - || BTIsSetRange(loseg->alloc, baseIndex, limitIndex)); if(initIndex != limitIndex) { + /* Free the unused portion of the buffer (this must be "new", since + * it's not condemned). */ loSegFree(loseg, initIndex, limitIndex); - lo->pgen.totalSize -= AddrOffset(init, limit); - /* All of the buffer must be new, since buffered segs are not condemned. */ - AVER(loseg->newAlloc >= limitIndex - baseIndex); - loseg->newAlloc -= limitIndex - initIndex; - lo->pgen.newSize -= AddrOffset(init, limit); + AVER(loseg->newGrains >= limitIndex - initIndex); + loseg->newGrains -= limitIndex - initIndex; + loseg->freeGrains += limitIndex - initIndex; + PoolGenAccountForEmpty(&lo->pgen, AddrOffset(init, limit), FALSE); } } @@ -666,7 +674,9 @@ static void LOBufferEmpty(Pool pool, Buffer buffer, Addr init, Addr limit) static Res LOWhiten(Pool pool, Trace trace, Seg seg) { LO lo; - Count bits; + LOSeg loseg; + Buffer buffer; + Count grains, uncondemned; AVERT(Pool, pool); lo = PoolPoolLO(pool); @@ -676,21 +686,32 @@ static Res LOWhiten(Pool pool, Trace trace, Seg seg) AVERT(Seg, seg); AVER(SegWhite(seg) == TraceSetEMPTY); - if(SegBuffer(seg) == NULL) { - LOSeg loseg = SegLOSeg(seg); - AVERT(LOSeg, loseg); + loseg = SegLOSeg(seg); + AVERT(LOSeg, loseg); + grains = loSegGrains(loseg); - bits = SegSize(seg) >> lo->alignShift; - /* Allocated objects should be whitened, free areas should */ - /* be left "black". */ - BTCopyInvertRange(loseg->alloc, loseg->mark, 0, bits); - /* @@@@ We could subtract all the free grains. */ - trace->condemned += SegSize(seg); - lo->pgen.newSize -= loseg->newAlloc << lo->alignShift; - loseg->newAlloc = (Count)0; - SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + /* Whiten allocated objects; leave free areas black. 
*/ + buffer = SegBuffer(seg); + if (buffer != NULL) { + Addr base = SegBase(seg); + Index scanLimitIndex = loIndexOfAddr(base, lo, BufferScanLimit(buffer)); + Index limitIndex = loIndexOfAddr(base, lo, BufferLimit(buffer)); + uncondemned = limitIndex - scanLimitIndex; + if (0 < scanLimitIndex) + BTCopyInvertRange(loseg->alloc, loseg->mark, 0, scanLimitIndex); + if (limitIndex < grains) + BTCopyInvertRange(loseg->alloc, loseg->mark, limitIndex, grains); + } else { + uncondemned = (Count)0; + BTCopyInvertRange(loseg->alloc, loseg->mark, 0, grains); } + PoolGenAccountForAge(&lo->pgen, LOGrainsSize(lo, loseg->newGrains - uncondemned), FALSE); + loseg->oldGrains += loseg->newGrains - uncondemned; + loseg->newGrains = uncondemned; + trace->condemned += LOGrainsSize(lo, loseg->oldGrains); + SegSetWhite(seg, TraceSetAdd(SegWhite(seg), trace)); + return ResOK; } @@ -814,8 +835,7 @@ static Bool LOCheck(LO lo) CHECKD(Pool, &lo->poolStruct); CHECKL(lo->poolStruct.class == EnsureLOPoolClass()); CHECKL(ShiftCheck(lo->alignShift)); - CHECKL((Align)1 << lo->alignShift == PoolAlignment(&lo->poolStruct)); - CHECKD(Chain, lo->chain); + CHECKL(LOGrainsSize(lo, (Count)1) == PoolAlignment(&lo->poolStruct)); CHECKD(PoolGen, &lo->pgen); return TRUE; } diff --git a/mps/code/poolmfs.c b/mps/code/poolmfs.c index c7ed27f3f38..3d970092de0 100644 --- a/mps/code/poolmfs.c +++ b/mps/code/poolmfs.c @@ -57,18 +57,8 @@ typedef struct MFSHeaderStruct { } HeaderStruct, *Header; - #define UNIT_MIN sizeof(HeaderStruct) -MFSInfo MFSGetInfo(void) -{ - static const struct MFSInfoStruct info = - { - /* unitSizeMin */ UNIT_MIN - }; - return &info; -} - Pool (MFSPool)(MFS mfs) { @@ -161,6 +151,8 @@ void MFSFinishTracts(Pool pool, MFSTractVisitor visitor, static void MFSTractFreeVisitor(Pool pool, Addr base, Size size, void *closureP, Size closureS) { + AVER(closureP == UNUSED_POINTER); + AVER(closureS == UNUSED_SIZE); UNUSED(closureP); UNUSED(closureS); ArenaFree(base, size, pool); @@ -175,7 +167,7 @@ static void MFSFinish(Pool pool) mfs = PoolPoolMFS(pool); AVERT(MFS, mfs); - MFSFinishTracts(pool, MFSTractFreeVisitor, NULL, 0); + MFSFinishTracts(pool, MFSTractFreeVisitor, UNUSED_POINTER, UNUSED_SIZE); mfs->sig = SigInvalid; } @@ -337,7 +329,7 @@ static Res MFSDescribe(Pool pool, mps_lib_FILE *stream, Count depth) DEFINE_POOL_CLASS(MFSPoolClass, this) { - INHERIT_CLASS(this, AbstractAllocFreePoolClass); + INHERIT_CLASS(this, AbstractPoolClass); this->name = "MFS"; this->size = sizeof(MFSStruct); this->offset = offsetof(MFSStruct, poolStruct); diff --git a/mps/code/poolmfs.h b/mps/code/poolmfs.h index 7ab337d4393..5f2fd0780ed 100644 --- a/mps/code/poolmfs.h +++ b/mps/code/poolmfs.h @@ -2,7 +2,7 @@ * * $Id$ * - * Copyright (c) 2001 Ravenbrook Limited. See end of file for license. + * Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license. * * The MFS pool is used to manage small fixed-size chunks of memory. It * stores control structures in the memory it manages, rather than to one @@ -39,14 +39,6 @@ extern Bool MFSCheck(MFS mfs); extern Pool (MFSPool)(MFS mfs); -typedef const struct MFSInfoStruct *MFSInfo; - -struct MFSInfoStruct { - Size unitSizeMin; /* minimum unit size */ -}; - -extern MFSInfo MFSGetInfo(void); - extern const struct mps_key_s _mps_key_MFSExtendSelf; #define MFSExtendSelf (&_mps_key_MFSExtendSelf) #define MFSExtendSelf_FIELD b @@ -63,7 +55,7 @@ extern void MFSFinishTracts(Pool pool, MFSTractVisitor visitor, /* C. COPYRIGHT AND LICENSE * - * Copyright (C) 2001-2002 Ravenbrook Limited . 
+ * Copyright (C) 2001-2014 Ravenbrook Limited . * All rights reserved. This is an open source license. Contact * Ravenbrook for commercial licensing options. * diff --git a/mps/code/poolmrg.c b/mps/code/poolmrg.c index 2cb2f1fc95a..ea40bef31c1 100644 --- a/mps/code/poolmrg.c +++ b/mps/code/poolmrg.c @@ -868,7 +868,6 @@ DEFINE_POOL_CLASS(MRGPoolClass, this) this->name = "MRG"; this->size = sizeof(MRGStruct); this->offset = offsetof(MRGStruct, poolStruct); - this->attr |= AttrSCAN; this->init = MRGInit; this->finish = MRGFinish; this->grey = PoolTrivGrey; diff --git a/mps/code/poolmv.c b/mps/code/poolmv.c index 1a854f79f61..fb527d71911 100644 --- a/mps/code/poolmv.c +++ b/mps/code/poolmv.c @@ -134,9 +134,7 @@ typedef struct MVSpanStruct { ATTRIBUTE_UNUSED static Bool MVSpanCheck(MVSpan span) { - Addr addr, base, limit; - Arena arena; - Tract tract; + Addr base, limit; CHECKS(MVSpan, span); @@ -172,13 +170,22 @@ static Bool MVSpanCheck(MVSpan span) CHECKL(span->largest == SpanSize(span)+1); } - /* Each tract of the span must refer to the span */ - arena = PoolArena(TractPool(span->tract)); - TRACT_FOR(tract, addr, arena, base, limit) { - CHECKD_NOSIG(Tract, tract); - CHECKL(TractP(tract) == (void *)span); + /* Note that even if the CHECKs are compiled away there is still a + * significant cost in looping over the tracts, hence this guard. */ +#if defined(AVER_AND_CHECK_ALL) + { + Addr addr; + Arena arena; + Tract tract; + /* Each tract of the span must refer to the span */ + arena = PoolArena(TractPool(span->tract)); + TRACT_FOR(tract, addr, arena, base, limit) { + CHECKD_NOSIG(Tract, tract); + CHECKL(TractP(tract) == (void *)span); + } + CHECKL(addr == limit); } - CHECKL(addr == limit); +#endif return TRUE; } @@ -210,6 +217,7 @@ static void MVDebugVarargs(ArgStruct args[MPS_ARGS_MAX], va_list varargs) static Res MVInit(Pool pool, ArgList args) { + Align align = MV_ALIGN_DEFAULT; Size extendBy = MV_EXTEND_BY_DEFAULT; Size avgSize = MV_AVG_SIZE_DEFAULT; Size maxSize = MV_MAX_SIZE_DEFAULT; @@ -219,6 +227,8 @@ static Res MVInit(Pool pool, ArgList args) Res res; ArgStruct arg; + if (ArgPick(&arg, args, MPS_KEY_ALIGN)) + align = arg.val.align; if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY)) extendBy = arg.val.size; if (ArgPick(&arg, args, MPS_KEY_MEAN_SIZE)) @@ -226,12 +236,14 @@ static Res MVInit(Pool pool, ArgList args) if (ArgPick(&arg, args, MPS_KEY_MAX_SIZE)) maxSize = arg.val.size; + AVERT(Align, align); AVER(extendBy > 0); AVER(avgSize > 0); AVER(avgSize <= extendBy); AVER(maxSize > 0); AVER(extendBy <= maxSize); + pool->alignment = align; mv = Pool2MV(pool); arena = PoolArena(pool); @@ -619,6 +631,7 @@ static void MVFree(Pool pool, Addr old, Size size) AVERT(MV, mv); AVER(old != (Addr)0); + AVER(AddrIsAligned(old, pool->alignment)); AVER(size > 0); size = SizeAlignUp(size, pool->alignment); @@ -773,7 +786,6 @@ static Res MVDescribe(Pool pool, mps_lib_FILE *stream, Count depth) DEFINE_POOL_CLASS(MVPoolClass, this) { INHERIT_CLASS(this, AbstractBufferPoolClass); - PoolClassMixInAllocFree(this); this->name = "MV"; this->size = sizeof(MVStruct); this->offset = offsetof(MVStruct, poolStruct); diff --git a/mps/code/poolmv2.c b/mps/code/poolmv2.c index 0012c5683bc..fed87cf4824 100644 --- a/mps/code/poolmv2.c +++ b/mps/code/poolmv2.c @@ -14,6 +14,7 @@ #include "mpscmvt.h" #include "abq.h" #include "cbs.h" +#include "failover.h" #include "freelist.h" #include "meter.h" #include "range.h" @@ -51,8 +52,9 @@ static Res MVTContingencySearch(Addr *baseReturn, Addr *limitReturn, MVT mvt, Size min); static 
Bool MVTCheckFit(Addr base, Addr limit, Size min, Arena arena); static ABQ MVTABQ(MVT mvt); -static CBS MVTCBS(MVT mvt); -static Freelist MVTFreelist(MVT mvt); +static Land MVTCBS(MVT mvt); +static Land MVTFreelist(MVT mvt); +static Land MVTFailover(MVT mvt); /* Types */ @@ -62,6 +64,7 @@ typedef struct MVTStruct PoolStruct poolStruct; CBSStruct cbsStruct; /* The coalescing block structure */ FreelistStruct flStruct; /* The emergency free list structure */ + FailoverStruct foStruct; /* The fail-over mechanism */ ABQStruct abqStruct; /* The available block queue */ /* */ Size minSize; /* Pool parameter */ @@ -136,7 +139,6 @@ DEFINE_POOL_CLASS(MVTPoolClass, this) this->name = "MVT"; this->size = sizeof(MVTStruct); this->offset = offsetof(MVTStruct, poolStruct); - this->attr |= AttrFREE; this->varargs = MVTVarargs; this->init = MVTInit; this->finish = MVTFinish; @@ -149,12 +151,6 @@ DEFINE_POOL_CLASS(MVTPoolClass, this) /* Macros */ - -/* .trans.something: the C language sucks */ -#define unless(cond) if (!(cond)) -#define when(cond) if (cond) - - #define Pool2MVT(pool) PARENT(MVTStruct, poolStruct, pool) #define MVT2Pool(mvt) (&(mvt)->poolStruct) @@ -168,15 +164,21 @@ static ABQ MVTABQ(MVT mvt) } -static CBS MVTCBS(MVT mvt) +static Land MVTCBS(MVT mvt) { - return &mvt->cbsStruct; + return &mvt->cbsStruct.landStruct; } -static Freelist MVTFreelist(MVT mvt) +static Land MVTFreelist(MVT mvt) { - return &mvt->flStruct; + return &mvt->flStruct.landStruct; +} + + +static Land MVTFailover(MVT mvt) +{ + return &mvt->foStruct.landStruct; } @@ -252,7 +254,12 @@ static Res MVTInit(Pool pool, ArgList args) fragLimit = (Count)(arg.val.d * 100); } - AVER(SizeIsAligned(align, MPS_PF_ALIGN)); + AVERT(Align, align); + /* This restriction on the alignment is necessary because of the use + * of a Freelist to store the free address ranges in low-memory + * situations. See . 
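
The alignment restriction introduced in MVTInit here is visible to clients: because MVT now falls back to a Freelist, which keeps its bookkeeping inside the free ranges themselves when memory is too low to extend the CBS, the pool alignment must be at least FreelistMinimumAlignment. A minimal sketch of creating an MVT pool with an explicit alignment follows; it assumes the public keyword-argument interface from mps.h and the MVT class declared in mpscmvt.h as I recall them, and the sizes shown are arbitrary examples rather than recommended values.

/* Sketch only: assumes the public MPS keyword-argument interface;
 * key names and defaults should be checked against the released headers. */
#include "mps.h"
#include "mpscmvt.h"

static mps_pool_t make_mvt_pool(mps_arena_t arena)
{
  mps_pool_t pool;
  mps_res_t res;
  MPS_ARGS_BEGIN(args) {
    /* The alignment must satisfy the Freelist's minimum (see the AVER
     * on FreelistMinimumAlignment below); a pointer-sized alignment is
     * assumed to suffice here, purely for illustration. */
    MPS_ARGS_ADD(args, MPS_KEY_ALIGN, sizeof(void *));
    MPS_ARGS_ADD(args, MPS_KEY_MIN_SIZE, 16);
    MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, 64);
    MPS_ARGS_ADD(args, MPS_KEY_MAX_SIZE, 1024);
    res = mps_pool_create_k(&pool, arena, mps_class_mvt(), args);
  } MPS_ARGS_END(args);
  if (res != MPS_RES_OK)
    return NULL;
  return pool;
}
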
+ */ + AVER(AlignIsAligned(align, FreelistMinimumAlignment)); AVER(0 < minSize); AVER(minSize <= meanSize); AVER(meanSize <= maxSize); @@ -269,19 +276,29 @@ static Res MVTInit(Pool pool, ArgList args) if (abqDepth < 3) abqDepth = 3; - res = CBSInit(MVTCBS(mvt), arena, (void *)mvt, align, - /* fastFind */ FALSE, /* zoned */ FALSE, args); + res = LandInit(MVTCBS(mvt), CBSFastLandClassGet(), arena, align, mvt, + mps_args_none); if (res != ResOK) goto failCBS; + res = LandInit(MVTFreelist(mvt), FreelistLandClassGet(), arena, align, mvt, + mps_args_none); + if (res != ResOK) + goto failFreelist; + + MPS_ARGS_BEGIN(foArgs) { + MPS_ARGS_ADD(foArgs, FailoverPrimary, MVTCBS(mvt)); + MPS_ARGS_ADD(foArgs, FailoverSecondary, MVTFreelist(mvt)); + res = LandInit(MVTFailover(mvt), FailoverLandClassGet(), arena, align, mvt, + foArgs); + } MPS_ARGS_END(foArgs); + if (res != ResOK) + goto failFailover; + res = ABQInit(arena, MVTABQ(mvt), (void *)mvt, abqDepth, sizeof(RangeStruct)); if (res != ResOK) goto failABQ; - res = FreelistInit(MVTFreelist(mvt), align); - if (res != ResOK) - goto failFreelist; - pool->alignment = align; mvt->reuseSize = reuseSize; mvt->fillSize = fillSize; @@ -344,10 +361,12 @@ static Res MVTInit(Pool pool, ArgList args) reserveDepth, fragLimit); return ResOK; -failFreelist: - ABQFinish(arena, MVTABQ(mvt)); failABQ: - CBSFinish(MVTCBS(mvt)); + LandFinish(MVTFailover(mvt)); +failFailover: + LandFinish(MVTFreelist(mvt)); +failFreelist: + LandFinish(MVTCBS(mvt)); failCBS: AVER(res != ResOK); return res; @@ -365,6 +384,7 @@ static Bool MVTCheck(MVT mvt) CHECKD(CBS, &mvt->cbsStruct); CHECKD(ABQ, &mvt->abqStruct); CHECKD(Freelist, &mvt->flStruct); + CHECKD(Failover, &mvt->foStruct); CHECKL(mvt->reuseSize >= 2 * mvt->fillSize); CHECKL(mvt->fillSize >= mvt->maxSize); CHECKL(mvt->maxSize >= mvt->meanSize); @@ -414,10 +434,11 @@ static void MVTFinish(Pool pool) SegFree(SegOfPoolRing(node)); } - /* Finish the Freelist, ABQ and CBS structures */ - FreelistFinish(MVTFreelist(mvt)); + /* Finish the ABQ, Failover, Freelist and CBS structures */ ABQFinish(arena, MVTABQ(mvt)); - CBSFinish(MVTCBS(mvt)); + LandFinish(MVTFailover(mvt)); + LandFinish(MVTFreelist(mvt)); + LandFinish(MVTCBS(mvt)); } @@ -607,14 +628,7 @@ static Bool MVTABQFill(Addr *baseReturn, Addr *limitReturn, } -/* MVTContingencyFill -- try to fill a request from the CBS or Freelist - * - * (The CBS and Freelist are lumped together under the heading of - * "contingency" for historical reasons: the Freelist used to be part - * of the CBS. There is no principled reason why these two are - * searched at the same time: if it should prove convenient to - * separate them, go ahead.) - */ +/* MVTContingencyFill -- try to fill a request from the free lists */ static Bool MVTContingencyFill(Addr *baseReturn, Addr *limitReturn, MVT mvt, Size minSize) { @@ -703,8 +717,7 @@ static Res MVTBufferFill(Addr *baseReturn, Addr *limitReturn, METER_ACC(mvt->underflows, minSize); /* If fragmentation is acceptable, attempt to find a free block from - the CBS or Freelist. - */ + the free lists. 
*/ if (mvt->available >= mvt->availLimit) { METER_ACC(mvt->fragLimitContingencies, minSize); if (MVTContingencyFill(baseReturn, limitReturn, mvt, minSize)) @@ -745,6 +758,7 @@ static Bool MVTDeleteOverlapping(Bool *deleteReturn, void *element, AVER(deleteReturn != NULL); AVER(element != NULL); AVER(closureP != NULL); + AVER(closureS == UNUSED_SIZE); UNUSED(closureS); oldRange = element; @@ -790,8 +804,8 @@ overflow: } -/* MVTInsert -- insert an address range into the CBS (or the Freelist - * if that fails) and update the ABQ accordingly. +/* MVTInsert -- insert an address range into the free lists and update + * the ABQ accordingly. */ static Res MVTInsert(MVT mvt, Addr base, Addr limit) { @@ -800,18 +814,9 @@ static Res MVTInsert(MVT mvt, Addr base, Addr limit) AVERT(MVT, mvt); AVER(base < limit); - - /* Attempt to flush the Freelist to the CBS to give maximum - * opportunities for coalescence. */ - FreelistFlushToCBS(MVTFreelist(mvt), MVTCBS(mvt)); RangeInit(&range, base, limit); - res = CBSInsert(&newRange, MVTCBS(mvt), &range); - if (ResIsAllocFailure(res)) { - /* CBS ran out of memory for splay nodes: add range to emergency - * free list instead. */ - res = FreelistInsert(&newRange, MVTFreelist(mvt), &range); - } + res = LandInsert(&newRange, MVTFailover(mvt), &range); if (res != ResOK) return res; @@ -820,7 +825,7 @@ static Res MVTInsert(MVT mvt, Addr base, Addr limit) * with ranges on the ABQ, so ensure that the corresponding ranges * are coalesced on the ABQ. */ - ABQIterate(MVTABQ(mvt), MVTDeleteOverlapping, &newRange, 0); + ABQIterate(MVTABQ(mvt), MVTDeleteOverlapping, &newRange, UNUSED_SIZE); (void)MVTReserve(mvt, &newRange); } @@ -828,8 +833,8 @@ static Res MVTInsert(MVT mvt, Addr base, Addr limit) } -/* MVTDelete -- delete an address range from the CBS and the Freelist, - * and update the ABQ accordingly. +/* MVTDelete -- delete an address range from the free lists, and + * update the ABQ accordingly. */ static Res MVTDelete(MVT mvt, Addr base, Addr limit) { @@ -840,27 +845,7 @@ static Res MVTDelete(MVT mvt, Addr base, Addr limit) AVER(base < limit); RangeInit(&range, base, limit); - res = CBSDelete(&rangeOld, MVTCBS(mvt), &range); - if (ResIsAllocFailure(res)) { - /* CBS ran out of memory for splay nodes, which must mean that - * there were fragments on both sides: see - * . Handle this by - * deleting the whole of rangeOld (which requires no - * allocation) and re-inserting the fragments. */ - RangeStruct rangeOld2; - res = CBSDelete(&rangeOld2, MVTCBS(mvt), &rangeOld); - AVER(res == ResOK); - AVER(RangesEqual(&rangeOld2, &rangeOld)); - AVER(RangeBase(&rangeOld) != base); - res = MVTInsert(mvt, RangeBase(&rangeOld), base); - AVER(res == ResOK); - AVER(RangeLimit(&rangeOld) != limit); - res = MVTInsert(mvt, limit, RangeLimit(&rangeOld)); - AVER(res == ResOK); - } else if (res == ResFAIL) { - /* Not found in the CBS: try the Freelist. */ - res = FreelistDelete(&rangeOld, MVTFreelist(mvt), &range); - } + res = LandDelete(&rangeOld, MVTFailover(mvt), &range); if (res != ResOK) return res; AVER(RangesNest(&rangeOld, &range)); @@ -869,7 +854,7 @@ static Res MVTDelete(MVT mvt, Addr base, Addr limit) * might be on the ABQ, so ensure it is removed. */ if (RangeSize(&rangeOld) >= mvt->reuseSize) - ABQIterate(MVTABQ(mvt), MVTDeleteOverlapping, &rangeOld, 0); + ABQIterate(MVTABQ(mvt), MVTDeleteOverlapping, &rangeOld, UNUSED_SIZE); /* There might be fragments at the left or the right of the deleted * range, and either might be big enough to go back on the ABQ. 
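
The open-coded fallback from the CBS to the emergency Freelist (CBSInsert, then FreelistInsert on allocation failure, plus the periodic FreelistFlushToCBS) is replaced by a single fail-over Land configured once with the CBS as primary and the Freelist as secondary; MVTInsert, MVTDelete, the ABQ refill and the contingency search now all go through LandInsert, LandDelete and LandIterate on that one Land. The sketch below condenses the construction and use of such a fail-over from the MVTInit and MVTInsert hunks above; it relies on the MPS-internal interfaces named in this patch (cbs.h, freelist.h, failover.h), so it is a schematic restatement rather than a standalone program, and the FreeStore* names are invented for the sketch.

/* Schematic restatement of the fail-over pattern above (MPS-internal
 * interfaces; not compilable outside the MPS tree). */
#include "mpm.h"
#include "cbs.h"
#include "freelist.h"
#include "failover.h"

typedef struct FreeStoreStruct {
  CBSStruct cbsStruct;       /* primary: coalescing block structure */
  FreelistStruct flStruct;   /* secondary: emergency free list */
  FailoverStruct foStruct;   /* tries the CBS, falls back to the Freelist */
} FreeStoreStruct, *FreeStore;

#define StoreCBS(store) (&(store)->cbsStruct.landStruct)
#define StoreFreelist(store) (&(store)->flStruct.landStruct)
#define StoreFailover(store) (&(store)->foStruct.landStruct)

static Res FreeStoreInit(FreeStore store, Arena arena, Align align, void *owner)
{
  Res res;
  res = LandInit(StoreCBS(store), CBSFastLandClassGet(), arena, align, owner,
                 mps_args_none);
  if (res != ResOK)
    return res;
  res = LandInit(StoreFreelist(store), FreelistLandClassGet(), arena, align,
                 owner, mps_args_none);
  if (res != ResOK)
    goto failFreelist;
  MPS_ARGS_BEGIN(foArgs) {
    MPS_ARGS_ADD(foArgs, FailoverPrimary, StoreCBS(store));
    MPS_ARGS_ADD(foArgs, FailoverSecondary, StoreFreelist(store));
    res = LandInit(StoreFailover(store), FailoverLandClassGet(), arena, align,
                   owner, foArgs);
  } MPS_ARGS_END(foArgs);
  if (res != ResOK)
    goto failFailover;
  return ResOK;

failFailover:
  LandFinish(StoreFreelist(store));
failFreelist:
  LandFinish(StoreCBS(store));
  return res;
}

/* Callers then talk only to the fail-over land; newRange receives the
 * maximally coalesced range containing the inserted one. */
static Res FreeStoreInsert(FreeStore store, Addr base, Addr limit)
{
  RangeStruct range, newRange;
  RangeInit(&range, base, limit);
  return LandInsert(&newRange, StoreFailover(store), &range);
}
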
@@ -1034,16 +1019,16 @@ static Res MVTDescribe(Pool pool, mps_lib_FILE *stream, Count depth) NULL); if(res != ResOK) return res; - res = CBSDescribe(MVTCBS(mvt), stream, depth + 2); + res = LandDescribe(MVTCBS(mvt), stream, depth + 2); + if(res != ResOK) return res; + res = LandDescribe(MVTFreelist(mvt), stream, depth + 2); + if(res != ResOK) return res; + res = LandDescribe(MVTFailover(mvt), stream, depth + 2); if(res != ResOK) return res; - res = ABQDescribe(MVTABQ(mvt), (ABQDescribeElement)RangeDescribe, stream, depth + 2); if(res != ResOK) return res; - res = FreelistDescribe(MVTFreelist(mvt), stream, depth + 2); - if(res != ResOK) return res; - METER_WRITE(mvt->segAllocs, stream, depth + 2); METER_WRITE(mvt->segFrees, stream, depth + 2); METER_WRITE(mvt->bufferFills, stream, depth + 2); @@ -1220,13 +1205,20 @@ static Bool MVTReturnSegs(MVT mvt, Range range, Arena arena) } -/* MVTRefillCallback -- called from CBSIterate or FreelistIterate at - * the behest of MVTRefillABQIfEmpty +/* MVTRefillABQIfEmpty -- refill the ABQ from the free lists if it is + * empty. */ -static Bool MVTRefillCallback(MVT mvt, Range range) + +static Bool MVTRefillVisitor(Land land, Range range, + void *closureP, Size closureS) { - AVERT(ABQ, MVTABQ(mvt)); - AVERT(Range, range); + MVT mvt; + + AVERT(Land, land); + mvt = closureP; + AVERT(MVT, mvt); + AVER(closureS == UNUSED_SIZE); + UNUSED(closureS); if (RangeSize(range) < mvt->reuseSize) return TRUE; @@ -1235,80 +1227,54 @@ static Bool MVTRefillCallback(MVT mvt, Range range) return MVTReserve(mvt, range); } -static Bool MVTCBSRefillCallback(CBS cbs, Range range, - void *closureP, Size closureS) -{ - MVT mvt; - AVERT(CBS, cbs); - mvt = closureP; - AVERT(MVT, mvt); - UNUSED(closureS); - return MVTRefillCallback(mvt, range); -} - -static Bool MVTFreelistRefillCallback(Bool *deleteReturn, Range range, - void *closureP, Size closureS) -{ - MVT mvt; - mvt = closureP; - AVERT(MVT, mvt); - UNUSED(closureS); - AVER(deleteReturn != NULL); - *deleteReturn = FALSE; - return MVTRefillCallback(mvt, range); -} - -/* MVTRefillABQIfEmpty -- refill the ABQ from the CBS and the Freelist if - * it is empty - */ static void MVTRefillABQIfEmpty(MVT mvt, Size size) { AVERT(MVT, mvt); AVER(size > 0); /* If there have never been any overflows from the ABQ back to the - * CBS/Freelist, then there cannot be any blocks in the CBS/Freelist + * free lists, then there cannot be any blocks in the free lists * that are worth adding to the ABQ. So as an optimization, we don't * bother to look. */ if (mvt->abqOverflow && ABQIsEmpty(MVTABQ(mvt))) { mvt->abqOverflow = FALSE; METER_ACC(mvt->refills, size); - CBSIterate(MVTCBS(mvt), &MVTCBSRefillCallback, mvt, 0); - FreelistIterate(MVTFreelist(mvt), &MVTFreelistRefillCallback, mvt, 0); + /* The iteration stops if the ABQ overflows, so may finish or not. */ + (void)LandIterate(MVTFailover(mvt), MVTRefillVisitor, mvt, UNUSED_SIZE); } } -/* Closure for MVTContingencySearch */ -typedef struct MVTContigencyStruct *MVTContigency; +/* MVTContingencySearch -- search free lists for a block of a given size */ -typedef struct MVTContigencyStruct +typedef struct MVTContigencyClosureStruct { MVT mvt; - Bool found; RangeStruct range; Arena arena; Size min; /* meters */ Count steps; Count hardSteps; -} MVTContigencyStruct; +} MVTContigencyClosureStruct, *MVTContigencyClosure; - -/* MVTContingencyCallback -- called from CBSIterate or FreelistIterate - * at the behest of MVTContingencySearch. 
- */ -static Bool MVTContingencyCallback(MVTContigency cl, Range range) +static Bool MVTContingencyVisitor(Land land, Range range, + void *closureP, Size closureS) { MVT mvt; Size size; Addr base, limit; + MVTContigencyClosure cl; - AVER(cl != NULL); + AVERT(Land, land); + AVERT(Range, range); + AVER(closureP != NULL); + cl = closureP; mvt = cl->mvt; AVERT(MVT, mvt); - AVERT(Range, range); + AVER(closureS == UNUSED_SIZE); + UNUSED(closureS); base = RangeBase(range); limit = RangeLimit(range); @@ -1321,7 +1287,6 @@ static Bool MVTContingencyCallback(MVTContigency cl, Range range) /* verify that min will fit when seg-aligned */ if (size >= 2 * cl->min) { RangeInit(&cl->range, base, limit); - cl->found = TRUE; return FALSE; } @@ -1329,7 +1294,6 @@ static Bool MVTContingencyCallback(MVTContigency cl, Range range) cl->hardSteps++; if (MVTCheckFit(base, limit, cl->min, cl->arena)) { RangeInit(&cl->range, base, limit); - cl->found = TRUE; return FALSE; } @@ -1337,46 +1301,18 @@ static Bool MVTContingencyCallback(MVTContigency cl, Range range) return TRUE; } -static Bool MVTCBSContingencyCallback(CBS cbs, Range range, - void *closureP, Size closureS) -{ - MVTContigency cl = closureP; - UNUSED(cbs); - UNUSED(closureS); - return MVTContingencyCallback(cl, range); -} - -static Bool MVTFreelistContingencyCallback(Bool *deleteReturn, Range range, - void *closureP, Size closureS) -{ - MVTContigency cl = closureP; - UNUSED(closureS); - AVER(deleteReturn != NULL); - *deleteReturn = FALSE; - return MVTContingencyCallback(cl, range); -} - -/* MVTContingencySearch -- search the CBS and the Freelist for a block - * of size min */ - static Bool MVTContingencySearch(Addr *baseReturn, Addr *limitReturn, MVT mvt, Size min) { - MVTContigencyStruct cls; + MVTContigencyClosureStruct cls; cls.mvt = mvt; - cls.found = FALSE; cls.arena = PoolArena(MVT2Pool(mvt)); cls.min = min; cls.steps = 0; cls.hardSteps = 0; - FreelistFlushToCBS(MVTFreelist(mvt), MVTCBS(mvt)); - - CBSIterate(MVTCBS(mvt), MVTCBSContingencyCallback, (void *)&cls, 0); - FreelistIterate(MVTFreelist(mvt), MVTFreelistContingencyCallback, - (void *)&cls, 0); - if (!cls.found) + if (LandIterate(MVTFailover(mvt), MVTContingencyVisitor, &cls, UNUSED_SIZE)) return FALSE; AVER(RangeSize(&cls.range) >= min); @@ -1393,6 +1329,7 @@ static Bool MVTContingencySearch(Addr *baseReturn, Addr *limitReturn, /* MVTCheckFit -- verify that segment-aligned block of size min can * fit in a candidate address range. */ + static Bool MVTCheckFit(Addr base, Addr limit, Size min, Arena arena) { Seg seg; @@ -1422,12 +1359,10 @@ static Bool MVTCheckFit(Addr base, Addr limit, Size min, Arena arena) /* Return the CBS of an MVT pool for the benefit of fotest.c. 
*/ -extern CBS _mps_mvt_cbs(mps_pool_t); -CBS _mps_mvt_cbs(mps_pool_t mps_pool) { - Pool pool; +extern Land _mps_mvt_cbs(Pool); +Land _mps_mvt_cbs(Pool pool) { MVT mvt; - pool = (Pool)mps_pool; AVERT(Pool, pool); mvt = Pool2MVT(pool); AVERT(MVT, mvt); diff --git a/mps/code/poolmvff.c b/mps/code/poolmvff.c index d986003b051..e3974d79d7b 100644 --- a/mps/code/poolmvff.c +++ b/mps/code/poolmvff.c @@ -21,6 +21,7 @@ #include "mpscmvff.h" #include "dbgpool.h" #include "cbs.h" +#include "failover.h" #include "freelist.h" #include "mpm.h" @@ -47,9 +48,9 @@ typedef struct MVFFStruct { /* MVFF pool outer structure */ Size minSegSize; /* minimum size of segment */ Size avgSize; /* client estimate of allocation size */ Size total; /* total bytes in pool */ - Size free; /* total free bytes in pool */ CBSStruct cbsStruct; /* free list */ FreelistStruct flStruct; /* emergency free list */ + FailoverStruct foStruct; /* fail-over mechanism */ Bool firstFit; /* as opposed to last fit */ Bool slotHigh; /* prefers high part of large block */ Sig sig; /* */ @@ -58,10 +59,9 @@ typedef struct MVFFStruct { /* MVFF pool outer structure */ #define Pool2MVFF(pool) PARENT(MVFFStruct, poolStruct, pool) #define MVFF2Pool(mvff) (&((mvff)->poolStruct)) -#define CBSOfMVFF(mvff) (&((mvff)->cbsStruct)) -#define MVFFOfCBS(cbs) PARENT(MVFFStruct, cbsStruct, cbs) -#define FreelistOfMVFF(mvff) (&((mvff)->flStruct)) -#define MVFFOfFreelist(fl) PARENT(MVFFStruct, flStruct, fl) +#define CBSOfMVFF(mvff) (&((mvff)->cbsStruct.landStruct)) +#define FreelistOfMVFF(mvff) (&((mvff)->flStruct.landStruct)) +#define FailoverOfMVFF(mvff) (&((mvff)->foStruct.landStruct)) static Bool MVFFCheck(MVFF mvff); @@ -80,48 +80,29 @@ typedef MVFFDebugStruct *MVFFDebug; #define MVFFDebug2MVFF(mvffd) (&((mvffd)->mvffStruct)) -/* MVFFAddToFreeList -- Add given range to free list +/* MVFFInsert -- add given range to free lists * - * Updates MVFF counters for additional free space. Returns maximally - * coalesced range containing given range. Does not attempt to free - * segments (see MVFFFreeSegs). + * Updates rangeIO to be maximally coalesced range containing given + * range. Does not attempt to free segments (see MVFFFreeSegs). */ -static Res MVFFAddToFreeList(Addr *baseIO, Addr *limitIO, MVFF mvff) { - Res res; - RangeStruct range, newRange; - - AVER(baseIO != NULL); - AVER(limitIO != NULL); +static Res MVFFInsert(Range rangeIO, MVFF mvff) { + AVERT(Range, rangeIO); AVERT(MVFF, mvff); - RangeInit(&range, *baseIO, *limitIO); - res = CBSInsert(&newRange, CBSOfMVFF(mvff), &range); - if (ResIsAllocFailure(res)) { - /* CBS ran out of memory for splay nodes: add range to emergency - * free list instead. */ - res = FreelistInsert(&newRange, FreelistOfMVFF(mvff), &range); - } - - if (res == ResOK) { - mvff->free += RangeSize(&range); - *baseIO = RangeBase(&newRange); - *limitIO = RangeLimit(&newRange); - } - - return res; + return LandInsert(rangeIO, FailoverOfMVFF(mvff), rangeIO); } -/* MVFFFreeSegs -- Free segments from given range +/* MVFFFreeSegs -- free segments from given range * - * Given a free range, attempts to find entire segments within - * it, and returns them to the arena, updating total size counter. + * Given a free range, attempts to find entire segments within it, and + * returns them to the arena, updating total size counter. * - * This is usually called immediately after MVFFAddToFreeList. - * It is not combined with MVFFAddToFreeList because the latter - * is also called when new segments are added under MVFFAlloc. 
+ * This is usually called immediately after MVFFInsert. It is not + * combined with MVFFInsert because the latter is also called when new + * segments are added under MVFFAlloc. */ -static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit) +static void MVFFFreeSegs(MVFF mvff, Range range) { Seg seg = NULL; /* suppress "may be used uninitialized" */ Arena arena; @@ -131,72 +112,42 @@ static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit) Res res; AVERT(MVFF, mvff); - AVER(base < limit); + AVERT(Range, range); /* Could profitably AVER that the given range is free, */ /* but the CBS doesn't provide that facility. */ - if (AddrOffset(base, limit) < mvff->minSegSize) + if (RangeSize(range) < mvff->minSegSize) return; /* not large enough for entire segments */ arena = PoolArena(MVFF2Pool(mvff)); - b = SegOfAddr(&seg, arena, base); + b = SegOfAddr(&seg, arena, RangeBase(range)); AVER(b); segBase = SegBase(seg); segLimit = SegLimit(seg); - while(segLimit <= limit) { /* segment ends in range */ - if (segBase >= base) { /* segment starts in range */ - RangeStruct range, oldRange; - RangeInit(&range, segBase, segLimit); - - res = CBSDelete(&oldRange, CBSOfMVFF(mvff), &range); - if (res == ResOK) { - mvff->free -= RangeSize(&range); - } else if (ResIsAllocFailure(res)) { - /* CBS ran out of memory for splay nodes, which must mean that - * there were fragments on both sides: see - * . Handle this by - * deleting the whole of oldRange (which requires no - * allocation) and re-inserting the fragments. */ - RangeStruct oldRange2; - res = CBSDelete(&oldRange2, CBSOfMVFF(mvff), &oldRange); - AVER(res == ResOK); - AVER(RangesEqual(&oldRange2, &oldRange)); - mvff->free -= RangeSize(&oldRange); - AVER(RangeBase(&oldRange) != segBase); - { - Addr leftBase = RangeBase(&oldRange); - Addr leftLimit = segBase; - res = MVFFAddToFreeList(&leftBase, &leftLimit, mvff); - } - AVER(RangeLimit(&oldRange) != segLimit); - { - Addr rightBase = segLimit; - Addr rightLimit = RangeLimit(&oldRange); - res = MVFFAddToFreeList(&rightBase, &rightLimit, mvff); - } - } else if (res == ResFAIL) { - /* Not found in the CBS: must be found in the Freelist. */ - res = FreelistDelete(&oldRange, FreelistOfMVFF(mvff), &range); - AVER(res == ResOK); - mvff->free -= RangeSize(&range); - } + while(segLimit <= RangeLimit(range)) { /* segment ends in range */ + if (segBase >= RangeBase(range)) { /* segment starts in range */ + RangeStruct delRange, oldRange; + RangeInit(&delRange, segBase, segLimit); + res = LandDelete(&oldRange, FailoverOfMVFF(mvff), &delRange); AVER(res == ResOK); - AVER(RangesNest(&oldRange, &range)); + AVER(RangesNest(&oldRange, &delRange)); /* Can't free the segment earlier, because if it was on the * Freelist rather than the CBS then it likely contains data * that needs to be read in order to update the Freelist. */ SegFree(seg); - mvff->total -= RangeSize(&range); + + AVER(mvff->total >= RangeSize(&delRange)); + mvff->total -= RangeSize(&delRange); } - /* Avoid calling SegNext if the next segment would fail */ + /* Avoid calling SegFindAboveAddr if the next segment would fail */ /* the loop test, mainly because there might not be a */ /* next segment. 
*/ - if (segLimit == limit) /* segment ends at end of range */ + if (segLimit == RangeLimit(range)) /* segment ends at end of range */ break; b = SegFindAboveAddr(&seg, arena, segBase); @@ -212,8 +163,8 @@ static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit) /* MVFFAddSeg -- Allocates a new segment from the arena * * Allocates a new segment from the arena (with the given - * withReservoirPermit flag) of at least the specified size. The - * specified size should be pool-aligned. Adds it to the free list. + * withReservoirPermit flag) of at least the specified size. The + * specified size should be pool-aligned. Adds it to the free lists. */ static Res MVFFAddSeg(Seg *segReturn, MVFF mvff, Size size, Bool withReservoirPermit) @@ -224,7 +175,7 @@ static Res MVFFAddSeg(Seg *segReturn, Seg seg; Res res; Align align; - Addr base, limit; + RangeStruct range; AVERT(MVFF, mvff); AVER(size > 0); @@ -259,12 +210,11 @@ static Res MVFFAddSeg(Seg *segReturn, } mvff->total += segSize; - base = SegBase(seg); - limit = AddrAdd(base, segSize); - DebugPoolFreeSplat(pool, base, limit); - res = MVFFAddToFreeList(&base, &limit, mvff); + RangeInitSize(&range, SegBase(seg), segSize); + DebugPoolFreeSplat(pool, RangeBase(&range), RangeLimit(&range)); + res = MVFFInsert(&range, mvff); AVER(res == ResOK); - AVER(base <= SegBase(seg)); + AVER(RangeBase(&range) <= SegBase(seg)); if (mvff->minSegSize > segSize) mvff->minSegSize = segSize; /* Don't call MVFFFreeSegs; that would be silly. */ @@ -274,50 +224,32 @@ static Res MVFFAddSeg(Seg *segReturn, } -/* MVFFFindFirstFree -- Finds the first (or last) suitable free block +/* MVFFFindFree -- find the first (or last) suitable free block * * Finds a free block of the given (pool aligned) size, according * to a first (or last) fit policy controlled by the MVFF fields * firstFit, slotHigh (for whether to allocate the top or bottom * portion of a larger block). * - * Will return FALSE if the free list has no large enough block. - * In particular, will not attempt to allocate a new segment. + * Will return FALSE if the free lists have no large enough block. In + * particular, will not attempt to allocate a new segment. */ -static Bool MVFFFindFirstFree(Addr *baseReturn, Addr *limitReturn, - MVFF mvff, Size size) +static Bool MVFFFindFree(Range rangeReturn, MVFF mvff, Size size) { Bool foundBlock; FindDelete findDelete; - RangeStruct range, oldRange; + RangeStruct oldRange; - AVER(baseReturn != NULL); - AVER(limitReturn != NULL); + AVER(rangeReturn != NULL); AVERT(MVFF, mvff); AVER(size > 0); AVER(SizeIsAligned(size, PoolAlignment(MVFF2Pool(mvff)))); - FreelistFlushToCBS(FreelistOfMVFF(mvff), CBSOfMVFF(mvff)); - findDelete = mvff->slotHigh ? FindDeleteHIGH : FindDeleteLOW; foundBlock = - (mvff->firstFit ? CBSFindFirst : CBSFindLast) - (&range, &oldRange, CBSOfMVFF(mvff), size, findDelete); - - if (!foundBlock) { - /* Failed to find a block in the CBS: try the emergency free list - * as well. */ - foundBlock = - (mvff->firstFit ? FreelistFindFirst : FreelistFindLast) - (&range, &oldRange, FreelistOfMVFF(mvff), size, findDelete); - } - - if (foundBlock) { - *baseReturn = RangeBase(&range); - *limitReturn = RangeLimit(&range); - mvff->free -= size; - } + (mvff->firstFit ? 
LandFindFirst : LandFindLast) + (rangeReturn, &oldRange, FailoverOfMVFF(mvff), size, findDelete); return foundBlock; } @@ -330,7 +262,7 @@ static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size, { Res res; MVFF mvff; - Addr base, limit; + RangeStruct range; Bool foundBlock; AVERT(Pool, pool); @@ -343,29 +275,28 @@ static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size, size = SizeAlignUp(size, PoolAlignment(pool)); - foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size); + foundBlock = MVFFFindFree(&range, mvff, size); if (!foundBlock) { Seg seg; res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit); if (res != ResOK) return res; - foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size); + foundBlock = MVFFFindFree(&range, mvff, size); /* We know that the found range must intersect the new segment. */ /* In particular, it doesn't necessarily lie entirely within it. */ - /* The next three AVERs test for intersection of two intervals. */ - AVER(base >= SegBase(seg) || limit <= SegLimit(seg)); - AVER(base < SegLimit(seg)); - AVER(SegBase(seg) < limit); + /* The next two AVERs test for intersection of two intervals. */ + AVER(RangeBase(&range) < SegLimit(seg)); + AVER(SegBase(seg) < RangeLimit(&range)); /* We also know that the found range is no larger than the segment. */ - AVER(SegSize(seg) >= AddrOffset(base, limit)); + AVER(SegSize(seg) >= RangeSize(&range)); } AVER(foundBlock); - AVER(AddrOffset(base, limit) == size); + AVER(RangeSize(&range) == size); - *aReturn = base; + *aReturn = RangeBase(&range); return ResOK; } @@ -376,7 +307,7 @@ static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size, static void MVFFFree(Pool pool, Addr old, Size size) { Res res; - Addr base, limit; + RangeStruct range; MVFF mvff; AVERT(Pool, pool); @@ -387,42 +318,16 @@ static void MVFFFree(Pool pool, Addr old, Size size) AVER(AddrIsAligned(old, PoolAlignment(pool))); AVER(size > 0); - size = SizeAlignUp(size, PoolAlignment(pool)); - base = old; - limit = AddrAdd(base, size); + RangeInitSize(&range, old, SizeAlignUp(size, PoolAlignment(pool))); - res = MVFFAddToFreeList(&base, &limit, mvff); + res = MVFFInsert(&range, mvff); AVER(res == ResOK); if (res == ResOK) - MVFFFreeSegs(mvff, base, limit); + MVFFFreeSegs(mvff, &range); return; } -/* MVFFFindLargest -- call CBSFindLargest and then fall back to - * FreelistFindLargest if no block in the CBS was big enough. */ - -static Bool MVFFFindLargest(Range range, Range oldRange, MVFF mvff, - Size size, FindDelete findDelete) -{ - AVER(range != NULL); - AVER(oldRange != NULL); - AVERT(MVFF, mvff); - AVER(size > 0); - AVERT(FindDelete, findDelete); - - FreelistFlushToCBS(FreelistOfMVFF(mvff), CBSOfMVFF(mvff)); - - if (CBSFindLargest(range, oldRange, CBSOfMVFF(mvff), size, findDelete)) - return TRUE; - - if (FreelistFindLargest(range, oldRange, FreelistOfMVFF(mvff), - size, findDelete)) - return TRUE; - - return FALSE; -} - /* MVFFBufferFill -- Fill the buffer * @@ -447,18 +352,17 @@ static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn, AVER(SizeIsAligned(size, PoolAlignment(pool))); AVERT(Bool, withReservoirPermit); - found = MVFFFindLargest(&range, &oldRange, mvff, size, FindDeleteENTIRE); + found = LandFindLargest(&range, &oldRange, FailoverOfMVFF(mvff), size, FindDeleteENTIRE); if (!found) { - /* Add a new segment to the free list and try again. */ + /* Add a new segment to the free lists and try again. 
*/ res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit); if (res != ResOK) return res; - found = MVFFFindLargest(&range, &oldRange, mvff, size, FindDeleteENTIRE); + found = LandFindLargest(&range, &oldRange, FailoverOfMVFF(mvff), size, FindDeleteENTIRE); } AVER(found); AVER(RangeSize(&range) >= size); - mvff->free -= RangeSize(&range); *baseReturn = RangeBase(&range); *limitReturn = RangeLimit(&range); @@ -473,21 +377,22 @@ static void MVFFBufferEmpty(Pool pool, Buffer buffer, { Res res; MVFF mvff; + RangeStruct range; AVERT(Pool, pool); mvff = Pool2MVFF(pool); AVERT(MVFF, mvff); AVERT(Buffer, buffer); AVER(BufferIsReady(buffer)); - AVER(base <= limit); + RangeInit(&range, base, limit); - if (base == limit) + if (RangeIsEmpty(&range)) return; - res = MVFFAddToFreeList(&base, &limit, mvff); + res = MVFFInsert(&range, mvff); AVER(res == ResOK); if (res == ResOK) - MVFFFreeSegs(mvff, base, limit); + MVFFFreeSegs(mvff, &range); return; } @@ -531,7 +436,7 @@ static Res MVFFInit(Pool pool, ArgList args) { Size extendBy = MVFF_EXTEND_BY_DEFAULT; Size avgSize = MVFF_AVG_SIZE_DEFAULT; - Size align = MVFF_ALIGN_DEFAULT; + Align align = MVFF_ALIGN_DEFAULT; Bool slotHigh = MVFF_SLOT_HIGH_DEFAULT; Bool arenaHigh = MVFF_ARENA_HIGH_DEFAULT; Bool firstFit = MVFF_FIRST_FIT_DEFAULT; @@ -570,7 +475,12 @@ static Res MVFFInit(Pool pool, ArgList args) AVER(extendBy > 0); /* .arg.check */ AVER(avgSize > 0); /* .arg.check */ AVER(avgSize <= extendBy); /* .arg.check */ - AVER(SizeIsAligned(align, MPS_PF_ALIGN)); + AVERT(Align, align); + /* This restriction on the alignment is necessary because of the use + * of a Freelist to store the free address ranges in low-memory + * situations. . + */ + AVER(AlignIsAligned(align, FreelistMinimumAlignment)); AVERT(Bool, slotHigh); AVERT(Bool, arenaHigh); AVERT(Bool, firstFit); @@ -596,16 +506,25 @@ static Res MVFFInit(Pool pool, ArgList args) SegPrefExpress(mvff->segPref, arenaHigh ? 
SegPrefHigh : SegPrefLow, NULL); mvff->total = 0; - mvff->free = 0; - res = FreelistInit(FreelistOfMVFF(mvff), align); + res = LandInit(FreelistOfMVFF(mvff), FreelistLandClassGet(), arena, align, + mvff, mps_args_none); if (res != ResOK) - goto failInit; + goto failFreelistInit; - res = CBSInit(CBSOfMVFF(mvff), arena, (void *)mvff, align, - /* fastFind */ TRUE, /* zoned */ FALSE, args); + res = LandInit(CBSOfMVFF(mvff), CBSFastLandClassGet(), arena, align, mvff, + mps_args_none); if (res != ResOK) - goto failInit; + goto failCBSInit; + + MPS_ARGS_BEGIN(foArgs) { + MPS_ARGS_ADD(foArgs, FailoverPrimary, CBSOfMVFF(mvff)); + MPS_ARGS_ADD(foArgs, FailoverSecondary, FreelistOfMVFF(mvff)); + res = LandInit(FailoverOfMVFF(mvff), FailoverLandClassGet(), arena, align, + mvff, foArgs); + } MPS_ARGS_END(foArgs); + if (res != ResOK) + goto failFailoverInit; mvff->sig = MVFFSig; AVERT(MVFF, mvff); @@ -613,7 +532,11 @@ static Res MVFFInit(Pool pool, ArgList args) BOOLOF(slotHigh), BOOLOF(arenaHigh), BOOLOF(firstFit)); return ResOK; -failInit: +failFailoverInit: + LandFinish(CBSOfMVFF(mvff)); +failCBSInit: + LandFinish(FreelistOfMVFF(mvff)); +failFreelistInit: ControlFree(arena, p, sizeof(SegPrefStruct)); return res; } @@ -625,7 +548,6 @@ static void MVFFFinish(Pool pool) { MVFF mvff; Arena arena; - Seg seg; Ring ring, node, nextNode; AVERT(Pool, pool); @@ -634,20 +556,24 @@ static void MVFFFinish(Pool pool) ring = PoolSegRing(pool); RING_FOR(node, ring, nextNode) { + Size size; + Seg seg; seg = SegOfPoolRing(node); AVER(SegPool(seg) == pool); + size = AddrOffset(SegBase(seg), SegLimit(seg)); + AVER(size <= mvff->total); + mvff->total -= size; SegFree(seg); } - /* Could maintain mvff->total here and check it falls to zero, */ - /* but that would just make the function slow. If only we had */ - /* a way to do operations only if AVERs are turned on. */ + AVER(mvff->total == 0); arena = PoolArena(pool); ControlFree(arena, mvff->segPref, sizeof(SegPrefStruct)); - CBSFinish(CBSOfMVFF(mvff)); - FreelistFinish(FreelistOfMVFF(mvff)); + LandFinish(FailoverOfMVFF(mvff)); + LandFinish(FreelistOfMVFF(mvff)); + LandFinish(CBSOfMVFF(mvff)); mvff->sig = SigInvalid; } @@ -686,16 +612,15 @@ static Res MVFFDescribe(Pool pool, mps_lib_FILE *stream, Count depth) " extendBy $W\n", (WriteFW)mvff->extendBy, " avgSize $W\n", (WriteFW)mvff->avgSize, " total $U\n", (WriteFU)mvff->total, - " free $U\n", (WriteFU)mvff->free, NULL); if (res != ResOK) return res; - res = CBSDescribe(CBSOfMVFF(mvff), stream, depth + 2); + res = LandDescribe(CBSOfMVFF(mvff), stream, depth + 2); if (res != ResOK) return res; - res = FreelistDescribe(FreelistOfMVFF(mvff), stream, depth + 2); + res = LandDescribe(FreelistOfMVFF(mvff), stream, depth + 2); if (res != ResOK) return res; @@ -707,7 +632,7 @@ static Res MVFFDescribe(Pool pool, mps_lib_FILE *stream, Count depth) DEFINE_POOL_CLASS(MVFFPoolClass, this) { - INHERIT_CLASS(this, AbstractAllocFreePoolClass); + INHERIT_CLASS(this, AbstractPoolClass); PoolClassMixInBuffer(this); this->name = "MVFF"; this->size = sizeof(MVFFStruct); @@ -764,13 +689,15 @@ size_t mps_mvff_free_size(mps_pool_t mps_pool) { Pool pool; MVFF mvff; + Land land; pool = (Pool)mps_pool; AVERT(Pool, pool); mvff = Pool2MVFF(pool); AVERT(MVFF, mvff); + land = FailoverOfMVFF(mvff); - return (size_t)mvff->free; + return (size_t)LandSize(land); } /* Total owned bytes. 
See */ @@ -802,11 +729,11 @@ static Bool MVFFCheck(MVFF mvff) CHECKL(mvff->minSegSize >= ArenaAlign(PoolArena(MVFF2Pool(mvff)))); CHECKL(mvff->avgSize > 0); /* see .arg.check */ CHECKL(mvff->avgSize <= mvff->extendBy); /* see .arg.check */ - CHECKL(mvff->total >= mvff->free); - CHECKL(SizeIsAligned(mvff->free, PoolAlignment(MVFF2Pool(mvff)))); CHECKL(SizeIsAligned(mvff->total, ArenaAlign(PoolArena(MVFF2Pool(mvff))))); - CHECKD(CBS, CBSOfMVFF(mvff)); - CHECKD(Freelist, FreelistOfMVFF(mvff)); + CHECKD(CBS, &mvff->cbsStruct); + CHECKD(Freelist, &mvff->flStruct); + CHECKD(Failover, &mvff->foStruct); + CHECKL(mvff->total >= LandSize(FailoverOfMVFF(mvff))); CHECKL(BoolCheck(mvff->slotHigh)); CHECKL(BoolCheck(mvff->firstFit)); return TRUE; @@ -815,12 +742,10 @@ static Bool MVFFCheck(MVFF mvff) /* Return the CBS of an MVFF pool for the benefit of fotest.c. */ -extern CBS _mps_mvff_cbs(mps_pool_t); -CBS _mps_mvff_cbs(mps_pool_t mps_pool) { - Pool pool; +extern Land _mps_mvff_cbs(Pool); +Land _mps_mvff_cbs(Pool pool) { MVFF mvff; - pool = (Pool)mps_pool; AVERT(Pool, pool); mvff = Pool2MVFF(pool); AVERT(MVFF, mvff); diff --git a/mps/code/pooln.c b/mps/code/pooln.c index 226d1bfae28..a9c5c2a9f0f 100644 --- a/mps/code/pooln.c +++ b/mps/code/pooln.c @@ -271,7 +271,7 @@ DEFINE_POOL_CLASS(NPoolClass, this) this->name = "N"; this->size = sizeof(PoolNStruct); this->offset = offsetof(PoolNStruct, poolStruct); - this->attr |= (AttrALLOC | AttrBUF | AttrFREE | AttrGC | AttrSCAN); + this->attr |= AttrGC; this->init = NInit; this->finish = NFinish; this->alloc = NAlloc; diff --git a/mps/code/protan.c b/mps/code/protan.c index 3709e88be59..0e8abf8ac7e 100644 --- a/mps/code/protan.c +++ b/mps/code/protan.c @@ -50,16 +50,14 @@ void ProtSync(Arena arena) synced = TRUE; if (SegFirst(&seg, arena)) { - Addr base; do { - base = SegBase(seg); if (SegPM(seg) != AccessSetEMPTY) { /* */ ShieldEnter(arena); TraceSegAccess(arena, seg, SegPM(seg)); ShieldLeave(arena); synced = FALSE; } - } while(SegNext(&seg, arena, base)); + } while(SegNext(&seg, arena, seg)); } } while(!synced); } diff --git a/mps/code/protix.c b/mps/code/protix.c index 854bfc98174..5b310333e84 100644 --- a/mps/code/protix.c +++ b/mps/code/protix.c @@ -44,9 +44,6 @@ #if !defined(MPS_OS_LI) && !defined(MPS_OS_FR) && !defined(MPS_OS_XC) #error "protix.c is Unix-specific, currently for MPS_OS_LI FR XC" #endif -#ifndef PROTECTION -#error "protix.c implements protection, but PROTECTION is not set" -#endif #include #include diff --git a/mps/code/protli.c b/mps/code/protli.c index ba48cab7f51..dc38154f2b7 100644 --- a/mps/code/protli.c +++ b/mps/code/protli.c @@ -16,9 +16,6 @@ #ifndef MPS_OS_LI #error "protli.c is Linux-specific, but MPS_OS_LI is not set" #endif -#ifndef PROTECTION -#error "protli.c implements protection, but PROTECTION is not set" -#endif #include #include diff --git a/mps/code/protsgix.c b/mps/code/protsgix.c index 39f19c90b4c..e587ac86424 100644 --- a/mps/code/protsgix.c +++ b/mps/code/protsgix.c @@ -24,9 +24,6 @@ #if defined(MPS_OS_XC) && defined(MPS_ARCH_PP) #error "protsgix.c does not work on Darwin on PowerPC. 
Use protxcpp.c" #endif -#ifndef PROTECTION -#error "protsgix.c implements protection, but PROTECTION is not set" -#endif #include /* for many functions */ #include /* for getpid */ diff --git a/mps/code/protw3.c b/mps/code/protw3.c index 84ef680ffb9..4ff3e85da42 100644 --- a/mps/code/protw3.c +++ b/mps/code/protw3.c @@ -12,9 +12,6 @@ #ifndef MPS_OS_W3 #error "protw3.c is Win32-specific, but MPS_OS_W3 is not set" #endif -#ifndef PROTECTION -#error "protw3.c implements protection, but PROTECTION is not set" -#endif #include "mpswin.h" diff --git a/mps/code/protxc.c b/mps/code/protxc.c index 1d2ae27917c..7e8f230d061 100644 --- a/mps/code/protxc.c +++ b/mps/code/protxc.c @@ -76,9 +76,6 @@ #if !defined(MPS_OS_XC) #error "protxc.c is OS X specific" #endif -#if !defined(PROTECTION) -#error "protxc.c implements protection, but PROTECTION is not defined" -#endif SRCID(protxc, "$Id$"); diff --git a/mps/code/qs.c b/mps/code/qs.c index 50fe8c48723..2a62a5ef71a 100644 --- a/mps/code/qs.c +++ b/mps/code/qs.c @@ -367,6 +367,7 @@ static void *go(void *p, size_t s) qsort(list, listl, sizeof(mps_word_t), &compare); validate(); + mps_arena_park(arena); mps_root_destroy(regroot); mps_root_destroy(actroot); mps_ap_destroy(ap); @@ -374,6 +375,7 @@ static void *go(void *p, size_t s) mps_pool_destroy(mpool); mps_chain_destroy(chain); mps_fmt_destroy(format); + mps_arena_release(arena); return NULL; } @@ -527,6 +529,7 @@ int main(int argc, char *argv[]) die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), "mps_arena_create"); + mps_tramp(&r, &go, NULL, 0); mps_arena_destroy(arena); diff --git a/mps/code/range.c b/mps/code/range.c index 6faf1d254f4..8b8f1c8bf8a 100644 --- a/mps/code/range.c +++ b/mps/code/range.c @@ -15,7 +15,6 @@ SRCID(range, "$Id$"); Bool RangeCheck(Range range) { - CHECKS(Range, range); CHECKL(range->base <= range->limit); return TRUE; @@ -29,14 +28,17 @@ void RangeInit(Range range, Addr base, Addr limit) range->base = base; range->limit = limit; - range->sig = RangeSig; AVERT(Range, range); } +void RangeInitSize(Range range, Addr base, Size size) +{ + RangeInit(range, base, AddrAdd(base, size)); +} + void RangeFinish(Range range) { AVERT(Range, range); - range->sig = SigInvalid; } Res RangeDescribe(Range range, mps_lib_FILE *stream, Count depth) diff --git a/mps/code/range.h b/mps/code/range.h index 541fa5ff574..ac262c98c1a 100644 --- a/mps/code/range.h +++ b/mps/code/range.h @@ -14,15 +14,8 @@ #include "mpmtypes.h" -/* Signatures */ - -#define RangeSig ((Sig)0x5196A493) /* SIGnature RANGE */ - - /* Prototypes */ -typedef struct RangeStruct *Range; - #define RangeBase(range) ((range)->base) #define RangeLimit(range) ((range)->limit) #define RangeSize(range) (AddrOffset(RangeBase(range), RangeLimit(range))) @@ -30,6 +23,7 @@ typedef struct RangeStruct *Range; #define RangeIsEmpty(range) (RangeSize(range) == 0) extern void RangeInit(Range range, Addr base, Addr limit); +extern void RangeInitSize(Range range, Addr base, Size size); extern void RangeFinish(Range range); extern Res RangeDescribe(Range range, mps_lib_FILE *stream, Count depth); extern Bool RangeCheck(Range range); @@ -46,7 +40,6 @@ extern void RangeCopy(Range to, Range from); /* Types */ typedef struct RangeStruct { - Sig sig; Addr base; Addr limit; } RangeStruct; diff --git a/mps/code/sac.c b/mps/code/sac.c index c85956c8f82..58fca2b33c3 100644 --- a/mps/code/sac.c +++ b/mps/code/sac.c @@ -117,10 +117,10 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount, /* to be large enough, but that gets complicated, if 
you have to */ /* merge classes because of the adjustment. */ for (i = 0; i < classesCount; ++i) { - AVER(classes[i]._block_size > 0); - AVER(SizeIsAligned(classes[i]._block_size, PoolAlignment(pool))); - AVER(prevSize < classes[i]._block_size); - prevSize = classes[i]._block_size; + AVER(classes[i].mps_block_size > 0); + AVER(SizeIsAligned(classes[i].mps_block_size, PoolAlignment(pool))); + AVER(prevSize < classes[i].mps_block_size); + prevSize = classes[i].mps_block_size; /* no restrictions on count */ /* no restrictions on frequency */ } @@ -128,7 +128,7 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount, /* Calculate frequency scale */ for (i = 0; i < classesCount; ++i) { unsigned oldFreq = totalFreq; - totalFreq += classes[i]._frequency; + totalFreq += classes[i].mps_frequency; AVER(oldFreq <= totalFreq); /* check for overflow */ UNUSED(oldFreq); /* */ } @@ -136,10 +136,10 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount, /* Find middle one */ totalFreq /= 2; for (i = 0; i < classesCount; ++i) { - if (totalFreq < classes[i]._frequency) break; - totalFreq -= classes[i]._frequency; + if (totalFreq < classes[i].mps_frequency) break; + totalFreq -= classes[i].mps_frequency; } - if (totalFreq <= classes[i]._frequency / 2) + if (totalFreq <= classes[i].mps_frequency / 2) middleIndex = i; else middleIndex = i + 1; /* there must exist another class at i+1 */ @@ -155,9 +155,9 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount, /* It's important this matches SACFind. */ esac = ExternalSACOfSAC(sac); for (j = middleIndex + 1, i = 0; j < classesCount; ++j, i += 2) { - esac->_freelists[i]._size = classes[j]._block_size; + esac->_freelists[i]._size = classes[j].mps_block_size; esac->_freelists[i]._count = 0; - esac->_freelists[i]._count_max = classes[j]._cached_count; + esac->_freelists[i]._count_max = classes[j].mps_cached_count; esac->_freelists[i]._blocks = NULL; } esac->_freelists[i]._size = SizeMAX; @@ -165,19 +165,19 @@ Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount, esac->_freelists[i]._count_max = 0; esac->_freelists[i]._blocks = NULL; for (j = middleIndex, i = 1; j > 0; --j, i += 2) { - esac->_freelists[i]._size = classes[j-1]._block_size; + esac->_freelists[i]._size = classes[j-1].mps_block_size; esac->_freelists[i]._count = 0; - esac->_freelists[i]._count_max = classes[j]._cached_count; + esac->_freelists[i]._count_max = classes[j].mps_cached_count; esac->_freelists[i]._blocks = NULL; } esac->_freelists[i]._size = 0; esac->_freelists[i]._count = 0; - esac->_freelists[i]._count_max = classes[j]._cached_count; + esac->_freelists[i]._count_max = classes[j].mps_cached_count; esac->_freelists[i]._blocks = NULL; /* finish init */ esac->_trapped = FALSE; - esac->_middle = classes[middleIndex]._block_size; + esac->_middle = classes[middleIndex].mps_block_size; sac->pool = pool; sac->classesCount = classesCount; sac->middleIndex = middleIndex; diff --git a/mps/code/sacss.c b/mps/code/sacss.c index ee86c83989d..d65dca43aba 100644 --- a/mps/code/sacss.c +++ b/mps/code/sacss.c @@ -7,6 +7,7 @@ #include "mpscmv.h" #include "mpscmvff.h" +#include "mpscmfs.h" #include "mpslib.h" #include "mpsavm.h" #include "mps.h" @@ -15,9 +16,7 @@ #include "mpslib.h" #include -#include "mpstd.h" #include -#include #include @@ -28,9 +27,6 @@ #define testSetSIZE 200 #define testLOOPS 10 -#define topClassSIZE 0xA00 -#define classCOUNT 4 - /* make -- allocate an object */ @@ -45,25 +41,36 @@ static mps_res_t make(mps_addr_t *p, mps_sac_t sac, size_t size) /* stress -- 
create a pool of the requested type and allocate in it */ -static mps_res_t stress(mps_class_t class, - size_t classes_count, mps_sac_classes_s *classes, - size_t (*size)(size_t i), mps_arena_t arena, ...) +static mps_res_t stress(mps_arena_t arena, mps_align_t align, + size_t (*size)(size_t i), + const char *name, mps_class_t pool_class, + mps_arg_s *args) { mps_res_t res; mps_pool_t pool; mps_sac_t sac; - va_list arg; size_t i, k; int *ps[testSetSIZE]; size_t ss[testSetSIZE]; + mps_sac_classes_s classes[4] = { + {1, 1, 1}, + {2, 1, 2}, + {16, 9, 5}, + {100, 9, 4}, + }; + size_t classes_count = sizeof classes / sizeof *classes; + for (i = 0; i < classes_count; ++i) { + classes[i].mps_block_size *= alignUp(align, sizeof(void *)); + } - va_start(arg, arena); - res = mps_pool_create_v(&pool, arena, class, arg); - va_end(arg); + printf("%s\n", name); + + res = mps_pool_create_k(&pool, arena, pool_class, args); if (res != MPS_RES_OK) return res; - die(mps_sac_create(&sac, pool, classes_count, classes), "SACCreate"); + die(mps_sac_create(&sac, pool, classes_count, classes), + "SACCreate"); /* allocate a load of objects */ for (i = 0; i < testSetSIZE; ++i) { @@ -125,9 +132,9 @@ static mps_res_t stress(mps_class_t class, } -/* randomSize8 -- produce sizes both latge and small */ +/* randomSize -- produce sizes both large and small */ -static size_t randomSize8(size_t i) +static size_t randomSize(size_t i) { size_t maxSize = 2 * 160 * 0x2000; size_t size; @@ -138,58 +145,97 @@ static size_t randomSize8(size_t i) } -/* testInArena -- test all the pool classes in the given arena */ +/* fixedSize -- produce always the same size */ + +static size_t fixedSizeSize = 0; + +static size_t fixedSize(size_t i) +{ + testlib_unused(i); + return fixedSizeSize; +} + static mps_pool_debug_option_s debugOptions = { - /* .fence_template = */ (const void *)"postpostpostpost", - /* .fence_size = */ MPS_PF_ALIGN, - /* .free_template = */ (const void *)"DEAD", + /* .fence_template = */ "post", + /* .fence_size = */ 4, + /* .free_template = */ "DEAD", /* .free_size = */ 4 }; -static mps_sac_classes_s classes[4] = { - {MPS_PF_ALIGN, 1, 1}, - {MPS_PF_ALIGN * 2, 1, 2}, - {128 + MPS_PF_ALIGN, 9, 5}, - {topClassSIZE, 9, 4} -}; -static void testInArena(mps_arena_t arena) +/* testInArena -- test all the pool classes in the given arena */ + +static void testInArena(mps_arena_class_t arena_class, mps_arg_s *arena_args) { - printf("MVFF\n\n"); - die(stress(mps_class_mvff(), classCOUNT, classes, randomSize8, arena, - (size_t)65536, (size_t)32, (mps_align_t)MPS_PF_ALIGN, TRUE, TRUE, TRUE), - "stress MVFF"); - printf("MV debug\n\n"); - die(stress(mps_class_mv_debug(), classCOUNT, classes, randomSize8, arena, - &debugOptions, (size_t)65536, (size_t)32, (size_t)65536), - "stress MV debug"); - printf("MV\n\n"); - die(stress(mps_class_mv(), classCOUNT, classes, randomSize8, arena, - (size_t)65536, (size_t)32, (size_t)65536), - "stress MV"); + mps_arena_t arena; + + die(mps_arena_create_k(&arena, arena_class, arena_args), + "mps_arena_create"); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = sizeof(void *) << (rnd() % 4); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + die(stress(arena, align, randomSize, "MVFF", mps_class_mvff(), args), + "stress MVFF"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = sizeof(void *) << (rnd() % 4); + MPS_ARGS_ADD(args, 
MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, TRUE); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, &debugOptions); + die(stress(arena, align, randomSize, "MVFF debug", + mps_class_mvff_debug(), args), + "stress MVFF debug"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = (mps_align_t)1 << (rnd() % 6); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + die(stress(arena, align, randomSize, "MV", mps_class_mv(), args), + "stress MV"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + mps_align_t align = (mps_align_t)1 << (rnd() % 6); + MPS_ARGS_ADD(args, MPS_KEY_ALIGN, align); + MPS_ARGS_ADD(args, MPS_KEY_POOL_DEBUG_OPTIONS, &debugOptions); + die(stress(arena, align, randomSize, "MV debug", + mps_class_mv_debug(), args), + "stress MV debug"); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + fixedSizeSize = MPS_PF_ALIGN * (1 + rnd() % 100); + MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, fixedSizeSize); + die(stress(arena, fixedSizeSize, fixedSize, "MFS", mps_class_mfs(), args), + "stress MFS"); + } MPS_ARGS_END(args); + + mps_arena_destroy(arena); } int main(int argc, char *argv[]) { - mps_arena_t arena; - testlib_init(argc, argv); - die(mps_arena_create(&arena, mps_arena_class_vm(), testArenaSIZE), - "mps_arena_create"); - testInArena(arena); - mps_arena_destroy(arena); + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); + testInArena(mps_arena_class_vm(), args); + } MPS_ARGS_END(args); MPS_ARGS_BEGIN(args) { MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); MPS_ARGS_ADD(args, MPS_KEY_ARENA_ZONED, FALSE); - die(mps_arena_create_k(&arena, mps_arena_class_vm(), args), - "mps_arena_create"); + testInArena(mps_arena_class_vm(), args); } MPS_ARGS_END(args); - testInArena(arena); - mps_arena_destroy(arena); printf("%s: Conclusion: Failed to find any defects.\n", argv[0]); return 0; diff --git a/mps/code/seg.c b/mps/code/seg.c index bfeaf84a8af..c4fcfc455a0 100644 --- a/mps/code/seg.c +++ b/mps/code/seg.c @@ -309,9 +309,12 @@ void SegSetSummary(Seg seg, RefSet summary) AVERT(Seg, seg); AVER(summary == RefSetEMPTY || SegRankSet(seg) != RankSetEMPTY); -#ifdef PROTECTION_NONE +#if defined(REMEMBERED_SET_NONE) + /* Without protection, we can't maintain the remembered set because + there are writes we don't know about. */ summary = RefSetUNIV; #endif + if (summary != SegSummary(seg)) seg->class->setSummary(seg, summary); } @@ -324,11 +327,12 @@ void SegSetRankAndSummary(Seg seg, RankSet rankSet, RefSet summary) AVERT(Seg, seg); AVERT(RankSet, rankSet); -#ifdef PROTECTION_NONE +#if defined(REMEMBERED_SET_NONE) if (rankSet != RankSetEMPTY) { summary = RefSetUNIV; } #endif + seg->class->setRankSummary(seg, rankSet, summary); } @@ -638,6 +642,10 @@ Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at, AVER(at < limit); AVERT(Bool, withReservoirPermit); + /* Can only split a buffered segment if the entire buffer is below + * the split point. */ + AVER(SegBuffer(seg) == NULL || BufferLimit(SegBuffer(seg)) <= at); + ShieldFlush(arena); /* see */ /* Allocate the new segment object from the control pool */ @@ -736,8 +744,6 @@ Bool SegCheck(Seg seg) CHECKL(seg->sm == AccessSetEMPTY); CHECKL(seg->pm == AccessSetEMPTY); } else { - /* Segments with ranks may only belong to scannable pools. */ - CHECKL(PoolHasAttr(pool, AttrSCAN)); /* : The Tracer only permits */ /* one rank per segment [ref?] 
so this field is either empty or a */ /* singleton. */ diff --git a/mps/code/segsmss.c b/mps/code/segsmss.c index 7efac5c64a5..20cedf67dd2 100644 --- a/mps/code/segsmss.c +++ b/mps/code/segsmss.c @@ -40,7 +40,6 @@ extern PoolClass AMSTPoolClassGet(void); typedef struct AMSTStruct { AMSStruct amsStruct; /* generic AMS structure */ - Chain chain; /* chain to use */ Bool failSegs; /* fail seg splits & merges when true */ Count splits; /* count of successful segment splits */ Count merges; /* count of successful segment merges */ @@ -335,25 +334,30 @@ static Res AMSTInit(Pool pool, ArgList args) Format format; Chain chain; Res res; - static GenParamStruct genParam = { 1024, 0.2 }; + unsigned gen = AMS_GEN_DEFAULT; ArgStruct arg; AVERT(Pool, pool); - + AVERT(ArgList, args); + + if (ArgPick(&arg, args, MPS_KEY_CHAIN)) + chain = arg.val.chain; + else { + chain = ArenaGlobals(PoolArena(pool))->defaultChain; + gen = 1; /* avoid the nursery of the default chain by default */ + } + if (ArgPick(&arg, args, MPS_KEY_GEN)) + gen = arg.val.u; ArgRequire(&arg, args, MPS_KEY_FORMAT); format = arg.val.format; - res = ChainCreate(&chain, pool->arena, 1, &genParam); - if (res != ResOK) - return res; - res = AMSInitInternal(Pool2AMS(pool), format, chain, 0, FALSE); + res = AMSInitInternal(Pool2AMS(pool), format, chain, gen, FALSE); if (res != ResOK) return res; amst = Pool2AMST(pool); ams = Pool2AMS(pool); ams->segSize = AMSTSegSizePolicy; ams->segClass = AMSTSegClassGet; - amst->chain = chain; amst->failSegs = TRUE; amst->splits = 0; amst->merges = 0; @@ -388,7 +392,6 @@ static void AMSTFinish(Pool pool) AMSFinish(pool); amst->sig = SigInvalid; - ChainDestroy(amst->chain); } @@ -399,7 +402,7 @@ static Bool AMSSegIsFree(Seg seg) AMSSeg amsseg; AVERT(Seg, seg); amsseg = Seg2AMSSeg(seg); - return(amsseg->free == amsseg->grains); + return amsseg->freeGrains == amsseg->grains; } @@ -433,7 +436,7 @@ static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit) * Used as a means of overriding the behaviour of AMSBufferFill. * The code is similar to AMSBufferEmpty. */ -static void AMSUnallocateRange(Seg seg, Addr base, Addr limit) +static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit) { AMSSeg amsseg; Index baseIndex, limitIndex; @@ -461,8 +464,10 @@ static void AMSUnallocateRange(Seg seg, Addr base, Addr limit) BTResRange(amsseg->allocTable, baseIndex, limitIndex); } } - amsseg->free += limitIndex - baseIndex; - amsseg->newAlloc -= limitIndex - baseIndex; + amsseg->freeGrains += limitIndex - baseIndex; + AVER(amsseg->newGrains >= limitIndex - baseIndex); + amsseg->newGrains -= limitIndex - baseIndex; + PoolGenAccountForEmpty(&ams->pgen, AddrOffset(base, limit), FALSE); } @@ -471,7 +476,7 @@ static void AMSUnallocateRange(Seg seg, Addr base, Addr limit) * Used as a means of overriding the behaviour of AMSBufferFill. * The code is similar to AMSUnallocateRange. 
*/ -static void AMSAllocateRange(Seg seg, Addr base, Addr limit) +static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit) { AMSSeg amsseg; Index baseIndex, limitIndex; @@ -499,9 +504,10 @@ static void AMSAllocateRange(Seg seg, Addr base, Addr limit) BTSetRange(amsseg->allocTable, baseIndex, limitIndex); } } - AVER(amsseg->free >= limitIndex - baseIndex); - amsseg->free -= limitIndex - baseIndex; - amsseg->newAlloc += limitIndex - baseIndex; + AVER(amsseg->freeGrains >= limitIndex - baseIndex); + amsseg->freeGrains -= limitIndex - baseIndex; + amsseg->newGrains += limitIndex - baseIndex; + PoolGenAccountForFill(&ams->pgen, AddrOffset(base, limit), FALSE); } @@ -526,6 +532,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn, PoolClass super; Addr base, limit; Arena arena; + AMS ams; AMST amst; Bool b; Seg seg; @@ -537,6 +544,7 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn, AVER(limitReturn != NULL); /* other parameters are checked by next method */ arena = PoolArena(pool); + ams = Pool2AMS(pool); amst = Pool2AMST(pool); /* call next method */ @@ -558,14 +566,14 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn, Seg mergedSeg; Res mres; - AMSUnallocateRange(seg, base, limit); + AMSUnallocateRange(ams, seg, base, limit); mres = SegMerge(&mergedSeg, segLo, seg, withReservoirPermit); if (ResOK == mres) { /* successful merge */ - AMSAllocateRange(mergedSeg, base, limit); + AMSAllocateRange(ams, mergedSeg, base, limit); /* leave range as-is */ } else { /* failed to merge */ AVER(amst->failSegs); /* deliberate fails only */ - AMSAllocateRange(seg, base, limit); + AMSAllocateRange(ams, seg, base, limit); } } @@ -576,13 +584,13 @@ static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn, Addr mid = AddrAdd(base, half); Seg segLo, segHi; Res sres; - AMSUnallocateRange(seg, mid, limit); + AMSUnallocateRange(ams, seg, mid, limit); sres = SegSplit(&segLo, &segHi, seg, mid, withReservoirPermit); if (ResOK == sres) { /* successful split */ limit = mid; /* range is lower segment */ } else { /* failed to split */ AVER(amst->failSegs); /* deliberate fails only */ - AMSAllocateRange(seg, mid, limit); + AMSAllocateRange(ams, seg, mid, limit); } } @@ -758,14 +766,19 @@ static void *test(void *arg, size_t s) mps_ap_t busy_ap; mps_addr_t busy_init; const char *indent = " "; + mps_chain_t chain; + static mps_gen_param_s genParam = {1024, 0.2}; arena = (mps_arena_t)arg; (void)s; /* unused */ die(mps_fmt_create_A(&format, arena, dylan_fmt_A()), "fmt_create"); + die(mps_chain_create(&chain, arena, 1, &genParam), "chain_create"); MPS_ARGS_BEGIN(args) { MPS_ARGS_ADD(args, MPS_KEY_FORMAT, format); + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, chain); + MPS_ARGS_ADD(args, MPS_KEY_GEN, 0); die(mps_pool_create_k(&pool, arena, mps_class_amst(), args), "pool_create(amst)"); } MPS_ARGS_END(args); @@ -836,11 +849,14 @@ static void *test(void *arg, size_t s) } (void)mps_commit(busy_ap, busy_init, 64); + + mps_arena_park(arena); mps_ap_destroy(busy_ap); mps_ap_destroy(ap); mps_root_destroy(exactRoot); mps_root_destroy(ambigRoot); mps_pool_destroy(pool); + mps_chain_destroy(chain); mps_fmt_destroy(format); return NULL; diff --git a/mps/code/splay.c b/mps/code/splay.c index 0e5c1ad6edb..b7ea5a91455 100644 --- a/mps/code/splay.c +++ b/mps/code/splay.c @@ -945,13 +945,12 @@ Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, /* SplayTreeFirst, SplayTreeNext -- iterators * - * SplayTreeFirst receives a key that must precede all - * nodes in the tree. 
It returns TreeEMPTY if the tree is empty. - * Otherwise, it splays the tree to the first node, and returns the - * new root. + * SplayTreeFirst returns TreeEMPTY if the tree is empty. Otherwise, + * it splays the tree to the first node, and returns the new root. * * SplayTreeNext takes a tree and splays it to the successor of a key - * and returns the new root. Returns TreeEMPTY is there are no successors. + * and returns the new root. Returns TreeEMPTY is there are no + * successors. * * SplayTreeFirst and SplayTreeNext do not require the tree to remain * unmodified. @@ -1006,7 +1005,7 @@ Tree SplayTreeNext(SplayTree splay, TreeKey oldKey) { */ static Res SplayNodeDescribe(Tree node, mps_lib_FILE *stream, - SplayNodeDescribeMethod nodeDescribe) + TreeDescribeMethod nodeDescribe) { Res res; @@ -1319,13 +1318,27 @@ void SplayNodeRefresh(SplayTree splay, Tree node) } +/* SplayNodeInit -- initialize client property without splaying */ + +void SplayNodeInit(SplayTree splay, Tree node) +{ + AVERT(SplayTree, splay); + AVERT(Tree, node); + AVER(!TreeHasLeft(node)); /* otherwise, call SplayNodeRefresh */ + AVER(!TreeHasRight(node)); /* otherwise, call SplayNodeRefresh */ + AVER(SplayHasUpdate(splay)); /* otherwise, why call? */ + + splay->updateNode(splay, node); +} + + /* SplayTreeDescribe -- Describe a splay tree * * See . */ Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, Count depth, - SplayNodeDescribeMethod nodeDescribe) + TreeDescribeMethod nodeDescribe) { Res res; diff --git a/mps/code/splay.h b/mps/code/splay.h index 96e9e15b04e..027e7096afc 100644 --- a/mps/code/splay.h +++ b/mps/code/splay.h @@ -19,7 +19,6 @@ typedef Bool (*SplayTestNodeMethod)(SplayTree splay, Tree node, void *closureP, Size closureS); typedef Bool (*SplayTestTreeMethod)(SplayTree splay, Tree node, void *closureP, Size closureS); -typedef Res (*SplayNodeDescribeMethod)(Tree node, mps_lib_FILE *stream); typedef void (*SplayUpdateNodeMethod)(SplayTree splay, Tree node); extern void SplayTrivUpdate(SplayTree splay, Tree node); @@ -70,10 +69,10 @@ extern Bool SplayFindLast(Tree *nodeReturn, SplayTree splay, void *closureP, Size closureS); extern void SplayNodeRefresh(SplayTree splay, Tree node); +extern void SplayNodeInit(SplayTree splay, Tree node); extern Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, - Count depth, - SplayNodeDescribeMethod nodeDescribe); + Count depth, TreeDescribeMethod nodeDescribe); extern void SplayDebugUpdate(SplayTree splay, Tree tree); diff --git a/mps/code/ssan.c b/mps/code/ssan.c index 6f632dd1d24..27233e7b9f6 100644 --- a/mps/code/ssan.c +++ b/mps/code/ssan.c @@ -3,10 +3,16 @@ * $Id$ * Copyright (c) 2001 Ravenbrook Limited. See end of file for license. * - * This module provides zero functionality. It exists to feed the - * linker (prevent linker errors). + * This module makes a best effort to scan the stack and fix the + * registers which may contain roots, using only the features of the + * Standard C library. + * + * .assume.setjmp: The implementation assumes that setjmp stores all + * the registers that need to be scanned in the jmp_buf. 
*/ +#include + #include "mpmtypes.h" #include "misc.h" #include "ss.h" @@ -17,8 +23,19 @@ SRCID(ssan, "$Id$"); Res StackScan(ScanState ss, Addr *stackBot) { - UNUSED(ss); UNUSED(stackBot); - return ResUNIMPL; + jmp_buf jb; + void *stackTop = &jb; + + /* .assume.stack: This implementation assumes that the stack grows + * downwards, so that the address of the jmp_buf is the limit of the + * part of the stack that needs to be scanned. (StackScanInner makes + * the same assumption.) + */ + AVER(stackTop < (void *)stackBot); + + (void)setjmp(jb); + + return StackScanInner(ss, stackBot, stackTop, sizeof jb / sizeof(Addr*)); } diff --git a/mps/code/steptest.c b/mps/code/steptest.c index deee85fef2e..546eaa7e417 100644 --- a/mps/code/steptest.c +++ b/mps/code/steptest.c @@ -478,6 +478,8 @@ static void *test(void *arg, size_t s) printf(" %"PRIuLONGEST" clock reads; ", (ulongest_t)clock_reads); print_time("", total_clock_time / clock_reads, " per read;"); print_time(" recently measured as ", clock_time, ").\n"); + + mps_arena_park(arena); mps_ap_destroy(ap); mps_root_destroy(exactRoot); mps_root_destroy(ambigRoot); diff --git a/mps/code/testlib.h b/mps/code/testlib.h index 9c197cae839..38a4c94bdab 100644 --- a/mps/code/testlib.h +++ b/mps/code/testlib.h @@ -68,6 +68,22 @@ #endif +/* setenv -- set environment variable + * + * Windows lacks setenv(), but _putenv_s() has similar functionality. + * + * + * This macro version may evaluate the name argument twice. + */ + +#if defined(MPS_OS_W3) + +#define setenv(name, value, overwrite) \ + (((overwrite) || !getenv(name)) ? _putenv_s(name, value) : 0) + +#endif + + /* ulongest_t -- longest unsigned integer type * * Define a longest unsigned integer type for testing, scanning, and diff --git a/mps/code/trace.c b/mps/code/trace.c index d072de98585..f85a35d8d0c 100644 --- a/mps/code/trace.c +++ b/mps/code/trace.c @@ -634,33 +634,6 @@ failRootFlip: return res; } -/* traceCopySizes -- preserve size information for later use - * - * A PoolGen's newSize is important information that we want to emit in - * a diagnostic message at TraceStart. In order to do that we must copy - * the information before Whiten changes it. This function does that. - */ - -static void traceCopySizes(Trace trace) -{ - Ring node, nextNode; - Index i; - Arena arena = trace->arena; - - RING_FOR(node, &arena->chainRing, nextNode) { - Chain chain = RING_ELT(Chain, chainRing, node); - - for(i = 0; i < chain->genCount; ++i) { - Ring n, nn; - GenDesc desc = &chain->gens[i]; - RING_FOR(n, &desc->locusRing, nn) { - PoolGen gen = RING_ELT(PoolGen, genRing, n); - gen->newSizeAtCreate = gen->newSize; - } - } - } - return; -} /* TraceCreate -- create a Trace object * @@ -677,6 +650,17 @@ static void traceCopySizes(Trace trace) * This code is written to be adaptable to allocating Trace objects * dynamically. */ +static void TraceCreatePoolGen(GenDesc gen) +{ + Ring n, nn; + RING_FOR(n, &gen->locusRing, nn) { + PoolGen pgen = RING_ELT(PoolGen, genRing, n); + EVENT11(TraceCreatePoolGen, gen, gen->capacity, gen->mortality, gen->zones, + pgen->pool, pgen->totalSize, pgen->freeSize, pgen->newSize, + pgen->oldSize, pgen->newDeferredSize, pgen->oldDeferredSize); + } +} + Res TraceCreate(Trace *traceReturn, Arena arena, int why) { TraceId ti; @@ -747,7 +731,24 @@ found: /* .. 
_request.dylan.160098: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/160098 */ ShieldSuspend(arena); - traceCopySizes(trace); + STATISTIC_STAT ({ + /* Iterate over all chains, all GenDescs within a chain, and all + * PoolGens within a GenDesc. */ + Ring node; + Ring nextNode; + + RING_FOR(node, &arena->chainRing, nextNode) { + Chain chain = RING_ELT(Chain, chainRing, node); + Index i; + for (i = 0; i < chain->genCount; ++i) { + GenDesc gen = &chain->gens[i]; + TraceCreatePoolGen(gen); + } + } + + /* Now do topgen GenDesc, and all PoolGens within it. */ + TraceCreatePoolGen(&arena->topGen); + }); *traceReturn = trace; return ResOK; @@ -1564,9 +1565,9 @@ double TraceWorkFactor = 0.25; * * TraceStart should be passed a trace with state TraceINIT, i.e., * recently returned from TraceCreate, with some condemned segments - * added. mortality is the fraction of the condemned set expected to - * survive. finishingTime is relative to the current polling clock, see - * . + * added. mortality is the fraction of the condemned set expected not + * to survive. finishingTime is relative to the current polling clock, + * see . * * .start.black: All segments are black w.r.t. a newly allocated trace. * However, if TraceStart initialized segments to black when it @@ -1588,19 +1589,6 @@ static Res rootGrey(Root root, void *p) } -static void TraceStartPoolGen(Chain chain, GenDesc desc, Bool top, Index i) -{ - Ring n, nn; - RING_FOR(n, &desc->locusRing, nn) { - PoolGen gen = RING_ELT(PoolGen, genRing, n); - EVENT11(TraceStartPoolGen, chain, BOOLOF(top), i, desc, - desc->capacity, desc->mortality, desc->zones, - gen->pool, gen->nr, gen->totalSize, - gen->newSizeAtCreate); - } -} - - /* TraceStart -- start a trace whose white set has been established * * The main job of TraceStart is to set up the grey list for a trace. The @@ -1665,26 +1653,6 @@ Res TraceStart(Trace trace, double mortality, double finishingTime) } while (SegNext(&seg, arena, seg)); } - STATISTIC_STAT ({ - /* @@ */ - /* Iterate over all chains, all GenDescs within a chain, */ - /* (and all PoolGens within a GenDesc). */ - Ring node; - Ring nextNode; - Index i; - - RING_FOR(node, &arena->chainRing, nextNode) { - Chain chain = RING_ELT(Chain, chainRing, node); - for(i = 0; i < chain->genCount; ++i) { - GenDesc desc = &chain->gens[i]; - TraceStartPoolGen(chain, desc, FALSE, i); - } - } - - /* Now do topgen GenDesc (and all PoolGens within it). */ - TraceStartPoolGen(NULL, &arena->topGen, TRUE, 0); - }); - res = RootsIterate(ArenaGlobals(arena), rootGrey, (void *)trace); AVER(res == ResOK); @@ -1738,7 +1706,10 @@ Res TraceStart(Trace trace, double mortality, double finishingTime) void TraceQuantum(Trace trace) { Size pollEnd; - Arena arena = trace->arena; + Arena arena; + + AVERT(Trace, trace); + arena = trace->arena; pollEnd = traceWorkClock(trace) + trace->rate; do { diff --git a/mps/code/traceanc.c b/mps/code/traceanc.c index 675e18e1f95..8e0be12dac9 100644 --- a/mps/code/traceanc.c +++ b/mps/code/traceanc.c @@ -560,7 +560,7 @@ void ArenaRelease(Globals globals) AVERT(Globals, globals); arenaForgetProtection(globals); globals->clamped = FALSE; - (void)TracePoll(globals); + ArenaPoll(globals); } diff --git a/mps/code/tract.c b/mps/code/tract.c index 2468887bc17..5ca5ab6f040 100644 --- a/mps/code/tract.c +++ b/mps/code/tract.c @@ -210,25 +210,25 @@ Res ChunkInit(Chunk chunk, Arena arena, /* Add the chunk's free address space to the arena's freeCBS, so that we can allocate from it. 
*/ - if (arena->hasFreeCBS) { - res = ArenaFreeCBSInsert(arena, - PageIndexBase(chunk, chunk->allocBase), - chunk->limit); + if (arena->hasFreeLand) { + res = ArenaFreeLandInsert(arena, + PageIndexBase(chunk, chunk->allocBase), + chunk->limit); if (res != ResOK) - goto failCBSInsert; + goto failLandInsert; } chunk->sig = ChunkSig; AVERT(Chunk, chunk); /* As part of the bootstrap, the first created chunk becomes the primary - chunk. This step allows AreaFreeCBSInsert to allocate pages. */ + chunk. This step allows AreaFreeLandInsert to allocate pages. */ if (arena->primary == NULL) arena->primary = chunk; return ResOK; -failCBSInsert: +failLandInsert: (arena->class->chunkFinish)(chunk); /* .no-clean: No clean-ups needed past this point for boot, as we will discard the chunk. */ @@ -248,10 +248,10 @@ void ChunkFinish(Chunk chunk) chunk->sig = SigInvalid; RingRemove(&chunk->chunkRing); - if (ChunkArena(chunk)->hasFreeCBS) - ArenaFreeCBSDelete(ChunkArena(chunk), - PageIndexBase(chunk, chunk->allocBase), - chunk->limit); + if (ChunkArena(chunk)->hasFreeLand) + ArenaFreeLandDelete(ChunkArena(chunk), + PageIndexBase(chunk, chunk->allocBase), + chunk->limit); if (chunk->arena->primary == chunk) chunk->arena->primary = NULL; diff --git a/mps/code/tract.h b/mps/code/tract.h index c359032feee..b957e024fd1 100644 --- a/mps/code/tract.h +++ b/mps/code/tract.h @@ -37,9 +37,6 @@ typedef union PagePoolUnion { * * .tract: Tracts represent the grains of memory allocation from * the arena. See . - * - * .bool: The hasSeg field is a boolean, but can't be represented - * as type Bool. See . */ typedef struct TractStruct { /* Tract structure */ @@ -47,7 +44,7 @@ typedef struct TractStruct { /* Tract structure */ void *p; /* pointer for use of owning pool */ Addr base; /* Base address of the tract */ TraceSet white : TraceLIMIT; /* traces for which tract is white */ - unsigned hasSeg : 1; /* does tract have a seg in p? See .bool */ + BOOLFIELD(hasSeg); /* does tract have a seg in p? */ } TractStruct; diff --git a/mps/code/tree.h b/mps/code/tree.h index 69ee841d3c3..5d9a6206670 100644 --- a/mps/code/tree.h +++ b/mps/code/tree.h @@ -25,6 +25,8 @@ typedef struct TreeStruct { Tree left, right; } TreeStruct; +typedef Res (*TreeDescribeMethod)(Tree tree, mps_lib_FILE *stream); + /* TreeKey and TreeCompare -- ordered binary trees * diff --git a/mps/code/vman.c b/mps/code/vman.c index 6ba3d0b9dff..2e68118e27d 100644 --- a/mps/code/vman.c +++ b/mps/code/vman.c @@ -63,11 +63,12 @@ Res VMParamFromArgs(void *params, size_t paramSize, ArgList args) /* VMCreate -- reserve some virtual address space, and create a VM structure */ -Res VMCreate(VM *vmReturn, Size size) +Res VMCreate(VM *vmReturn, Size size, void *params) { VM vm; AVER(vmReturn != NULL); + AVER(params != NULL); /* Note that because we add VMANPageALIGNMENT rather than */ /* VMANPageALIGNMENT-1 we are not in danger of overflowing */ diff --git a/mps/code/w3i3mv.nmk b/mps/code/w3i3mv.nmk index 97a669700cc..458fd033484 100644 --- a/mps/code/w3i3mv.nmk +++ b/mps/code/w3i3mv.nmk @@ -7,13 +7,22 @@ PFM = w3i3mv PFMDEFS = /DCONFIG_PF_STRING="w3i3mv" /DCONFIG_PF_W3I3MV /DWIN32 /D_WINDOWS +# MPM platform-specific sources. +MPMPF = \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + + !INCLUDE commpre.nmk !INCLUDE mv.nmk -# MPM sources: core plus platform-specific. 
-MPM = $(MPMCOMMON) - - # Source to object file mappings and CFLAGS amalgamation # @@ -32,18 +41,12 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFHOT) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFHOT) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSHOT) MPMOBJ0 = $(MPM:<=w3i3mv\hot\) -PLINTHOBJ0 = $(PLINTH:<=w3i3mv\hot\) -AMSOBJ0 = $(AMS:<=w3i3mv\hot\) -AMCOBJ0 = $(AMC:<=w3i3mv\hot\) -AWLOBJ0 = $(AWL:<=w3i3mv\hot\) -LOOBJ0 = $(LO:<=w3i3mv\hot\) -SNCOBJ0 = $(SNC:<=w3i3mv\hot\) -MVFFOBJ0 = $(MVFF:<=w3i3mv\hot\) -DWOBJ0 = $(DW:<=w3i3mv\hot\) +FMTDYOBJ0 = $(FMTDY:<=w3i3mv\hot\) FMTTESTOBJ0 = $(FMTTEST:<=w3i3mv\hot\) FMTSCHEMEOBJ0 = $(FMTSCHEME:<=w3i3mv\hot\) POOLNOBJ0 = $(POOLN:<=w3i3mv\hot\) TESTLIBOBJ0 = $(TESTLIB:<=w3i3mv\hot\) +TESTTHROBJ0 = $(TESTTHR:<=w3i3mv\hot\) !ELSEIF "$(VARIETY)" == "cool" CFLAGS=$(CFLAGSCOMMONPRE) $(CFCOOL) $(CFLAGSCOMMONPOST) @@ -51,18 +54,12 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFCOOL) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCOOL) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCOOL) MPMOBJ0 = $(MPM:<=w3i3mv\cool\) -PLINTHOBJ0 = $(PLINTH:<=w3i3mv\cool\) -AMSOBJ0 = $(AMS:<=w3i3mv\cool\) -AMCOBJ0 = $(AMC:<=w3i3mv\cool\) -AWLOBJ0 = $(AWL:<=w3i3mv\cool\) -LOOBJ0 = $(LO:<=w3i3mv\cool\) -SNCOBJ0 = $(SNC:<=w3i3mv\cool\) -MVFFOBJ0 = $(MVFF:<=w3i3mv\cool\) -DWOBJ0 = $(DW:<=w3i3mv\cool\) +FMTDYOBJ0 = $(FMTDY:<=w3i3mv\cool\) FMTTESTOBJ0 = $(FMTTEST:<=w3i3mv\cool\) FMTSCHEMEOBJ0 = $(FMTSCHEME:<=w3i3mv\cool\) POOLNOBJ0 = $(POOLN:<=w3i3mv\cool\) TESTLIBOBJ0 = $(TESTLIB:<=w3i3mv\cool\) +TESTTHROBJ0 = $(TESTTHR:<=w3i3mv\cool\) !ELSEIF "$(VARIETY)" == "rash" CFLAGS=$(CFLAGSCOMMONPRE) $(CFRASH) $(CFLAGSCOMMONPOST) @@ -70,43 +67,12 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFRASH) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFRASH) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSRASH) MPMOBJ0 = $(MPM:<=w3i3mv\rash\) -PLINTHOBJ0 = $(PLINTH:<=w3i3mv\rash\) -AMSOBJ0 = $(AMS:<=w3i3mv\rash\) -AMCOBJ0 = $(AMC:<=w3i3mv\rash\) -AWLOBJ0 = $(AWL:<=w3i3mv\rash\) -LOOBJ0 = $(LO:<=w3i3mv\rash\) -SNCOBJ0 = $(SNC:<=w3i3mv\rash\) -MVFFOBJ0 = $(MVFF:<=w3i3mv\rash\) -DWOBJ0 = $(DW:<=w3i3mv\rash\) +FMTDYOBJ0 = $(FMTDY:<=w3i3mv\rash\) FMTTESTOBJ0 = $(FMTTEST:<=w3i3mv\rash\) FMTSCHEMEOBJ0 = $(FMTSCHEME:<=w3i3mv\rash\) POOLNOBJ0 = $(POOLN:<=w3i3mv\rash\) TESTLIBOBJ0 = $(TESTLIB:<=w3i3mv\rash\) - -#!ELSEIF "$(VARIETY)" == "cv" -#CFLAGS=$(CFLAGSCOMMON) $(CFCV) -#LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCV) -#LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCV) -#MPMOBJ0 = $(MPM:<=w3i3mv\cv\) -#MPMOBJ = $(MPMOBJ0:>=.obj) -#PLINTHOBJ0 = $(PLINTH:<=w3i3mv\cv\) -#PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -#AMSOBJ0 = $(AMS:<=w3i3mv\cv\) -#AMSOBJ = $(AMSOBJ0:>=.obj) -#AMCOBJ0 = $(AMC:<=w3i3mv\cv\) -#AMCOBJ = $(AMCOBJ0:>=.obj) -#AWLOBJ0 = $(AWL:<=w3i3mv\cv\) -#AWLOBJ = $(AWLOBJ0:>=.obj) -#LOOBJ0 = $(LO:<=w3i3mv\cv\) -#LOOBJ = $(LOOBJ0:>=.obj) -#SNCOBJ0 = $(SNC:<=w3i3mv\cv\) -#SNCOBJ = $(SNCOBJ0:>=.obj) -#DWOBJ0 = $(DW:<=w3i3mv\cv\) -#DWOBJ = $(DWOBJ0:>=.obj) -#POOLNOBJ0 = $(POOLN:<=w3i3mv\cv\) -#POOLNOBJ = $(POOLNOBJ0:>=.obj) -#TESTLIBOBJ0 = $(TESTLIB:<=w3i3mv\cv\) -#TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ0 = $(TESTTHR:<=w3i3mv\rash\) !ENDIF @@ -114,18 +80,12 @@ TESTLIBOBJ0 = $(TESTLIB:<=w3i3mv\rash\) # files included in the part MPMOBJ = $(MPMOBJ0:>=.obj) -PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -AMSOBJ = $(AMSOBJ0:>=.obj) -AMCOBJ = $(AMCOBJ0:>=.obj) -AWLOBJ = $(AWLOBJ0:>=.obj) -LOOBJ = $(LOOBJ0:>=.obj) -SNCOBJ = $(SNCOBJ0:>=.obj) -MVFFOBJ = $(MVFFOBJ0:>=.obj) -DWOBJ = $(DWOBJ0:>=.obj) +FMTDYOBJ = $(FMTDYOBJ0:>=.obj) FMTTESTOBJ = $(FMTTESTOBJ0:>=.obj) FMTSCHEMEOBJ = 
$(FMTSCHEMEOBJ0:>=.obj) POOLNOBJ = $(POOLNOBJ0:>=.obj) TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ = $(TESTTHROBJ0:>=.obj) !INCLUDE commpost.nmk diff --git a/mps/code/w3i3pc.nmk b/mps/code/w3i3pc.nmk index d6d424b669d..f3e848a94dd 100644 --- a/mps/code/w3i3pc.nmk +++ b/mps/code/w3i3pc.nmk @@ -7,13 +7,22 @@ PFM = w3i3pc PFMDEFS = /DCONFIG_PF_STRING="w3i3pc" /DCONFIG_PF_W3I3PC /DWIN32 /D_WINDOWS +# MPM platform-specific sources. +MPMPF = \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + + !INCLUDE commpre.nmk !INCLUDE pc.nmk -# MPM sources: core plus platform-specific. -MPM = $(MPMCOMMON) - - # Source to object file mappings and CFLAGS amalgamation # @@ -32,17 +41,11 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFHOT) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFHOT) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSHOT) MPMOBJ0 = $(MPM:<=w3i3pc\hot\) -PLINTHOBJ0 = $(PLINTH:<=w3i3pc\hot\) -AMSOBJ0 = $(AMS:<=w3i3pc\hot\) -AMCOBJ0 = $(AMC:<=w3i3pc\hot\) -AWLOBJ0 = $(AWL:<=w3i3pc\hot\) -LOOBJ0 = $(LO:<=w3i3pc\hot\) -SNCOBJ0 = $(SNC:<=w3i3pc\hot\) -MVFFOBJ0 = $(MVFF:<=w3i3pc\hot\) -DWOBJ0 = $(DW:<=w3i3pc\hot\) +FMTDYOBJ0 = $(FMTDY:<=w3i3pc\hot\) FMTTESTOBJ0 = $(FMTTEST:<=w3i3pc\hot\) POOLNOBJ0 = $(POOLN:<=w3i3pc\hot\) TESTLIBOBJ0 = $(TESTLIB:<=w3i3pc\hot\) +TESTTHROBJ0 = $(TESTTHR:<=w3i3pc\hot\) !ELSEIF "$(VARIETY)" == "cool" CFLAGS=$(CFLAGSCOMMONPRE) $(CFCOOL) $(CFLAGSCOMMONPOST) @@ -50,17 +53,11 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFCOOL) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCOOL) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCOOL) MPMOBJ0 = $(MPM:<=w3i3pc\cool\) -PLINTHOBJ0 = $(PLINTH:<=w3i3pc\cool\) -AMSOBJ0 = $(AMS:<=w3i3pc\cool\) -AMCOBJ0 = $(AMC:<=w3i3pc\cool\) -AWLOBJ0 = $(AWL:<=w3i3pc\cool\) -LOOBJ0 = $(LO:<=w3i3pc\cool\) -SNCOBJ0 = $(SNC:<=w3i3pc\cool\) -MVFFOBJ0 = $(MVFF:<=w3i3pc\cool\) -DWOBJ0 = $(DW:<=w3i3pc\cool\) +FMTDYOBJ0 = $(FMTDY:<=w3i3pc\cool\) FMTTESTOBJ0 = $(FMTTEST:<=w3i3pc\cool\) POOLNOBJ0 = $(POOLN:<=w3i3pc\cool\) TESTLIBOBJ0 = $(TESTLIB:<=w3i3pc\cool\) +TESTTHROBJ0 = $(TESTTHR:<=w3i3pc\cool\) !ELSEIF "$(VARIETY)" == "rash" CFLAGS=$(CFLAGSCOMMONPRE) $(CFRASH) $(CFLAGSCOMMONPOST) @@ -68,42 +65,11 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFRASH) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFRASH) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSRASH) MPMOBJ0 = $(MPM:<=w3i3pc\rash\) -PLINTHOBJ0 = $(PLINTH:<=w3i3pc\rash\) -AMSOBJ0 = $(AMS:<=w3i3pc\rash\) -AMCOBJ0 = $(AMC:<=w3i3pc\rash\) -AWLOBJ0 = $(AWL:<=w3i3pc\rash\) -LOOBJ0 = $(LO:<=w3i3pc\rash\) -SNCOBJ0 = $(SNC:<=w3i3pc\rash\) -MVFFOBJ0 = $(MVFF:<=w3i3pc\rash\) -DWOBJ0 = $(DW:<=w3i3pc\rash\) +FMTDYOBJ0 = $(FMTDY:<=w3i3pc\rash\) FMTTESTOBJ0 = $(FMTTEST:<=w3i3pc\rash\) POOLNOBJ0 = $(POOLN:<=w3i3pc\rash\) TESTLIBOBJ0 = $(TESTLIB:<=w3i3pc\rash\) - -#!ELSEIF "$(VARIETY)" == "cv" -#CFLAGS=$(CFLAGSCOMMON) $(CFCV) -#LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCV) -#LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCV) -#MPMOBJ0 = $(MPM:<=w3i3pc\cv\) -#MPMOBJ = $(MPMOBJ0:>=.obj) -#PLINTHOBJ0 = $(PLINTH:<=w3i3pc\cv\) -#PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -#AMSOBJ0 = $(AMS:<=w3i3pc\cv\) -#AMSOBJ = $(AMSOBJ0:>=.obj) -#AMCOBJ0 = $(AMC:<=w3i3pc\cv\) -#AMCOBJ = $(AMCOBJ0:>=.obj) -#AWLOBJ0 = $(AWL:<=w3i3pc\cv\) -#AWLOBJ = $(AWLOBJ0:>=.obj) -#LOOBJ0 = $(LO:<=w3i3pc\cv\) -#LOOBJ = $(LOOBJ0:>=.obj) -#SNCOBJ0 = $(SNC:<=w3i3pc\cv\) -#SNCOBJ = $(SNCOBJ0:>=.obj) -#DWOBJ0 = $(DW:<=w3i3pc\cv\) -#DWOBJ = $(DWOBJ0:>=.obj) -#POOLNOBJ0 = $(POOLN:<=w3i3pc\cv\) -#POOLNOBJ = $(POOLNOBJ0:>=.obj) -#TESTLIBOBJ0 = $(TESTLIB:<=w3i3pc\cv\) -#TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ0 = $(TESTTHR:<=w3i3pc\rash\) !ENDIF @@ 
-111,17 +77,11 @@ TESTLIBOBJ0 = $(TESTLIB:<=w3i3pc\rash\) # files included in the part MPMOBJ = $(MPMOBJ0:>=.obj) -PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -AMSOBJ = $(AMSOBJ0:>=.obj) -AMCOBJ = $(AMCOBJ0:>=.obj) -AWLOBJ = $(AWLOBJ0:>=.obj) -LOOBJ = $(LOOBJ0:>=.obj) -SNCOBJ = $(SNCOBJ0:>=.obj) -MVFFOBJ = $(MVFFOBJ0:>=.obj) -DWOBJ = $(DWOBJ0:>=.obj) +FMTDYOBJ = $(FMTDYOBJ0:>=.obj) FMTTESTOBJ = $(FMTTESTOBJ0:>=.obj) POOLNOBJ = $(POOLNOBJ0:>=.obj) TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ = $(TESTTHROBJ0:>=.obj) !INCLUDE commpost.nmk diff --git a/mps/code/w3i6mv.nmk b/mps/code/w3i6mv.nmk index 6d31a81452d..5cfa511db91 100644 --- a/mps/code/w3i6mv.nmk +++ b/mps/code/w3i6mv.nmk @@ -1,18 +1,28 @@ # w3i6mv.nmk: WINDOWS (x86-64) NMAKE FILE -*- makefile -*- # # $Id$ -# Copyright (c) 2001-2013 Ravenbrook Limited. See end of file for license. +# Copyright (c) 2001-2014 Ravenbrook Limited. See end of file for license. PFM = w3i6mv PFMDEFS = /DCONFIG_PF_STRING="w3i6mv" /DCONFIG_PF_W3I6MV /DWIN32 /D_WINDOWS MASM = ml64 -# MPM sources: core plus platform-specific. -MPM = $(MPMCOMMON) - +# MPM platform-specific sources. +MPMPF = \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + !INCLUDE commpre.nmk +!INCLUDE mv.nmk # Source to object file mappings and CFLAGS amalgamation @@ -32,18 +42,12 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFHOT) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFHOT) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSHOT) MPMOBJ0 = $(MPM:<=w3i6mv\hot\) -PLINTHOBJ0 = $(PLINTH:<=w3i6mv\hot\) -AMSOBJ0 = $(AMS:<=w3i6mv\hot\) -AMCOBJ0 = $(AMC:<=w3i6mv\hot\) -AWLOBJ0 = $(AWL:<=w3i6mv\hot\) -LOOBJ0 = $(LO:<=w3i6mv\hot\) -SNCOBJ0 = $(SNC:<=w3i6mv\hot\) -MVFFOBJ0 = $(MVFF:<=w3i6mv\hot\) -DWOBJ0 = $(DW:<=w3i6mv\hot\) +FMTDYOBJ0 = $(FMTDY:<=w3i6mv\hot\) FMTTESTOBJ0 = $(FMTTEST:<=w3i6mv\hot\) FMTSCHEMEOBJ0 = $(FMTSCHEME:<=w3i6mv\hot\) POOLNOBJ0 = $(POOLN:<=w3i6mv\hot\) TESTLIBOBJ0 = $(TESTLIB:<=w3i6mv\hot\) +TESTTHROBJ0 = $(TESTTHR:<=w3i6mv\hot\) !ELSEIF "$(VARIETY)" == "cool" CFLAGS=$(CFLAGSCOMMONPRE) $(CFCOOL) $(CFLAGSCOMMONPOST) @@ -51,18 +55,12 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFCOOL) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCOOL) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCOOL) MPMOBJ0 = $(MPM:<=w3i6mv\cool\) -PLINTHOBJ0 = $(PLINTH:<=w3i6mv\cool\) -AMSOBJ0 = $(AMS:<=w3i6mv\cool\) -AMCOBJ0 = $(AMC:<=w3i6mv\cool\) -AWLOBJ0 = $(AWL:<=w3i6mv\cool\) -LOOBJ0 = $(LO:<=w3i6mv\cool\) -SNCOBJ0 = $(SNC:<=w3i6mv\cool\) -MVFFOBJ0 = $(MVFF:<=w3i6mv\cool\) -DWOBJ0 = $(DW:<=w3i6mv\cool\) +FMTDYOBJ0 = $(FMTDY:<=w3i6mv\cool\) FMTTESTOBJ0 = $(FMTTEST:<=w3i6mv\cool\) FMTSCHEMEOBJ0 = $(FMTSCHEME:<=w3i6mv\cool\) POOLNOBJ0 = $(POOLN:<=w3i6mv\cool\) TESTLIBOBJ0 = $(TESTLIB:<=w3i6mv\cool\) +TESTTHROBJ0 = $(TESTTHR:<=w3i6mv\cool\) !ELSEIF "$(VARIETY)" == "rash" CFLAGS=$(CFLAGSCOMMONPRE) $(CFRASH) $(CFLAGSCOMMONPOST) @@ -70,43 +68,12 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFRASH) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFRASH) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSRASH) MPMOBJ0 = $(MPM:<=w3i6mv\rash\) -PLINTHOBJ0 = $(PLINTH:<=w3i6mv\rash\) -AMSOBJ0 = $(AMS:<=w3i6mv\rash\) -AMCOBJ0 = $(AMC:<=w3i6mv\rash\) -AWLOBJ0 = $(AWL:<=w3i6mv\rash\) -LOOBJ0 = $(LO:<=w3i6mv\rash\) -SNCOBJ0 = $(SNC:<=w3i6mv\rash\) -MVFFOBJ0 = $(MVFF:<=w3i6mv\rash\) -DWOBJ0 = $(DW:<=w3i6mv\rash\) +FMTDYOBJ0 = $(FMTDY:<=w3i6mv\rash\) FMTTESTOBJ0 = $(FMTTEST:<=w3i6mv\rash\) FMTSCHEMEOBJ0 = $(FMTSCHEME:<=w3i6mv\rash\) POOLNOBJ0 = $(POOLN:<=w3i6mv\rash\) TESTLIBOBJ0 = $(TESTLIB:<=w3i6mv\rash\) - -#!ELSEIF "$(VARIETY)" == "cv" -#CFLAGS=$(CFLAGSCOMMON) $(CFCV) -#LINKFLAGS=$(LINKFLAGSCOMMON) 
$(LFCV) -#LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCV) -#MPMOBJ0 = $(MPM:<=w3i6mv\cv\) -#MPMOBJ = $(MPMOBJ0:>=.obj) -#PLINTHOBJ0 = $(PLINTH:<=w3i6mv\cv\) -#PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -#AMSOBJ0 = $(AMS:<=w3i6mv\cv\) -#AMSOBJ = $(AMSOBJ0:>=.obj) -#AMCOBJ0 = $(AMC:<=w3i6mv\cv\) -#AMCOBJ = $(AMCOBJ0:>=.obj) -#AWLOBJ0 = $(AWL:<=w3i6mv\cv\) -#AWLOBJ = $(AWLOBJ0:>=.obj) -#LOOBJ0 = $(LO:<=w3i6mv\cv\) -#LOOBJ = $(LOOBJ0:>=.obj) -#SNCOBJ0 = $(SNC:<=w3i6mv\cv\) -#SNCOBJ = $(SNCOBJ0:>=.obj) -#DWOBJ0 = $(DW:<=w3i6mv\cv\) -#DWOBJ = $(DWOBJ0:>=.obj) -#POOLNOBJ0 = $(POOLN:<=w3i6mv\cv\) -#POOLNOBJ = $(POOLNOBJ0:>=.obj) -#TESTLIBOBJ0 = $(TESTLIB:<=w3i6mv\cv\) -#TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ0 = $(TESTTHR:<=w3i6mv\rash\) !ENDIF @@ -114,18 +81,12 @@ TESTLIBOBJ0 = $(TESTLIB:<=w3i6mv\rash\) # files included in the part MPMOBJ = $(MPMOBJ0:>=.obj) -PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -AMSOBJ = $(AMSOBJ0:>=.obj) -AMCOBJ = $(AMCOBJ0:>=.obj) -AWLOBJ = $(AWLOBJ0:>=.obj) -LOOBJ = $(LOOBJ0:>=.obj) -SNCOBJ = $(SNCOBJ0:>=.obj) -MVFFOBJ = $(MVFFOBJ0:>=.obj) -DWOBJ = $(DWOBJ0:>=.obj) +FMTDYOBJ = $(FMTDYOBJ0:>=.obj) FMTTESTOBJ = $(FMTTESTOBJ0:>=.obj) FMTSCHEMEOBJ = $(FMTSCHEMEOBJ0:>=.obj) POOLNOBJ = $(POOLNOBJ0:>=.obj) TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ = $(TESTTHROBJ0:>=.obj) !INCLUDE commpost.nmk @@ -133,7 +94,7 @@ TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) # C. COPYRIGHT AND LICENSE # -# Copyright (C) 2001-2013 Ravenbrook Limited . +# Copyright (C) 2001-2014 Ravenbrook Limited . # All rights reserved. This is an open source license. Contact # Ravenbrook for commercial licensing options. # diff --git a/mps/code/w3i6pc.nmk b/mps/code/w3i6pc.nmk index 1decdde0083..58488b757f7 100644 --- a/mps/code/w3i6pc.nmk +++ b/mps/code/w3i6pc.nmk @@ -9,14 +9,23 @@ PFM = w3i6pc PFMDEFS = /DCONFIG_PF_STRING="w3i6pc" /DCONFIG_PF_W3I6PC /DWIN32 /D_WINDOWS -!INCLUDE commpre.nmk -!INCLUDE pc.nmk - CFLAGSCOMMONPRE = $(CFLAGSCOMMONPRE) /Tamd64-coff -# MPM sources: core plus platform-specific. -MPM = $(MPMCOMMON) +# MPM platform-specific sources. 
+MPMPF = \ + \ + \ + \ + \ + \ + \ + \ + \ + \ + +!INCLUDE commpre.nmk +!INCLUDE pc.nmk # Source to object file mappings and CFLAGS amalgamation @@ -36,17 +45,11 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFHOT) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFHOT) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSHOT) MPMOBJ0 = $(MPM:<=w3i6pc\hot\) -PLINTHOBJ0 = $(PLINTH:<=w3i6pc\hot\) -AMSOBJ0 = $(AMS:<=w3i6pc\hot\) -AMCOBJ0 = $(AMC:<=w3i6pc\hot\) -AWLOBJ0 = $(AWL:<=w3i6pc\hot\) -LOOBJ0 = $(LO:<=w3i6pc\hot\) -SNCOBJ0 = $(SNC:<=w3i6pc\hot\) -MVFFOBJ0 = $(MVFF:<=w3i6pc\hot\) -DWOBJ0 = $(DW:<=w3i6pc\hot\) +FMTDYOBJ0 = $(FMTDY:<=w3i6pc\hot\) FMTTESTOBJ0 = $(FMTTEST:<=w3i6pc\hot\) POOLNOBJ0 = $(POOLN:<=w3i6pc\hot\) TESTLIBOBJ0 = $(TESTLIB:<=w3i6pc\hot\) +TESTTHROBJ0 = $(TESTTHR:<=w3i6pc\hot\) !ELSEIF "$(VARIETY)" == "cool" CFLAGS=$(CFLAGSCOMMONPRE) $(CFCOOL) $(CFLAGSCOMMONPOST) @@ -54,17 +57,11 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFCOOL) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCOOL) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCOOL) MPMOBJ0 = $(MPM:<=w3i6pc\cool\) -PLINTHOBJ0 = $(PLINTH:<=w3i6pc\cool\) -AMSOBJ0 = $(AMS:<=w3i6pc\cool\) -AMCOBJ0 = $(AMC:<=w3i6pc\cool\) -AWLOBJ0 = $(AWL:<=w3i6pc\cool\) -LOOBJ0 = $(LO:<=w3i6pc\cool\) -SNCOBJ0 = $(SNC:<=w3i6pc\cool\) -MVFFOBJ0 = $(MVFF:<=w3i6pc\cool\) -DWOBJ0 = $(DW:<=w3i6pc\cool\) +FMTDYOBJ0 = $(FMTDY:<=w3i6pc\cool\) FMTTESTOBJ0 = $(FMTTEST:<=w3i6pc\cool\) POOLNOBJ0 = $(POOLN:<=w3i6pc\cool\) TESTLIBOBJ0 = $(TESTLIB:<=w3i6pc\cool\) +TESTTHROBJ0 = $(TESTTHR:<=w3i6pc\cool\) !ELSEIF "$(VARIETY)" == "rash" CFLAGS=$(CFLAGSCOMMONPRE) $(CFRASH) $(CFLAGSCOMMONPOST) @@ -72,42 +69,11 @@ CFLAGSSQL=$(CFLAGSSQLPRE) $(CFRASH) $(CFLAGSSQLPOST) LINKFLAGS=$(LINKFLAGSCOMMON) $(LFRASH) LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSRASH) MPMOBJ0 = $(MPM:<=w3i6pc\rash\) -PLINTHOBJ0 = $(PLINTH:<=w3i6pc\rash\) -AMSOBJ0 = $(AMS:<=w3i6pc\rash\) -AMCOBJ0 = $(AMC:<=w3i6pc\rash\) -AWLOBJ0 = $(AWL:<=w3i6pc\rash\) -LOOBJ0 = $(LO:<=w3i6pc\rash\) -SNCOBJ0 = $(SNC:<=w3i6pc\rash\) -MVFFOBJ0 = $(MVFF:<=w3i6pc\rash\) -DWOBJ0 = $(DW:<=w3i6pc\rash\) +FMTDYOBJ0 = $(FMTDY:<=w3i6pc\rash\) FMTTESTOBJ0 = $(FMTTEST:<=w3i6pc\rash\) POOLNOBJ0 = $(POOLN:<=w3i6pc\rash\) TESTLIBOBJ0 = $(TESTLIB:<=w3i6pc\rash\) - -#!ELSEIF "$(VARIETY)" == "cv" -#CFLAGS=$(CFLAGSCOMMON) $(CFCV) -#LINKFLAGS=$(LINKFLAGSCOMMON) $(LFCV) -#LIBFLAGS=$(LIBFLAGSCOMMON) $(LIBFLAGSCV) -#MPMOBJ0 = $(MPM:<=w3i6pc\cv\) -#MPMOBJ = $(MPMOBJ0:>=.obj) -#PLINTHOBJ0 = $(PLINTH:<=w3i6pc\cv\) -#PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -#AMSOBJ0 = $(AMS:<=w3i6pc\cv\) -#AMSOBJ = $(AMSOBJ0:>=.obj) -#AMCOBJ0 = $(AMC:<=w3i6pc\cv\) -#AMCOBJ = $(AMCOBJ0:>=.obj) -#AWLOBJ0 = $(AWL:<=w3i6pc\cv\) -#AWLOBJ = $(AWLOBJ0:>=.obj) -#LOOBJ0 = $(LO:<=w3i6pc\cv\) -#LOOBJ = $(LOOBJ0:>=.obj) -#SNCOBJ0 = $(SNC:<=w3i6pc\cv\) -#SNCOBJ = $(SNCOBJ0:>=.obj) -#DWOBJ0 = $(DW:<=w3i6pc\cv\) -#DWOBJ = $(DWOBJ0:>=.obj) -#POOLNOBJ0 = $(POOLN:<=w3i6pc\cv\) -#POOLNOBJ = $(POOLNOBJ0:>=.obj) -#TESTLIBOBJ0 = $(TESTLIB:<=w3i6pc\cv\) -#TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ0 = $(TESTTHR:<=w3i6pc\rash\) !ENDIF @@ -115,17 +81,11 @@ TESTLIBOBJ0 = $(TESTLIB:<=w3i6pc\rash\) # files included in the part MPMOBJ = $(MPMOBJ0:>=.obj) -PLINTHOBJ = $(PLINTHOBJ0:>=.obj) -AMSOBJ = $(AMSOBJ0:>=.obj) -AMCOBJ = $(AMCOBJ0:>=.obj) -AWLOBJ = $(AWLOBJ0:>=.obj) -LOOBJ = $(LOOBJ0:>=.obj) -SNCOBJ = $(SNCOBJ0:>=.obj) -MVFFOBJ = $(MVFFOBJ0:>=.obj) -DWOBJ = $(DWOBJ0:>=.obj) +FMTDYOBJ = $(FMTDYOBJ0:>=.obj) FMTTESTOBJ = $(FMTTESTOBJ0:>=.obj) POOLNOBJ = $(POOLNOBJ0:>=.obj) TESTLIBOBJ = $(TESTLIBOBJ0:>=.obj) +TESTTHROBJ = 
$(TESTTHROBJ0:>=.obj) !INCLUDE commpost.nmk diff --git a/mps/code/walkt0.c b/mps/code/walkt0.c index 929408fc4b4..6b0002000d4 100644 --- a/mps/code/walkt0.c +++ b/mps/code/walkt0.c @@ -191,11 +191,14 @@ static void *test(mps_arena_t arena, mps_class_t pool_class) /* Note: stepper finds more than we expect, due to pad objects */ /* printf("stepper found %ld objs\n", sd->count); */ + + mps_arena_park(arena); mps_ap_destroy(ap); mps_root_destroy(exactRoot); mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); + mps_arena_release(arena); return NULL; } diff --git a/mps/code/zcoll.c b/mps/code/zcoll.c index 06ef499f5b7..220368f39d3 100644 --- a/mps/code/zcoll.c +++ b/mps/code/zcoll.c @@ -774,6 +774,7 @@ static void *testscriptB(void *arg, size_t s) testscriptC(arena, ap, script); printf(" Destroy roots, pools, arena etc.\n\n"); + mps_arena_park(arena); mps_root_destroy(root_stackreg); mps_ap_destroy(ap); mps_root_destroy(root_table_Exact); diff --git a/mps/code/zmess.c b/mps/code/zmess.c index 555e7377d2c..4cfaf9944f3 100644 --- a/mps/code/zmess.c +++ b/mps/code/zmess.c @@ -381,6 +381,7 @@ static void *testscriptB(void *arg, size_t s) testscriptC(arena, script); + mps_arena_park(arena); mps_ap_destroy(ap); mps_root_destroy(root_table); mps_pool_destroy(amc); diff --git a/mps/configure b/mps/configure index e491a01208f..edbc7320304 100755 --- a/mps/configure +++ b/mps/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for Memory Pool System Kit release/1.113.0. +# Generated by GNU Autoconf 2.69 for Memory Pool System Kit release/1.114.0. # # Report bugs to . # @@ -580,8 +580,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='Memory Pool System Kit' PACKAGE_TARNAME='mps-kit' -PACKAGE_VERSION='release/1.113.0' -PACKAGE_STRING='Memory Pool System Kit release/1.113.0' +PACKAGE_VERSION='release/1.114.0' +PACKAGE_STRING='Memory Pool System Kit release/1.114.0' PACKAGE_BUGREPORT='mps-questions@ravenbrook.com' PACKAGE_URL='http://www.ravenbrook.com/project/mps/' @@ -629,7 +629,9 @@ TEST_TARGET INSTALL_TARGET CLEAN_TARGET BUILD_TARGET -MPS_TARGET_NAME +MPS_BUILD_NAME +MPS_ARCH_NAME +MPS_OS_NAME MAKE host_os host_vendor @@ -1243,7 +1245,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures Memory Pool System Kit release/1.113.0 to adapt to many kinds of systems. +\`configure' configures Memory Pool System Kit release/1.114.0 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1308,7 +1310,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of Memory Pool System Kit release/1.113.0:";; + short | recursive ) echo "Configuration of Memory Pool System Kit release/1.114.0:";; esac cat <<\_ACEOF @@ -1389,7 +1391,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -Memory Pool System Kit configure release/1.113.0 +Memory Pool System Kit configure release/1.114.0 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -1691,7 +1693,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. 
-It was created by Memory Pool System Kit $as_me release/1.113.0, which was +It was created by Memory Pool System Kit $as_me release/1.114.0, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -3455,25 +3457,33 @@ case $host/$CLANG in i*86-*-linux*/no) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux x86" >&5 $as_echo "Linux x86" >&6; } - MPS_TARGET_NAME=lii3gc + MPS_OS_NAME=li + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc PFMCFLAGS="$CFLAGS_GC" ;; x86_64-*-linux*/no) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux x86_64" >&5 $as_echo "Linux x86_64" >&6; } - MPS_TARGET_NAME=lii6gc + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc PFMCFLAGS="$CFLAGS_GC" ;; x86_64-*-linux*/yes) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Linux x86_64" >&5 $as_echo "Linux x86_64" >&6; } - MPS_TARGET_NAME=lii6ll + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll PFMCFLAGS="$CFLAGS_LL" ;; i*86-*-darwin*/*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Mac OS X x86" >&5 $as_echo "Mac OS X x86" >&6; } - MPS_TARGET_NAME=xci3ll + MPS_OS_NAME=xc + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=ll BUILD_TARGET=build-via-xcode CLEAN_TARGET=clean-xcode-build INSTALL_TARGET=install-xcode-build @@ -3483,7 +3493,9 @@ $as_echo "Mac OS X x86" >&6; } x86_64-apple-darwin*/*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Mac OS X x86_64" >&5 $as_echo "Mac OS X x86_64" >&6; } - MPS_TARGET_NAME=xci6ll + MPS_OS_NAME=xc + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll BUILD_TARGET=build-via-xcode CLEAN_TARGET=clean-xcode-build INSTALL_TARGET=install-xcode-build @@ -3493,7 +3505,9 @@ $as_echo "Mac OS X x86_64" >&6; } i*86-*-freebsd*/no) { $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86" >&5 $as_echo "FreeBSD x86" >&6; } - MPS_TARGET_NAME=fri3gc + MPS_OS_NAME=fr + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc # Need /usr/local/include in order to find sqlite3.h CFLAGS="-I/usr/local/include" CPP="$CC -I/usr/local/include -E" @@ -3502,7 +3516,9 @@ $as_echo "FreeBSD x86" >&6; } amd64-*-freebsd*/no | x86_64-*-freebsd*/no) { $as_echo "$as_me:${as_lineno-$LINENO}: result: FreeBSD x86_64" >&5 $as_echo "FreeBSD x86_64" >&6; } - MPS_TARGET_NAME=fri6gc + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc # Need /usr/local/include in order to find sqlite3.h CFLAGS="-I/usr/local/include" CPP="$CC -I/usr/local/include -E" @@ -3581,6 +3597,8 @@ CFLAGS="$CFLAGS $PFMCFLAGS" + + ac_config_files="$ac_config_files Makefile example/scheme/Makefile" @@ -4126,7 +4144,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by Memory Pool System Kit $as_me release/1.113.0, which was +This file was extended by Memory Pool System Kit $as_me release/1.114.0, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -4180,7 +4198,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -Memory Pool System Kit config.status release/1.113.0 +Memory Pool System Kit config.status release/1.114.0 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/mps/configure.ac b/mps/configure.ac index 6ee47ca05a6..f564f047d5e 100644 --- a/mps/configure.ac +++ b/mps/configure.ac @@ -47,22 +47,30 @@ TEST_TARGET=test-make-build case $host/$CLANG in i*86-*-linux*/no) AC_MSG_RESULT([Linux x86]) - MPS_TARGET_NAME=lii3gc + MPS_OS_NAME=li + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc PFMCFLAGS="$CFLAGS_GC" ;; x86_64-*-linux*/no) AC_MSG_RESULT([Linux x86_64]) - MPS_TARGET_NAME=lii6gc + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc PFMCFLAGS="$CFLAGS_GC" ;; x86_64-*-linux*/yes) AC_MSG_RESULT([Linux x86_64]) - MPS_TARGET_NAME=lii6ll + MPS_OS_NAME=li + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll PFMCFLAGS="$CFLAGS_LL" ;; i*86-*-darwin*/*) AC_MSG_RESULT([Mac OS X x86]) - MPS_TARGET_NAME=xci3ll + MPS_OS_NAME=xc + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=ll BUILD_TARGET=build-via-xcode CLEAN_TARGET=clean-xcode-build INSTALL_TARGET=install-xcode-build @@ -71,7 +79,9 @@ case $host/$CLANG in ;; x86_64-apple-darwin*/*) AC_MSG_RESULT([Mac OS X x86_64]) - MPS_TARGET_NAME=xci6ll + MPS_OS_NAME=xc + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=ll BUILD_TARGET=build-via-xcode CLEAN_TARGET=clean-xcode-build INSTALL_TARGET=install-xcode-build @@ -80,7 +90,9 @@ case $host/$CLANG in ;; i*86-*-freebsd*/no) AC_MSG_RESULT([FreeBSD x86]) - MPS_TARGET_NAME=fri3gc + MPS_OS_NAME=fr + MPS_ARCH_NAME=i3 + MPS_BUILD_NAME=gc # Need /usr/local/include in order to find sqlite3.h CFLAGS="-I/usr/local/include" CPP="$CC -I/usr/local/include -E" @@ -88,7 +100,9 @@ case $host/$CLANG in ;; amd64-*-freebsd*/no | x86_64-*-freebsd*/no) AC_MSG_RESULT([FreeBSD x86_64]) - MPS_TARGET_NAME=fri6gc + MPS_OS_NAME=fr + MPS_ARCH_NAME=i6 + MPS_BUILD_NAME=gc # Need /usr/local/include in order to find sqlite3.h CFLAGS="-I/usr/local/include" CPP="$CC -I/usr/local/include -E" @@ -111,7 +125,9 @@ AC_CHECK_HEADER([sqlite3.h], [EXTRA_TARGETS="$EXTRA_TARGETS mpseventsql"]) # those flags. CFLAGS="$CFLAGS $PFMCFLAGS" -AC_SUBST(MPS_TARGET_NAME) +AC_SUBST(MPS_OS_NAME) +AC_SUBST(MPS_ARCH_NAME) +AC_SUBST(MPS_BUILD_NAME) AC_SUBST(BUILD_TARGET) AC_SUBST(CLEAN_TARGET) AC_SUBST(INSTALL_TARGET) diff --git a/mps/design/arena.txt b/mps/design/arena.txt index a1fae81ce5e..0532213c294 100644 --- a/mps/design/arena.txt +++ b/mps/design/arena.txt @@ -237,7 +237,7 @@ _`.tract.structure`: The tract structure definition looks like this:: void *p; /* pointer for use of owning pool */ Addr base; /* Base address of the tract */ TraceSet white : TRACE_MAX; /* traces for which tract is white */ - unsigned int hasSeg : 1; /* does tract have a seg in p? */ + BOOLFIELD(hasSeg); /* does tract have a seg in p? */ } TractStruct; _`.tract.field.pool`: The pool.pool field indicates to which pool the tract @@ -262,10 +262,9 @@ use it for any purpose. _`.tract.field.hasSeg`: The ``hasSeg`` bit-field is a Boolean which indicates whether the ``p`` field is being used by the segment module. -If this field is ``TRUE``, then the value of ``p`` is a ``Seg``. -``hasSeg`` is typed as an ``unsigned int``, rather than a ``Bool``. -This ensures that there won't be sign conversion problems when -converting the bit-field value. 
+If this field is ``TRUE``, then the value of ``p`` is a ``Seg``. See +design.mps.type.bool.bitfield for why this is declared using the +``BOOLFIELD`` macro. _`.tract.field.base`: The base field contains the base address of the memory represented by the tract. @@ -273,7 +272,7 @@ memory represented by the tract. _`.tract.field.white`: The white bit-field indicates for which traces the tract is white (`.req.fun.trans.white`_). This information is also stored in the segment, but is duplicated here for efficiency during a -call to ``TraceFix`` (see design.mps.trace.fix). +call to ``TraceFix()`` (see design.mps.trace.fix). _`.tract.limit`: The limit of the tract's memory may be determined by adding the arena alignment to the base address. diff --git a/mps/design/cbs.txt b/mps/design/cbs.txt index 0d8c81df74e..496cc5ea246 100644 --- a/mps/design/cbs.txt +++ b/mps/design/cbs.txt @@ -20,7 +20,10 @@ eager coalescence. _`.readership`: This document is intended for any MM developer. -_`.source`: design.mps.poolmv2, design.mps.poolmvff. +_`.source`: design.mps.poolmvt_, design.mps.poolmvff_. + +.. _design.mps.poolmvt: poolmvt +.. _design.mps.poolmvff: poolmvff _`.overview`: The "coalescing block structure" is a set of addresses (or a subset of address space), with provision for efficient @@ -29,50 +32,27 @@ high level communication with the client about the size of contiguous ranges, and detection of protocol violations. -Definitions ------------ - -_`.def.range`: A (contiguous) *range* of addresses is a semi-open -interval on address space. - -_`.def.isolated`: A contiguous range is *isolated* with respect to -some property it has, if adjacent elements do not have that property. - - Requirements ------------ -_`.req.set`: Must maintain a set of addresses. +In addition to the generic land requirements (see +design.mps.land_), the CBS must satisfy: + +.. _design.mps.land: land _`.req.fast`: Common operations must have a low amortized cost. -_`.req.add`: Must be able to add address ranges to the set. - -_`.req.remove`: Must be able to remove address ranges from the set. - -_`.req.size`: Must report concisely to the client when isolated -contiguous ranges of at least a certain size appear and disappear. - -_`.req.iterate`: Must support the iteration of all isolated -contiguous ranges. This will not be a common operation. - -_`.req.protocol`: Must detect protocol violations. - -_`.req.debug`: Must support debugging of client code. - _`.req.small`: Must have a small space overhead for the storage of typical subsets of address space and not have abysmal overhead for the storage of any subset of address space. -_`.req.align`: Must support an alignment (the alignment of all -addresses specifying ranges) of down to ``sizeof(void *)`` without -losing memory. - Interface --------- -_`.header`: CBS is used through impl.h.cbs. +_`.land`: CBS is an implementation of the *land* abstract data type, +so the interface consists of the generic functions for lands. See +design.mps.land_. External types @@ -80,179 +60,111 @@ External types ``typedef struct CBSStruct *CBS`` -_`.type.cbs`: ``CBS`` is the main data structure for manipulating a -CBS. It is intended that a ``CBSStruct`` be embedded in another -structure. No convenience functions are provided for the allocation or -deallocation of the CBS. - -``typedef Bool (*CBSIterateMethod)(CBS cbs, Range range, void *closureP, Size closureS)`` - -_`.type.cbs.iterate.method`: Type ``CBSIterateMethod`` is a callback -function that may be passed to ``CBSIterate()``. 
It is called for -every isolated contiguous range in address order. The function must -returns a ``Bool`` indicating whether to continue with the iteration. +_`.type.cbs`: The type of coalescing block structures. A ``CBSStruct`` +may be embedded in another structure, or you can create it using +``LandCreate()``. External functions .................. -``Res CBSInit(Arena arena, CBS cbs, void *owner, Align alignment, Bool fastFind, ArgList args)`` +``LandClass CBSLandClassGet(void)`` -_`.function.cbs.init`: ``CBSInit()`` is the function that initialises -the CBS structure. It performs allocation in the supplied arena. The -parameter ``owner`` is passed to ``MeterInit()``, an ``alignment`` -indicates the alignment of ranges to be maintained. An initialised CBS -contains no ranges. +_`.function.class`: The function ``CBSLandClassGet()`` returns the CBS +class, a subclass of ``LandClass`` suitable for passing to +``LandCreate()`` or ``LandInit()``. -``fastFind``, if set, causes the CBS to maintain, for each subtree, -the size of the largest block in that subtree. This must be true if -any of the ``CBSFindFirst()``, ``CBSFindLast()``, or -``CBSFindLargest()`` functions are going to be used on the CBS. +``LandClass CBSFastLandClassGet(void)`` -``CBSInit()`` may take one keyword argument: +_`.function.class`: Returns a subclass of ``CBSLandClass`` that +maintains, for each subtree, the size of the largest block in that +subtree. This enables the ``LandFindFirst()``, ``LandFindLast()``, and +``LandFindLargest()`` generic functions. -* ``MPS_KEY_CBS_EXTEND_BY`` (type ``Size``; default 4096) is the size - of segment that the CBS will request from the arena in which to - allocate its ``CBSBlock`` structures. +``LandClass CBSZonedLandClassGet(void)`` -``void CBSFinish(CBS cbs)`` - -_`.function.cbs.finish`: ``CBSFinish()`` is the function that finishes -the CBS structure and discards any other resources associated with the -CBS. - -``Res CBSInsert(Range rangeReturn, CBS cbs, Range range)`` - -_`.function.cbs.insert`: If any part of ``range`` is already in the -CBS, then leave it unchanged and return ``ResFAIL``. Otherwise, -attempt to insert ``range`` into the CBS. If the insertion succeeds, -then update ``rangeReturn`` to describe the contiguous isolated range -containing the inserted range (this may differ from ``range`` if there -was coalescence on either side) and return ``ResOK``. If the insertion -fails, return a result code indicating allocation failure. - -_`.function.cbs.insert.fail`: Insertion of a valid range (that is, one -that does not overlap with any range in the CBS) can only fail if the -new range is isolated and the allocation of the necessary data -structure to represent it failed. +_`.function.class`: Returns a subclass of ``CBSFastLandClass`` that +maintains, for each subtree, the union of the zone sets of all ranges +in that subtree. This enables the ``LandFindInZones()`` generic +function. -``Res CBSDelete(Range rangeReturn, CBS cbs, Range range)`` -_`.function.cbs.delete`: If any part of the range is not in the CBS, -then leave the CBS unchanged and return ``ResFAIL``. Otherwise, update -``rangeReturn`` to describe the contiguous isolated range that -contains ``range`` (this may differ from ``range`` if there are -fragments on either side) and attempt to delete the range from the -CBS. If the deletion succeeds, return ``ResOK``. If the deletion -fails, return a result code indicating allocation failure. +Keyword arguments +................. 
-_`.function.cbs.delete.fail`: Deletion of a valid range (that is, one -that is wholly contained in the CBS) can only fail if there are -fragments on both sides and the allocation of the necessary data -structures to represent them fails. +When initializing a CBS, ``LandCreate()`` and ``LandInit()`` take the +following optional keyword arguments: -_`.function.cbs.delete.return`: ``CBSDelete()`` returns the contiguous -isolated range that contains ``range`` even if the deletion fails. -This is so that the caller can try deleting the whole block (which is -guaranteed to succeed) and managing the fragments using a fallback -strategy. +* ``CBSBlockPool`` (type ``Pool``) is the pool from which the CBS + block descriptors will be allocated. If omitted, a new MFS pool is + created for this purpose. -``void CBSIterate(CBS cbs, CBSIterateMethod iterate, void *closureP, Size closureS)`` +* ``MPS_KEY_CBS_EXTEND_BY`` (type ``Size``; default 4096) is passed as + the ``MPS_KEY_EXTEND_BY`` keyword argument to ``PoolCreate()`` if a + block descriptor pool is created. It specifies the size of segment + that the block descriptor pool will request from the arena. -_`.function.cbs.iterate`: ``CBSIterate()`` is the function used to -iterate all isolated contiguous ranges in a CBS. It receives a -pointer, ``Size`` closure pair to pass on to the iterator method, -and an iterator method to invoke on every range in address order. If -the iterator method returns ``FALSE``, then the iteration is -terminated. +* ``MFSExtendSelf`` (type ``Bool``; default ``TRUE``) is passed to + ``PoolCreate()`` if a block descriptor pool is created. If ``TRUE``, + the block descriptor pool automatically extends itself when out of + space; if ``FALSE``, the pool returns ``ResLIMIT`` in this case. + (This feature is used by the arena to bootstrap its own CBS of free + memory.) -``Res CBSDescribe(CBS cbs, mps_lib_FILE *stream, Count depth)`` -_`.function.cbs.describe`: ``CBSDescribe()`` prints a textual -representation of the CBS to the given stream, indicating the -contiguous ranges in order, as well as the structure of the underlying -splay tree implementation. It is provided for debugging only. +Limitations +........... -``Bool CBSFindFirst(Range rangeReturn, Range oldRangeReturn, CBS cbs, Size size, FindDelete findDelete)`` +_`.limit.find`: ``CBSLandClass`` does not support the +``LandFindFirst()``, ``LandFindLast()``, and ``LandFindLargest()`` +generic functions (the subclasses do support these operations). -_`.function.cbs.find.first`: Locate the first block (in address order) -within the CBS of at least the specified size, update ``rangeReturn`` -to describe that range, and return ``TRUE``. If there is no such -block, it returns ``FALSE``. +_`.limit.zones`: ``CBSLandClass`` and ``CBSFastLandClass`` do not +support the ``LandFindInZones()`` generic function (the subclass +``CBSZonedLandClass`` does support this operation). -In addition, optionally delete the top, bottom, or all of the found -range, depending on the ``findDelete`` argument. This saves a separate -call to ``CBSDelete()``, and uses the knowledge of exactly where we -found the range. The value of ``findDelete`` must come from this -enumeration:: +_`.limit.iterate`: CBS does not provide an implementation for the +``LandIterateAndDelete()`` generic function. This is because +``TreeTraverse()`` does not permit modification, for speed and to +avoid perturbing the splay tree balance. 
- enum { - FindDeleteNONE, /* don't delete after finding */ - FindDeleteLOW, /* delete size bytes from low end of block */ - FindDeleteHIGH, /* delete size bytes from high end of block */ - FindDeleteENTIRE /* delete entire range */ - }; - -The original contiguous isolated range in which the range was found is -returned via the ``oldRangeReturn`` argument. (If ``findDelete`` is -``FindDeleteNONE`` or ``FindDeleteENTIRE``, then this will be -identical to the range returned via the ``rangeReturn`` argument.) - -``CBSFindFirst()`` requires that ``fastFind`` was true when -``CBSInit()`` was called. - -``Bool CBSFindLast(Range rangeReturn, Range oldRangeReturn, CBS cbs, Size size, FindDelete findDelete)`` - -_`.function.cbs.find.last`: Like ``CBSFindFirst()``, except that it -finds the last block in address order. - -``Bool CBSFindLargest(Range rangeReturn, Range oldRangeReturn, CBS cbs, Size size, FindDelete findDelete)`` - -_`.function.cbs.find.largest`: Locate the largest block within the -CBS, and if that block is at least as big as ``size``, return its -range via the ``rangeReturn`` argument, and return ``TRUE``. If there -are no blocks in the CBS at least as large as ``size``, return -``FALSE``. Pass 0 for ``size`` if you want the largest block -unconditionally. - -Like ``CBSFindFirst()``, optionally delete the range (specifying -``FindDeleteLOW`` or ``FindDeleteHIGH`` has the same effect as -``FindDeleteENTIRE``). This feature requires that ``fastFind`` was -true when ``CBSInit()`` was called. +_`.limit.flush`: CBS cannot be used as the source in a call to +``LandFlush()``. (Because of `.limit.iterate`_.) Implementation -------------- -_`.impl`: This section is concerned with describing various aspects of -the implementation. It does not form part of the interface definition. - - - Splay tree .......... -_`.impl.splay`: The CBS is principally implemented using a splay tree -(see design.mps.splay_). Each splay tree node is embedded in a -``CBSBlock`` that represents a semi-open address range. The key passed +_`.impl.splay`: The CBS is implemented using a splay tree (see +design.mps.splay_). Each splay tree node is embedded in a block +structure that represents a semi-open address range. The key passed for comparison is the base of another range. .. _design.mps.splay: splay -_`.impl.splay.fast-find`: ``CBSFindFirst()`` and ``CBSFindLast()`` use -the update/refresh facility of splay trees to store, in each -``CBSBlock``, an accurate summary of the maximum block size in the -tree rooted at the corresponding splay node. This allows rapid -location of the first or last suitable block, and very rapid failure -if there is no suitable block. +_`.impl.splay.fast-find`: In the ``CBSFastLandClass`` class, +``cbsFindFirst()`` and ``cbsFindLast()`` use the update/refresh +facility of splay trees to store, in each block, an accurate summary +of the maximum block size in the tree rooted at the corresponding +splay node. This allows rapid location of the first or last suitable +block, and very rapid failure if there is no suitable block. -_`.impl.find-largest`: ``CBSFindLargest()`` simply finds out the size +_`.impl.find-largest`: ``cbsFindLargest()`` simply finds out the size of the largest block in the CBS from the root of the tree, using ``SplayRoot()``, and does ``SplayFindFirst()`` for a block of that size. This takes time proportional to the logarithm of the size of the free list, so it's about the best you can do without maintaining a -separate priority queue, just to do ``CBSFindLargest()``. 
+separate priority queue, just to do ``cbsFindLargest()``. + +_`.impl.splay.zones`: In the ``CBSZonedLandClass`` class, +``cbsFindInZones()`` uses the update/refresh facility of splay trees +to store, in each block, the union of the zones of the ranges in the +tree rooted at the corresponding splay node. This allows rapid +location of a block in a set of zones. Low memory behaviour @@ -260,10 +172,10 @@ Low memory behaviour _`.impl.low-mem`: When the CBS tries to allocate a new ``CBSBlock`` structure for a new isolated range as a result of either -``CBSInsert()`` or ``CBSDelete()``, and there is insufficient memory -to allocation the ``CBSBlock`` structure, then the range is not added -to the CBS or deleted from it, and the call to ``CBSInsert()`` or -``CBSDelete()`` returns ``ResMEMORY``. +``LandInsert()`` or ``LandDelete()``, and there is insufficient memory +to allocate the block structure, then the range is not added to the +CBS or deleted from it, and the call to ``LandInsert()`` or +``LandDelete()`` returns ``ResMEMORY``. The CBS block @@ -284,19 +196,12 @@ Testing _`.test`: The following testing will be performed on this module: -_`.test.cbstest`: There is a stress test for this module in -impl.c.cbstest. This allocates a large block of memory and then -simulates the allocation and deallocation of ranges within this block -using both a ``CBS`` and a ``BT``. It makes both valid and invalid -requests, and compares the ``CBS`` response to the correct behaviour -as determined by the ``BT``. It also iterates the ranges in the -``CBS``, comparing them to the ``BT``. It also invokes -``CBSDescribe()``, but makes no automatic test of the resulting -output. It does not currently test the callbacks. +_`.test.land`: A generic test for land implementations. See +design.mps.land.test. -_`.test.pool`: Several pools (currently MVT_ and MVFF_) are implemented -on top of a CBS. These pool are subject to testing in development, QA, -and are/will be heavily exercised by customers. +_`.test.pool`: The arena and two pools (MVT_ and MVFF_) are +implemented on top of a CBS. These are subject to testing in +development, QA, and are heavily exercised by customers. .. _MVT: poolmvt .. _MVFF: poolmvff @@ -305,9 +210,9 @@ and are/will be heavily exercised by customers. Notes for future development ---------------------------- -_`.future.not-splay`: The initial implementation of CBSs is based on -splay trees. It could be revised to use any other data structure that -meets the requirements (especially `.req.fast`_). +_`.future.not-splay`: The implementation of CBSs is based on splay +trees. It could be revised to use other data structures that meet the +requirements (especially `.req.fast`_). _`.future.hybrid`: It would be possible to attenuate the problem of `.risk.overhead`_ (below) by using a single word bit set to represent @@ -317,6 +222,11 @@ converting them when they reach all free in the bit set. Note that this would make coalescence slightly less eager, by up to ``(word-width - 1)``. +_`.future.iterate.and.delete`: It would be possible to provide an +implementation for the ``LandIterateAndDelete()`` generic function by +calling ``TreeToVine()`` first, and then iterating over the vine +(where deletion is straightforward). + Risks ----- @@ -329,7 +239,6 @@ the size of that area. [Four words per two grains.] The CBS structure is thus suitable only for managing large enough ranges. 
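
The update/refresh mechanism behind `.impl.splay.fast-find`_ and `.impl.splay.zones`_ can be pictured with a small self-contained sketch. This is not the MPS source code and the names are invented for illustration; it only shows the idea: each node caches a summary (here, the largest block size) of its whole subtree, the cache is refreshed from the children after a change, and a search can skip any subtree whose summary rules it out::

    /* Illustrative sketch only -- invented names, not the MPS code. */
    #include <stddef.h>

    typedef struct ExampleNode {
      size_t size;                     /* size of this free block */
      size_t maxSize;                  /* largest block in this subtree */
      struct ExampleNode *left, *right;
    } ExampleNode;

    static size_t exampleMaxSize(const ExampleNode *node)
    {
      return node == NULL ? 0 : node->maxSize;
    }

    /* Refresh a node's summary from its own block and its children's
       summaries; the splay tree invokes something like this whenever
       the tree shape or a block size changes. */
    static void exampleRefresh(ExampleNode *node)
    {
      size_t max = node->size;
      if (exampleMaxSize(node->left) > max) max = exampleMaxSize(node->left);
      if (exampleMaxSize(node->right) > max) max = exampleMaxSize(node->right);
      node->maxSize = max;
    }

    /* A first-fit search can fail immediately, or prune a whole
       subtree, just by consulting the cached summary. */
    static int exampleCanSatisfy(const ExampleNode *root, size_t size)
    {
      return exampleMaxSize(root) >= size;
    }

The zone summary used by ``CBSZonedLandClass`` works the same way, except that the cached value is the union of the zone sets of the ranges in the subtree rather than a maximum.
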
- Document History ---------------- @@ -358,6 +267,9 @@ Document History talking about the deleted "emergency" free list allocator. Documented ``fastFind`` argument to ``CBSInit()``. +- 2014-04-01 GDR_ Moved generic material to design.mps.land_. + Documented new keyword arguments. + .. _RB: http://www.ravenbrook.com/consultants/rb/ .. _GDR: http://www.ravenbrook.com/consultants/gdr/ diff --git a/mps/design/class-interface.txt b/mps/design/class-interface.txt index 1f716703dfd..70f83e01753 100644 --- a/mps/design/class-interface.txt +++ b/mps/design/class-interface.txt @@ -24,215 +24,196 @@ the MPM and the pool class implementations. Pirinen, 1999-07-20. +Fields +------ + +_`.field`: These fields are provided by pool classes as part of the +``PoolClass`` object (see impl.h.mpmst.class). They form part of the +interface which allows the MPM to treat pools in a uniform manner. + +_`.field.name`: The ``name`` field should be a short, pithy, cryptic +name for the pool class. It should typically start with ``"A"`` if +memory is managed by the garbage collector, and ``"M"`` if memory is +managed by alloc/free. Examples are "AMC", "MV". + +_`.field.attr`: The ``attr`` field must be a bitset of pool class +attributes. See `design.mps.type.attr`_. + +.. _design.mps.type.attr: type + +_`.field.size`: The ``size`` field is the size of the pool instance +structure. For the ``PoolFoo`` class this can reasonably be expected +to be ``sizeof(PoolFooStruct)``. + +_`.field.offset`: The ``offset`` field is the offset into the pool +instance structure of the generic ``PoolStruct``. Typically this field +is called ``poolStruct``, so something like ``offsetof(PoolFooStruct, +poolStruct)`` is typical. If possible, arrange for this to be zero. + + Methods ------- -_`.methods`: These methods are provided by pool classes as part of the -``PoolClass`` object (see impl.h.mpmst.class). They form the interface -which allows the MPM to treat pools in a uniform manner. +_`.method`: These methods are provided by pool classes as part of the +``PoolClass`` object (see impl.h.mpmst.class). They form part of the +interface which allows the MPM to treat pools in a uniform manner. -The following description is based on the definition of the -``PoolClassStruct`` (impl.h.mpmst.class). +_`.method.unused`: If a pool class is not required to provide a +certain method, the class should assign the appropriate ``PoolNo`` +method for that method to ensure that erroneous calls are detected. It +is not acceptable to use ``NULL``. -If a class is not required to provide a certain method then it should -set the appropriate ``PoolNo*`` method for that method. It is not -acceptable to use ``NULL``. +_`.method.trivial`: If a pool class if required to provide a certain +method, but the class provides no special behaviour in this case, it +should assign the appropriate ``PoolTriv`` method. -.. note:: +_`.method.init`: The ``init`` field is the pool class's init method. +This method is called via the generic function ``PoolInit()``, which +is in turn called by ``PoolCreate()``. The generic function allocates +the pool's structure (using the ``size`` and ``offset`` fields), +initializes the ``PoolStruct`` (generic part), then calls the ``init`` +method to do any class-specific initialization. Typically this means +initializing the fields in the pool instance structure. If ``init`` +returns a non-OK result code the instance structure will be +deallocated and the code returned to the caller of ``PoolInit()`` or +``PoolCreate()``. 
Note that the ``PoolStruct`` isn't made fully valid +until ``PoolInit()`` returns, so the ``init`` method must not call +``PoolCheck()``. - There are also some ``PoolTriv*`` methods. David Jones, 1997-08-19. +_`.method.finish`: The ``finish`` field is the pool class's finish +method. This method is called via the generic function +``PoolFinish()``, which is in turn called by ``PoolDestroy()``. It is +expected to finalise the pool instance structure, release any +resources allocated to the pool, and release the memory associated +with the pool instance structure. Note that the pool is valid when it +is passed to ``finish``. The ``PoolStruct`` (generic part) is finished +when the pool class's ``finish`` method returns. -_`.method.name`: The name field should be a short, pithy, cryptic name -for the pool class. Examples are "AMC", "MV". +_`.method.alloc`: The ``alloc`` field is the pool class's allocation +method. This method is called via the generic function +``PoolAlloc()``. It is expected to return a pointer to a fresh (that +is, not overlapping with any other live object) object of the required +size. Failure to allocate should be indicated by returning an +appropriate error code, and in such a case, ``*pReturn`` should not be +updated. Pool classes are not required to provide this method. -The ``size`` field is the size of the pool instance structure. For the -``Foo`` ``PoolClass`` this can reasonably be expected to be -``sizeof(FooStruct)``. - -The ``offset`` field is the offset into the pool instance structure of -the generic ``PoolStruct``. Typically this field is called -``poolStruct``, so something like ``offsetof(FooStruct, poolStruct)`` -is typical. If possible, arrange for this to be zero. - -The ``init`` field is the class's init method. This method is called -via the generic function ``PoolInit()``, which is in turn called by -``PoolCreate()``. The generic function allocates the pool's structure -(using the size and offset information), initializes the -``PoolStruct`` (generic part) then calls the ``init`` method to do any -class-specific initialization. Typically this means initializing the -fields in the class instance structure. If ``init`` returns a non-OK -result code the instance structure will be deallocated and the code -returned to the caller of ``PoolInit()``` or ``PoolCreate()``. Note that -the ``PoolStruct`` isn't made fully valid until ``PoolInit()`` returns. - -The ``finish`` field is the class's finish method. This method is -called via the generic function ``PoolFinish()``, which is in turn -called by ``PoolDestroy()``. It is expected to finalise the pool -instance structure and release any resources allocated to the pool, it -is expected to release the memory associated with the pool instance -structure. Note that the pool is valid when it is passed to -``finish``. The ``PoolStruct`` (generic part) is finished off when the -class's ``finish`` method returns. - -The ``alloc`` field is the class's allocation method. This method is -called via the generic function ``PoolAlloc()``. It is expected to -return a pointer to a fresh (that is, not overlapping with any other -live object) object of the required size. Failure to allocate should -be indicated by returning an appropriate Error code, and in such a -case, ``*pReturn`` should not be updated. Classes are not required to -provide this method, but they should provide at least one of ``alloc`` -and ``bufferCreate``. - -.. note:: - - There is no ``bufferCreate``. Gareth Rees, 2013-04-14. 
- -The ``free_`` field is the class's free method. This is intended -primarily for manual style pools. this method is called via the -generic function ``PoolFree()``. The parameters to this method are +_`.method.free`: The ``free`` method is the pool class's free method. +This is intended primarily for manual style pools. This method is +called via the generic function ``PoolFree()``. The parameters are required to correspond to a previous allocation request (possibly via a buffer). It is an assertion by the client that the indicated object is no longer required and the resources associated with it can be -recycled. Pools are not required to provide this method. +recycled. Pool classes are not required to provide this method. -The ``bufferInit`` field is the class's buffer initialization method. -It is called by the generic function ``BufferCreate()``, which allocates -the buffer descriptor and initializes the generic fields. The pool may -optionally adjust these fields or fill in extra values when -``bufferInit`` is called, but often pools set ``bufferInit`` to -``PoolTrivBufferInit()`` because they don't need to do any. If -``bufferInit`` returns a result code other than ``ResOK``, the buffer -structure is deallocated and the code is returned to the called of -``BufferCreate()``. Note that the ``BufferStruct`` isn't fully valid -until ``BufferCreate()`` returns. +_`.method.bufferInit`: The ``bufferInit`` method is the pool class's +buffer initialization method. It is called by the generic function +``BufferCreate()``, which allocates the buffer descriptor and +initializes the generic fields. The pool may optionally adjust these +fields or fill in extra values. If ``bufferInit`` returns a result +code other than ``ResOK``, the buffer structure is deallocated and the +result code is returned to the caller of ``BufferCreate()``. Note that +the ``BufferStruct`` isn't fully valid until ``BufferCreate()`` +returns. Pool classes are not required to provide this method. -The ``bufferFinish`` field is the class's buffer finishing method. It -is called by the the generic function ``BufferDestroy()``. The pool is -expected to detach the buffer from any memory and prepare the buffer -for destruction. The class is expected to release the resources -associated with the buffer structure, and any unreserved memory in the -buffer may be recycled. It is illegal for a buffer to be destroyed -when there are pending allocations on it (that is, an allocation has -been reserved, but not committed) and this is checked in the generic -function. This method should be provided if and only if -``bufferCreate`` is provided. [there is no ``bufferCreate`` -- drj -1997-08-19] +_`.method.bufferFinish`: The ``bufferFinish`` method is the pool +class's buffer finishing method. It is called by the the generic +function ``BufferDestroy()``. The pool is expected to detach the +buffer from any memory and prepare the buffer for destruction. The +pool is expected to release the resources associated with the buffer +structure, and any unreserved memory in the buffer may be recycled. It +is illegal for a buffer to be destroyed when there are pending +allocations on it (that is, an allocation has been reserved, but not +committed) and this is checked in the generic function. This method +must be provided if and only if ``bufferInit`` is provided. -The ``condemn`` field is used to condemn a pool. This method is called -via the generic function ``PoolCondemn()``. 
The class is expected to -condemn a subset (possible the whole set) of objects it manages and -participate in a global trace to determine liveness. The class should -register the refsig of the condemned set with the trace using -``TraceCondemn()``. The class should expect fix requests (via the fix -method below) during a global trace. Classes are not required to -provide this method, but it is expected that automatic style classes -will. This interface is expected to change in the future. +_`.method.access`: The ``access`` method is used to handle client +access. This method is called via the generic functions +``ArenaAccess()`` and ``PoolAccess()``. It indicates that the client +has attempted to access the specified region, but has been denied and +the request trapped due to a protection state. The pool should perform +any work necessary to remove the protection whilst still preserving +appropriate invariants (typically this will be scanning work). Pool +classes are not required to provide this method, and not doing so +indicates they never protect any memory managed by the pool. -.. note:: +_`.method.whiten`: The ``whiten`` method is used to condemn a segment +belonging to a pool. This method is called via the generic function +``PoolWhiten()``. The pool is expected to condemn a subset (but +typically all) of the objects in the segment and prepare the segment +for participation in a global trace to determine liveness. The pool +should expect fix requests (via the ``fix`` method below) during a +global trace. Pool classes that automatically reclaim dead objects +must provide this method, and must additionally set the ``AttrGC`` +attribute. - ``condemn`` now takes an action and a segment and should condemn - the segment (turn it white) if it corresponds to the - interpretation of the action. David Jones, 1997-08-19. +_`.method.grey`: The ``grey`` method is used to greyen a segment +belonging to a pool. This method is called via the generic function +``PoolGrey()``. The pool should set all of the objects in the segment +(excepting any set that has been condemned in this trace) to be grey, +that is, ready for scanning. The pool should arrange that any +appropriate invariants are preserved, possibly by using the protection +interface (see `design.mps.prot`_). Pool classes are not required to +provide this method, and not doing so indicates that all instances of +this class will have no fixable or traceable references in them. - It is now called ``whiten``. David Jones, 1998-02-02. +.. _design.mps.prot: prot -The ``mark`` field is used to mark an entire pool. This method is -called via the generic function ``PoolMark()``. The class should -consider all of its objects, except any set that has been condemned in -this trace, to be marked, that is ready for scanning. The class should -arrange that any appropriate invariants are preserved possibly by the -Protection interface. Classes are not required to provide this method, -and not doing so indicates that all instances of this class will have -no fixable or traceable references in them. +_`.method.blacken`: The ``blacken`` method is used to blacken a +segment belonging to a pool. This method is called via the generic +function ``PoolBlacken()`` when it is known that the segment cannot +refer to the white set. The pool must blacken all grey objects in the +segment. Pool classes are not required to provide this method, and not +doing so indicates that all instances of this class will have no +fixable or traceable references in them. -.. 
note:: +_`.method.scan`: The ``scan`` method is used to scan a segment. This +method is called via the generic function ``PoolScan()``. The pool +must scan all the known grey objects on the segment and it may also +accumulate a summary of *all* the objects on the segment. If it +succeeds in accumulating such a summary it must indicate that it has +done so by setting the ``totalReturn`` parameter to ``TRUE``. Pool +classes are not required to provide this method, and not doing so +indicates that all instances of this class will have no fixable or +traceable reference in them. - ``mark`` is no longer present: ``grey`` turns an entire segment - grey. David Jones, 1997-08-19. +_`.method.fix`: The ``fix`` method is used to perform fixing. This +method is called via the generic function ``TraceFix()``. It indicates +that the specified reference has been found and the pool should +consider the object to be live. There is provision for adjusting the +value of the reference (to allow for classes that move objects). Pool +classes are not required to provide this method. Pool classes that +automatically reclaim dead objects must provide this method, and must +additionally set the ``AttrGC`` attribute. Pool classes that may move +objects must also set the ``AttrMOVINGGC`` attribute. -The ``scan`` field is used to perform scanning. This method is called -via the generic function ``PoolScan()``. The class should scan the -segment specified. It should scan all the known live (marked, that is, -those objects on which fix has been called) on the segment and -accumulate a summary of *all* the objects on the segment. This means -that mark and sweep pools may have to jump through hoops a little bit -(see design.mps.poolasm.summary for a pedagogical example). Classes -are not required to provide this method, and not doing so indicates -that all instances of this class will have no fixable or traceable -reference in them. +_`.method.fixEmergency`: The ``fixEmergency`` method is used to +perform fixing in "emergency" situations. It must complete its work +without allocating memory (perhaps by using some approximation, or by +running more slowly). Pool classes must provide this method if they +provide the ``fix`` method. -.. note:: +_`.method.reclaim`: The ``reclaim`` method is used to reclaim memory +in a segment. This method is called via the generic function +``PoolReclaim()``. It indicates that any remaining white objects in +the segment have now been proved unreachable, hence are dead. The pool +should reclaim the resources associated with the dead objects. Pool +classes are not required to provide this method. If they do, they must +set the ``AttrGC`` attribute. - The ``scan`` method now takes an extra return parameter which - classes should use to indicate whether they scanned all objects in - segment or not. Classes should return summary only of object they - scanned. Caller of this method (``TraceScan()``) is responsible - for updating summaries correctly when not a total scan. Hence no - jumping through hoops required. David Jones, 1998-01-30. +_`.method.walk`: The ``walk`` method is used by the heap walker. The +``walk`` method should apply the visitor function (along with its +closure parameters and the object format) to all *black* objects in +the segment. Padding objects may or may not be included in the walk at +the class's discretion; in any case it will be the responsibility of +the client to do something sensible with padding objects. Forwarding +objects are never included in the walk. 
Pool classes need not provide +this method. If they do, they must set the ``AttrFMT`` attribute. -The ``fix`` field is used to perform fixing. This method is called via -the generic function ``TraceFix()``. It indicates that the specified -reference has been found and the class should consider the object -live. There is provision for adjusting the value of the reference (to -allow for classes that move objects). Classes are not required to -provide this method, and not doing so indicates that the class is not -automatic style (ie it does not use global tracing to determine -liveness). - -The ``reclaim`` field is used to reclaim memory. This method is called -via the generic function ``PoolReclaim()``. It indicates that the trace -has fixed all references to reachable objects. - -.. note:: - - Actually it indicates that any remaining white objects have now - been proved unreachable, hence are dead. David Jones, 1997-08-19. - -The class should consider objects that have been condemned and not -fixed in this trace to be dead and may reclaim the resources -associated with them. Classes are not required to provide this method. - -.. note:: - - ``reclaim`` is now called on each segment. David Jones, - 1997-08-19. - -The ``access`` field is used to indicate client access. This method is -called via the generic functions ``SpaceAccess()`` and -``PoolAccess()``. It indicates that the client has attempted to access -the specified region, but has been denied and the request trapped due -to a protection state. The class should perform any work necessary to -remove the protection whilst still preserving appropriate invariants -(typically this will be scanning work). Classes are not required to -provide this method, and not doing so indicates they never protect any -memory managed by the pool. - -.. note:: - - ``access`` is no longer present. David Jones, 1997-08-19. - -_`.method.act`: ``act`` is called when the MPM has decided to execute -an action that the class declared. The Class should arrange execution -of the associated work (usually by beginning an incremental trace). - -_`.method.walk`: ``walk`` is used by the heap walker. ``walk`` is only -required to be implemented by classes which specify the AttrFMT -attribute (formatted pools). The ``walk`` method should apply the -passed in function (along with its closure variables (which are also -passed in) and the object format) to all *black* objects in the -segment. Padding objects may or may not be included in the walk at the -classes discretion, in any case in will be the responsibility of the -client to do something sensible with padding objects. - -.. note:: - - What about broken hearts? David Jones, 1998-01-30. - -The ``describe`` field is used to print out a description of a pool. -This method is called via the generic function ``PoolDescribe()``. The -class should emit an textual description of the pool's contents onto -the specified stream. Each line should begin with two spaces. Classes -are not required to provide this method. +_`.method.describe`: The ``describe`` method is used to print out a +description of a pool. This method is called via the generic function +``PoolDescribe()``. The class should emit a textual description of +the pool's contents onto the specified stream. Each line should begin +with two spaces. Pool classes are not required to provide this method. Events @@ -270,6 +251,8 @@ Document history - 2013-03-12 GDR_ Converted to reStructuredText. +- 2014-06-08 GDR_ Brought method descriptions up to date. + .. 
_RB: http://www.ravenbrook.com/consultants/rb/ .. _GDR: http://www.ravenbrook.com/consultants/gdr/ diff --git a/mps/design/config.txt b/mps/design/config.txt index 03bfec5109a..b99191d7ab6 100644 --- a/mps/design/config.txt +++ b/mps/design/config.txt @@ -97,6 +97,10 @@ as a dimension of configuration since `.req.prod`_ has been retired. _`.def.target`: The *target* is the result of the build. +_`.def.option`: An *option* is a feature of the MPS that is not +selected via the *platform* and *variety*. See `.opt`_. + + Overview -------- @@ -150,7 +154,7 @@ _`.build.cc`: A consequence of this approach is that it should always be possible to build a complete target with a single UNIX command line calling the compiler driver (usually "cc" or "gcc"), for example:: - cc -o main -DCONFIG_VAR_DF foo.c bar.c baz.s -lz + cc -o main -DCONFIG_VAR_COOL foo.c bar.c baz.s -lz _`.build.defs`: The "defs" are the set of preprocessor macros which are to be predefined when compiling the module sources:: @@ -319,12 +323,14 @@ _`.pf.form`: This file consists of sets of directives of the form:: #elif #define MPS_PF_ + #define MPS_PF_STRING "" #define MPS_OS_ #define MPS_ARCH_ #define MPS_BUILD_ #define MPS_T_WORD #define MPS_T_ULONGEST - #define MPS_WORD_SHIFT + #define MPS_WORD_WIDTH + #define MPS_WORD_SHIFT #define MPS_PF_ALIGN _`.pf.detect`: The conjunction of builder predefinitions is a constant @@ -513,6 +519,33 @@ For example, this sort of thing:: This violates `.no-spaghetti`_. +Configuration options +--------------------- + +_`.opt`: Options select features of the MPS that are not selected by the *platform* and the *variety*. + +_`.opt.support`: The features selected by options are not supported or +documented in the public interface. This is to keep the complexity of +the MPS manageable: at present the number of supported configurations +is *platforms* × *varieties* (at time of writing, 9 × 3 = 27). Each +supported option would double (or worse) the number of supported +configurations. + +_`.opt.ansi`: ``CONFIG_PF_ANSI`` tells ``mps.c`` to exclude the +sources for the auto-detected platform, and use the generic ("ANSI") +platform instead. + +_`.opt.thread`: ``CONFIG_THREAD_SINGLE`` causes the MPS to be built +for single-threaded execution only, where locks are not needed and so +lock operations can be defined as no-ops by ``lock.h``. + +_`.opt.poll`: ``CONFIG_POLL_NONE`` causes the MPS to be built without +support for polling. This means that garbage collections will only +happen if requested explicitly via ``mps_arena_collect()`` or +``mps_arena_step()``, but it also means that protection is not needed, +and so shield operations can be replaced with no-ops in ``mpm.h``. + + To document ----------- - What about constants in config.h? diff --git a/mps/design/failover.txt b/mps/design/failover.txt new file mode 100644 index 00000000000..5fdb5a9b76d --- /dev/null +++ b/mps/design/failover.txt @@ -0,0 +1,150 @@ +.. mode: -*- rst -*- + +Fail-over allocator +=================== + +:Tag: design.mps.failover +:Author: Gareth Rees +:Date: 2014-04-01 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. + + +Introduction +------------ + +_`.intro`: This is the design of the fail-over allocator, a data +structure for the management of address ranges. + +_`.readership`: This document is intended for any MPS developer. + +_`.source`: design.mps.land_, design.mps.poolmvt_, design.mps.poolmvff_. + +_`.overview`: The fail-over allocator combines two *land* instances. 
+It stores address ranges in one of the lands (the *primary*) unless +insertion fails, in which case it falls back to the other (the +*secondary*). The purpose is to be able to combine two lands with +different properties: with a CBS_ for the primary and a Freelist_ for +the secondary, operations are fast so long as there is memory to +allocate new nodes in the CBS, but operations can continue using the +Freelist when memory is low. + +.. _CBS: cbs +.. _Freelist: freelist +.. _design.mps.land: land +.. _design.mps.poolmvt: poolmvt +.. _design.mps.poolmvff: poolmvff + + +Interface +--------- + +_`.land`: The fail-over allocator is an implementation of the *land* +abstract data type, so the interface consists of the generic functions +for lands. See design.mps.land_. + + +External types +.............. + +``typedef struct FailoverStruct *Failover`` + +_`.type.failover`: The type of fail-over allocator structures. A +``FailoverStruct`` may be embedded in another structure, or you can +create it using ``LandCreate()``. + + +External functions +.................. + +``LandClass FailoverLandClassGet(void)`` + +_`.function.class`: The function ``FailoverLandClassGet()`` returns +the fail-over allocator class, a subclass of ``LandClass`` suitable +for passing to ``LandCreate()`` or ``LandInit()``. + + +Keyword arguments +................. + +When initializing a fail-over allocator, ``LandCreate()`` and +``LandInit()`` require these two keyword arguments: + +* ``FailoverPrimary`` (type ``Land``) is the primary land. + +* ``FailoverSecondary`` (type ``Land``) is the secondary land. + + +Implementation +-------------- + +_`.impl.assume`: The implementation assumes that the primary is fast +but space-hungry (a CBS) and the secondary is slow but space-frugal (a +Freelist). This assumption is used in the following places: + +_`.impl.assume.flush`: The fail-over allocator attempts to flush the +secondary to the primary before any operation, in order to benefit +from the speed of the primary wherever possible. In the normal case +where the secondary is empty this is cheap. + +_`.impl.assume.delete`: When deletion of a range on the primary fails +due to lack of memory, we assume that this can only happen when there +are splinters on both sides of the deleted range, one of which needs +to be allocated a new node (this is the case for CBS), and that +therefore the following procedure will be effective: first, delete the +enclosing range from the primary (leaving no splinters and thus +requiring no allocation), and re-insert the splinters (failing over to +the secondary if necessary). + + + +Document History +---------------- + +- 2014-04-03 GDR_ Created. + +.. _GDR: http://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014 Ravenbrook Limited. All rights reserved. +. This is an open source license. Contact +Ravenbrook for commercial licensing options. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +#. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +#. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +#. 
Redistributions in any form must be accompanied by information on how + to obtain complete source code for this software and any + accompanying software that uses this software. The source code must + either be included in the distribution or be available for no more than + the cost of distribution plus a nominal fee, and must be freely + redistributable under reasonable conditions. For an executable file, + complete source code means the source code for all modules it contains. + It does not include source code for modules or files that typically + accompany the major components of the operating system on which the + executable file runs. + +**This software is provided by the copyright holders and contributors +"as is" and any express or implied warranties, including, but not +limited to, the implied warranties of merchantability, fitness for a +particular purpose, or non-infringement, are disclaimed. In no event +shall the copyright holders and contributors be liable for any direct, +indirect, incidental, special, exemplary, or consequential damages +(including, but not limited to, procurement of substitute goods or +services; loss of use, data, or profits; or business interruption) +however caused and on any theory of liability, whether in contract, +strict liability, or tort (including negligence or otherwise) arising in +any way out of the use of this software, even if advised of the +possibility of such damage.** diff --git a/mps/design/freelist.txt b/mps/design/freelist.txt index 455816cf623..b0654468de1 100644 --- a/mps/design/freelist.txt +++ b/mps/design/freelist.txt @@ -41,174 +41,53 @@ When memory becomes available again to allocate control structures, the free lists can be "flushed" back into the more efficient data structures. -_`.bg`: The free list allocator was formerly part of the Coalescing -Block Structure module (see design.mps.cbs) but it was split into its -own module because this makes it: - -#. simpler (no need to interact with CBS) and thus more maintainable; -#. possible to test directly (no need to create a CBS and then force - its control pool to run out of memory); and -#. usable as a fallback allocator in other pools (not just in pools - that use CBS). - - -Definitions ------------ - -_`.def.range`: A (contiguous) *range* of addresses is a semi-open -interval on address space. - -_`.def.isolated`: A contiguous range is *isolated* with respect to -some property it has, if adjacent elements do not have that property. - Requirements ------------ -_`.req.set`: Must maintain a set of free address ranges. +In addition to the generic land requirements (see design.mps.land_), +free lists must satisfy: -_`.req.add`: Must be able to add free address ranges to the set. - -_`.req.remove`: Must be able to remove address ranges from the set (in -particular, when memory is allocated). - -_`.req.iterate`: Must support the iteration of all isolated contiguous -ranges. - -_`.req.protocol`: Must detect protocol violations. - -_`.req.align`: Must support an alignment (the alignment of all -addresses specifying ranges) of down to ``sizeof(void *)`` without -losing memory. +.. _design.mps.land: land _`.req.zero-overhead`: Must have zero space overhead for the storage of any set of free blocks, so that it can be used to manage memory when no memory can be allocated for control structures. 
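To illustrate `.req.zero-overhead`_, here is a hedged sketch (not part of the MPS sources; ``FreeBlockStruct`` and ``freeBlockPush()`` are hypothetical names) of the idea that the list links are overlaid on the free ranges themselves, as described under `.impl.block`_ and `.impl.grain`_ below, so that no control structures ever need to be allocated::

    /* Illustrative only: each free range stores its own link words,
     * so the list itself consumes no extra memory. */
    #include <stdio.h>

    typedef struct FreeBlockStruct {
      struct FreeBlockStruct *next; /* next free range in address order */
      void *limit;                  /* limit (one past the end) of this range */
    } FreeBlockStruct, *FreeBlock;

    /* Push the free range [base, limit) onto the list head.  (No coalescing
     * or address ordering here; this shows only the space-overhead idea.) */
    static FreeBlock freeBlockPush(FreeBlock head, void *base, void *limit)
    {
      FreeBlock block = base;       /* reuse the freed memory as the node */
      block->next = head;
      block->limit = limit;
      return block;
    }

    int main(void)
    {
      static void *memory[32];      /* 32 words standing in for freed memory */
      char *base = (char *)memory;
      FreeBlock list = NULL, block;
      list = freeBlockPush(list, base, base + 16 * sizeof(void *));
      list = freeBlockPush(list, base + 16 * sizeof(void *), base + 32 * sizeof(void *));
      for (block = list; block != NULL; block = block->next)
        printf("free range of %lu bytes\n",
               (unsigned long)((char *)block->limit - (char *)block));
      return 0;
    }
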
-_`.req.source`: This set of requirements is derived from those of the -CBS module (see design.mps.cbs.req), except that there is no -equivalent of design.mps.cbs.req.fast, and design.mps.cbs.req.small -has been replaced with `.req.zero-overhead`_. - Interface --------- +_`.land`: Free lists are an implementation of the *land* abstract data +type, so the interface consists of the generic functions for lands. +See design.mps.land_. + Types ..... ``typedef struct FreelistStruct *Freelist`` -_`.type.freelist`: The type of free lists. The structure -``FreelistStruct`` is declared in the header so that it can be inlined -in other structures, but you should not depend on its details. - -``typedef Bool (*FreelistIterateMethod)(Bool *deleteReturn, Freelist fl, Range range, void *closureP, Size closureS)`` - -_`.type.iterate.method`: A callback function that may be passed to -``FreelistIterate()``. It is called for every isolated contiguous -range in address order, and with the closure arguments that were -originally passed to ``FreelistIterate()``. It must update -``*deleteReturn`` to ``TRUE`` if the range must be deleted from the -free lists, or ``FALSE`` if the range must be kept. The function must -return ``TRUE`` if the iteration must continue, and ``FALSE`` if the -iteration must stop (after possibly deleting the current range). +_`.type.freelist`: The type of free lists. A ``FreelistStruct`` may be +embedded in another structure, or you can create it using +``LandCreate()``. -Functions -......... +External functions +.................. -``Res FreelistInit(Freelist fl, Align alignment)`` +``LandClass FreelistLandClassGet(void)`` -_`.function.init`: Initialize the ``Freelist`` structure pointed to by -``fl``. The argument ``alignment`` is the alignment of address ranges -to be maintained. An initialised free list contains no address ranges. +_`.function.class`: The function ``FreelistLandClassGet()`` returns +the free list class, a subclass of ``LandClass`` suitable for passing +to ``LandCreate()`` or ``LandInit()``. -``void FreelistFinish(Freelist fl)`` -_`.function.finish`: Finish the free list pointed to by ``fl``. - -``Res FreelistInsert(Range rangeReturn, Freelist fl, Range range)`` - -_`.function.insert`: If any part of ``range`` is already in the free -list ``fl``, then leave the free list unchanged and return -``ResFAIL``. Otherwise, insert ``range`` into the free list ``fl``; -update ``rangeReturn`` to describe the contiguous isolated range -containing the inserted range (this may differ from ``range`` if there -was coalescence on either side) and return ``ResOK``. - -``Res FreelistDelete(Range rangeReturn, Freelist fl, Range range)`` - -_`.function.delete`: If any part of the range is not in the free list, -then leave the free list unchanged and return ``ResFAIL``. Otherwise, -remove ``range`` from the free list and update ``rangeReturn`` to -describe the contiguous isolated range that formerly contained the -deleted range (this may differ from ``range`` if there were fragments -left on either side), and return ``ResOK``. - -``void FreelistIterate(Freelist fl, FreelistIterateMethod iterate, void *closureP, Size closureS)`` - -_`.function.iterate`: Iterate all isolated contiguous ranges in the -free list ``fl`` in address order, calling ``iterate`` for each one. -See ``FreelistIterateMethod`` for details. 
- -``Bool FreelistFindFirst(Range rangeReturn, Range oldRangeReturn, Freelist fl, Size size, FindDelete findDelete)`` - -_`.function.find.first`: Locate the first isolated contiguous range in -address order, within the free list ``fl``, of at least ``size`` -bytes, update ``rangeReturn`` to that range, and return ``TRUE``. If -there is no such continuous range, return ``FALSE``. - -In addition, optionally delete the found range from the free list, -depending on the ``findDelete`` argument. This saves a separate call -to ``FreelistDelete()``, and uses the knowledge of exactly where we -found the range. The value of ``findDelete`` must come from this -enumeration:: - - enum { - FindDeleteNONE, /* don't delete after finding */ - FindDeleteLOW, /* delete size bytes from low end of block */ - FindDeleteHIGH, /* delete size bytes from high end of block */ - FindDeleteENTIRE /* delete entire range */ - }; - -The original contiguous isolated range in which the range was found is -returned via the ``oldRangeReturn`` argument. (If ``findDelete`` is -``FindDeleteNONE`` or ``FindDeleteENTIRE``, then this will be -identical to the range returned via the ``rangeReturn`` argument.) - -``Bool FreelistFindLast(Range rangeReturn, Range oldRangeReturn, Freelist fl, Size size, FindDelete findDelete)`` - -_`.function.find.last`: Like ``FreelistFindFirst()``, except that it -finds the last block in address order. - -``Bool FreelistFindLargest(Range rangeReturn, Range oldRangeReturn, Freelist fl, Size, size, FindDelete findDelete)`` - -_`.function.find.largest`: Locate the largest block within the free -list ``fl``, and if that block is at least as big as ``size``, return -its range via the ``rangeReturn`` argument, and return ``TRUE``. If -there are no blocks in the free list at least as large as ``size``, -return ``FALSE``. Pass 0 for ``size`` if you want the largest block -unconditionally. - -Like ``FreelistFindFirst()``, optionally delete the range from the -free list. (Always the whole range: specifying ``FindDeleteLOW`` or -``FindDeleteHIGH`` has the same effect as ``FindDeleteENTIRE``). - -``void FreelistFlushToCBS(Freelist fl, CBS cbs)`` - -Remove free address ranges from the free list ``fl`` and add them to -the Coalescing Block Structure ``cbs``. Continue until a call to -``CBSInsert()`` fails, or until the free list is empty, whichever -happens first. - -``Res FreelistDescribe(Freelist fl, mps_lib_FILE *stream, Count depth)`` - -_`.function.describe`: Print a textual representation of the free -list ``fl`` to the given stream, indicating the contiguous ranges in -order. It is provided for debugging only. +Keyword arguments +................. +When initializing a free list, ``LandCreate()`` and ``LandInit()`` +take no keyword arguments. Pass ``mps_args_none``. Implementation @@ -221,12 +100,13 @@ an address-ordered singly linked free list. (As in traditional _`.impl.block`: If the free address range is large enough to contain an inline block descriptor consisting of two pointers, then the two pointers stored are to the next free range in address order (or -``NULL`` if there are no more ranges), and to the limit of current -free address range, in that order. +``freelistEND`` if there are no more ranges), and to the limit of the +current free address range, in that order. _`.impl.grain`: Otherwise, the free address range must be large enough to contain a single pointer. The pointer stored is to the next free -range in address order, or ``NULL`` if there are no more ranges. 
+range in address order, or ``freelistEND`` if there are no more +ranges. _`.impl.tag`: Grains and blocks are distinguished by a one-bit tag in the low bit of the first word (the one containing the pointer to the @@ -239,14 +119,31 @@ _`.impl.merge`: When a free address range is added to the free list, it is merged with adjacent ranges so as to maintain `.impl.invariant`_. -_`.impl.rule.break`: The use of ``NULL`` to mark the end of the list -violates the rule that exceptional values should not be used to +_`.impl.rule.break`: The use of ``freelistEND`` to mark the end of the +list violates the rule that exceptional values should not be used to distinguish exeptional situations. This infraction allows the implementation to meet `.req.zero-overhead`_. (There are other ways to do this, such as using another tag to indicate the last block in the list, but these would be more complicated.) +Testing +------- + +_`.test`: The following testing will be performed on this module: + +_`.test.land`: A generic test for land implementations. See +design.mps.land.test. + +_`.test.pool`: Two pools (MVT_ and MVFF_) use free lists as a fallback +when low on memory. These are subject to testing in development, QA, +and are heavily exercised by customers. + +.. _MVT: poolmvt +.. _MVFF: poolmvff + + + Opportunities for improvement ----------------------------- @@ -256,7 +153,7 @@ exceed the recorded size of the list. _`.improve.maxsize`: We could maintain the maximum size of any range on the list, and use that to make an early exit from -``FreelistFindLargest()``. It's not clear that this would actually be +``freelistFindLargest()``. It's not clear that this would actually be an improvement. @@ -266,6 +163,8 @@ Document History - 2013-05-18 GDR_ Initial draft based on CBS "emergency block" design. +- 2014-04-01 GDR_ Moved generic material to design.mps.land_. + .. _GDR: http://www.ravenbrook.com/consultants/gdr/ diff --git a/mps/design/index.txt b/mps/design/index.txt index de5db6963fd..39594723074 100644 --- a/mps/design/index.txt +++ b/mps/design/index.txt @@ -45,21 +45,23 @@ arena_ The design of the MPS arena arenavm_ Virtual memory arena bt_ Bit tables buffer_ Allocation buffers and allocation points -cbs_ Coalescing block structures +cbs_ Coalescing Block Structure land implementation check_ Design of checking in MPS class-interface_ Design of the pool class interface collection_ The collection framework config_ The design of MPS configuration critical-path_ The critical path through the MPS diag_ The design of MPS diagnostic feedback +failover_ Fail-over land implementation finalize_ Finalization fix_ The Design of the Generic Fix Function -freelist_ Free list allocator +freelist_ Free list land implementation guide.hex.trans_ Guide to transliterating the alphabet into hexadecimal guide.impl.c.format_ Coding standard: conventions for the general format of C source code in the MPS interface-c_ The design of the Memory Pool System interface to C io_ The design of the MPS I/O subsystem keyword-arguments_ The design of the MPS mechanism for passing arguments by keyword. 
+land_ Lands (collections of address ranges) lib_ The design of the Memory Pool System library interface lock_ The design of the lock module locus_ The design for the locus manager @@ -68,15 +70,15 @@ message-gc_ Messages sent when garbage collection begins or ends nailboard_ Nailboards for ambiguously referenced segments object-debug_ Debugging Features for Client Objects pool_ The design of the pool and pool class mechanisms -poolamc_ The design of the automatic mostly-copying memory pool class -poolams_ The design of the automatic mark-and-sweep pool class -poolawl_ Automatic weak linked -poollo_ Leaf object pool class -poolmfs_ The design of the manual fixed small memory pool class -poolmrg_ Guardian poolclass -poolmv_ The design of the manual variable memory pool class -poolmvt_ The design of a new manual-variable memory pool class -poolmvff_ Design of the manually-managed variable-size first-fit pool +poolamc_ Automatic Mostly-Copying pool class +poolams_ Automatic Mark-and-Sweep pool class +poolawl_ Automatic Weak Linked pool class +poollo_ Leaf Object pool class +poolmfs_ Manual Fixed Small pool class +poolmrg_ Manual Rank Guardian pool class +poolmv_ Manual Variable pool class +poolmvt_ Manual Variable Temporal pool class +poolmvff_ Manual Variable First-Fit pool class prot_ Generic design of the protection module protan_ ANSI implementation of protection module protli_ Linux implementation of protection module @@ -122,6 +124,7 @@ writef_ The design of the MPS writef function .. _config: config .. _critical-path: critical-path .. _diag: diag +.. _failover: failover .. _finalize: finalize .. _fix: fix .. _freelist: freelist @@ -130,6 +133,7 @@ writef_ The design of the MPS writef function .. _interface-c: interface-c .. _io: io .. _keyword-arguments: keyword-arguments +.. _land: land .. _lib: lib .. _lock: lock .. _locus: locus diff --git a/mps/design/land.txt b/mps/design/land.txt new file mode 100644 index 00000000000..b4b8bd212a1 --- /dev/null +++ b/mps/design/land.txt @@ -0,0 +1,352 @@ +.. mode: -*- rst -*- + +Lands +===== + +:Tag: design.mps.land +:Author: Gareth Rees +:Date: 2014-04-01 +:Status: complete design +:Revision: $Id$ +:Copyright: See section `Copyright and License`_. + + +Introduction +------------ + +_`.intro`: This is the design of the *land* abstract data type, which +represents a collection of contiguous address ranges. + +_`.readership`: This document is intended for any MPS developer. + +_`.source`: design.mps.cbs_, design.mps.freelist_. + +_`.overview`: Collections of address ranges are used in several places +in the MPS: the arena stores a set of mapped address ranges; pools +store sets of address ranges which have been acquired from the arena +and sets of address ranges that are available for allocation. The +*land* abstract data type makes it easy to try out different +implementations with different performance characteristics and other +attributes. + +_`.name`: The name is inspired by *rangeland* meaning *group of +ranges* (where *ranges* is used in the sense *grazing areas*). + + +Definitions +----------- + +_`.def.range`: A (contiguous) *range* of addresses is a semi-open +interval on address space. + +_`.def.isolated`: A contiguous range is *isolated* with respect to +some property it has, if adjacent elements do not have that property. + + +Requirements +------------ + +_`.req.set`: Must maintain a set of addresses. + +_`.req.add`: Must be able to add address ranges to the set. + +_`.req.remove`: Must be able to remove address ranges from the set. 
 + +_`.req.size`: Must report concisely to the client when isolated +contiguous ranges of at least a certain size appear and disappear. + +_`.req.iterate`: Must support the iteration of all isolated +contiguous ranges. + +_`.req.protocol`: Must detect protocol violations. + +_`.req.debug`: Must support debugging of client code. + +_`.req.align`: Must support an alignment (the alignment of all +addresses specifying ranges) of down to ``sizeof(void *)`` without +losing memory. + + +Interface +--------- + +Types +..... + +``typedef struct LandStruct *Land;`` + +_`.type.land`: The type of a generic land instance. + +``typedef Bool (*LandVisitor)(Land land, Range range, void *closureP, Size closureS);`` + +_`.type.visitor`: Type ``LandVisitor`` is a callback function that may +be passed to ``LandIterate()``. It is called for every isolated +contiguous range in address order. The function must return a ``Bool`` +indicating whether to continue with the iteration. + +``typedef Bool (*LandDeleteVisitor)(Bool *deleteReturn, Land land, Range range, void *closureP, Size closureS);`` + +_`.type.visitor.delete`: Type ``LandDeleteVisitor`` is a callback function that may +be passed to ``LandIterateAndDelete()``. It is called for every isolated +contiguous range in address order. The function must return a ``Bool`` +indicating whether to continue with the iteration. It may additionally +update ``*deleteReturn`` to ``TRUE`` if the range must be deleted from +the land, or ``FALSE`` if the range must be kept. (The default is to +keep the range.) + + +Generic functions +................. + +``Res LandInit(Land land, LandClass class, Arena arena, Align alignment, void *owner, ArgList args)`` + +_`.function.init`: ``LandInit()`` initializes the land structure for +the given class. The land will perform allocation (if necessary -- not +all land classes need to allocate) in the supplied arena. The +``alignment`` parameter is the alignment of the address ranges that +will be stored and retrieved from the land. The parameter ``owner`` is +output as a parameter to the ``LandInit`` event. The newly initialized +land contains no ranges. + +``Res LandCreate(Land *landReturn, Arena arena, LandClass class, Align alignment, void *owner, ArgList args)`` + +_`.function.create`: ``LandCreate()`` allocates memory for a land +structure of the given class in ``arena``, and then passes all +parameters to ``LandInit()``. + +``void LandDestroy(Land land)`` + +_`.function.destroy`: ``LandDestroy()`` calls ``LandFinish()`` to +finish the land structure, and then frees its memory. + +``void LandFinish(Land land)`` + +_`.function.finish`: ``LandFinish()`` finishes the land structure and +discards any other resources associated with the land. + +``Size LandSize(Land land)`` + +_`.function.size`: ``LandSize()`` returns the total size of the ranges +stored in the land. + +``Res LandInsert(Range rangeReturn, Land land, Range range)`` + +_`.function.insert`: If any part of ``range`` is already in the +land, then leave it unchanged and return ``ResFAIL``. Otherwise, +attempt to insert ``range`` into the land. If the insertion succeeds, +then update ``rangeReturn`` to describe the contiguous isolated range +containing the inserted range (this may differ from ``range`` if there +was coalescence on either side) and return ``ResOK``. If the insertion +fails, return a result code indicating allocation failure. 
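As a hedged illustration of the creation and insertion functions above (this sketch is not from the MPS sources; it assumes the internal MPS header ``mpm.h``, an already-created arena, and uses ``NULL`` as the ``owner`` argument purely for illustration), a free list land could be created and given a range like this::

    #include "mpm.h"   /* internal MPS interface (assumed) */

    static void landInsertExample(Arena arena, Addr base, Size size)
    {
      Land land;
      RangeStruct range, containingRange;
      Res res;

      /* The free list class takes no keyword arguments: pass mps_args_none. */
      res = LandCreate(&land, arena, FreelistLandClassGet(), sizeof(void *),
                       NULL /* owner, reported in the LandInit event */,
                       mps_args_none);
      AVER(res == ResOK);

      RangeInitSize(&range, base, size);   /* the range [base, base + size) */
      res = LandInsert(&containingRange, land, &range);
      AVER(res == ResOK);  /* a free list allocates nothing, so this succeeds */

      LandDestroy(land);
    }
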
 + +_`.function.insert.fail`: Insertion of a valid range (that is, one +that does not overlap with any range in the land) can only fail if the +new range is isolated and the allocation of the necessary data +structure to represent it failed. + +_`.function.insert.alias`: It is acceptable for ``rangeReturn`` and +``range`` to share storage. + +``Res LandDelete(Range rangeReturn, Land land, Range range)`` + +_`.function.delete`: If any part of the range is not in the land, +then leave the land unchanged and return ``ResFAIL``. Otherwise, update +``rangeReturn`` to describe the contiguous isolated range that +contains ``range`` (this may differ from ``range`` if there are +fragments on either side) and attempt to delete the range from the +land. If the deletion succeeds, return ``ResOK``. If the deletion +fails, return a result code indicating allocation failure. + +_`.function.delete.fail`: Deletion of a valid range (that is, one +that is wholly contained in the land) can only fail if there are +fragments on both sides and the allocation of the necessary data +structures to represent them fails. + +_`.function.delete.return`: ``LandDelete()`` returns the contiguous +isolated range that contains ``range`` even if the deletion fails. +This is so that the caller can try deleting the whole block (which is +guaranteed to succeed) and managing the fragments using a fallback +strategy. + +_`.function.delete.alias`: It is acceptable for ``rangeReturn`` and +``range`` to share storage. + +``Bool LandIterate(Land land, LandVisitor visitor, void *closureP, Size closureS)`` + +_`.function.iterate`: ``LandIterate()`` is the function used to +iterate all isolated contiguous ranges in a land. It receives a +visitor function to invoke on every range, and a pointer, ``Size`` +closure pair to pass on to the visitor function. If the visitor +function returns ``FALSE``, then iteration is terminated and +``LandIterate()`` returns ``FALSE``. If all calls to the visitor +function return ``TRUE``, then ``LandIterate()`` returns ``TRUE``. + +``Bool LandIterateAndDelete(Land land, LandDeleteVisitor visitor, void *closureP, Size closureS)`` + +_`.function.iterate.and.delete`: As ``LandIterate()``, but the visitor +function may additionally indicate, via its ``deleteReturn`` parameter, +whether the range should be deleted from the land. + +``Bool LandFindFirst(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete)`` + +_`.function.find.first`: Locate the first block (in address order) +within the land of at least the specified size, update ``rangeReturn`` +to describe that range, and return ``TRUE``. If there is no such +block, return ``FALSE``. + +In addition, optionally delete the top, bottom, or all of the found +range, depending on the ``findDelete`` argument. This saves a separate +call to ``LandDelete()``, and uses the knowledge of exactly where we +found the range. The value of ``findDelete`` must come from this +enumeration:: + + enum { + FindDeleteNONE, /* don't delete after finding */ + FindDeleteLOW, /* delete size bytes from low end of block */ + FindDeleteHIGH, /* delete size bytes from high end of block */ + FindDeleteENTIRE /* delete entire range */ + }; + +The original contiguous isolated range in which the range was found is +returned via the ``oldRangeReturn`` argument. (If ``findDelete`` is +``FindDeleteNONE`` or ``FindDeleteENTIRE``, then this will be +identical to the range returned via the ``rangeReturn`` argument.) 
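For example (a hedged sketch, not from the MPS sources: it assumes the internal MPS header ``mpm.h``, and assumes that ``RangeBase()`` returns the base address of a range), a caller could take ``size`` bytes from the low end of the first sufficiently large free range like this::

    #include "mpm.h"   /* internal MPS interface (assumed) */

    static Bool landAllocExample(Addr *baseReturn, Land land, Size size)
    {
      RangeStruct range, oldRange;

      /* FindDeleteLOW deletes exactly "size" bytes from the low end of the
       * found block, leaving any remainder in the land. */
      if (!LandFindFirst(&range, &oldRange, land, size, FindDeleteLOW))
        return FALSE;                     /* no block of at least "size" bytes */

      *baseReturn = RangeBase(&range);    /* base of the bytes just removed */
      return TRUE;
    }
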
 + +``Bool LandFindLast(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete)`` + +_`.function.find.last`: Like ``LandFindFirst()``, except that it +finds the last block in address order. + +``Bool LandFindLargest(Range rangeReturn, Range oldRangeReturn, Land land, Size size, FindDelete findDelete)`` + +_`.function.find.largest`: Locate the largest block within the +land, and if that block is at least as big as ``size``, return its +range via the ``rangeReturn`` argument, and return ``TRUE``. If there +are no blocks in the land at least as large as ``size``, return +``FALSE``. Pass 0 for ``size`` if you want the largest block +unconditionally. + +Like ``LandFindFirst()``, optionally delete the range (specifying +``FindDeleteLOW`` or ``FindDeleteHIGH`` has the same effect as +``FindDeleteENTIRE``), and return the original contiguous isolated +range in which the range was found via the ``oldRangeReturn`` +argument. + +``Res LandFindInZones(Bool *foundReturn, Range rangeReturn, Range oldRangeReturn, Land land, Size size, ZoneSet zoneSet, Bool high)`` + +_`.function.find.zones`: Locate a block at least as big as ``size`` +that lies entirely within the ``zoneSet``, return its range via the +``rangeReturn`` argument, set ``*foundReturn`` to ``TRUE``, and return +``ResOK``. (The first such block, if ``high`` is ``FALSE``, or the +last, if ``high`` is ``TRUE``.) If there is no such block, set +``*foundReturn`` to ``FALSE``, and return ``ResOK``. + +Delete the range as for ``LandFindFirst()`` and ``LandFindLast()`` +(with the effect of ``FindDeleteLOW`` if ``high`` is ``FALSE`` and the +effect of ``FindDeleteHIGH`` if ``high`` is ``TRUE``), and return the +original contiguous isolated range in which the range was found via +the ``oldRangeReturn`` argument. + +_`.function.find.zones.fail`: It's possible that the range can't be +deleted from the land because that would require allocation, in which +case the result code indicates the cause of the failure. + +``Res LandDescribe(Land land, mps_lib_FILE *stream)`` + +_`.function.describe`: ``LandDescribe()`` prints a textual +representation of the land to the given stream, indicating the +contiguous ranges in order, as well as the structure of the underlying +splay tree implementation. It is provided for debugging purposes only. + +``void LandFlush(Land dest, Land src)`` + +_`.function.flush`: Delete ranges of addresses from ``src`` and insert +them into ``dest``, so long as ``LandInsert()`` remains successful. + + +Implementations +--------------- + +There are three land implementations: + +#. CBS (Coalescing Block Structure) stores ranges in a splay tree. It + has fast (logarithmic in the number of ranges) insertion, deletion + and searching, but has substantial space overhead. See + design.mps.cbs_. + +#. Freelist stores ranges in an address-ordered free list, as in + traditional ``malloc()`` implementations. Insertion, deletion, and + searching are slow (proportional to the number of ranges) but it + does not need to allocate. See design.mps.freelist_. + +#. Failover combines two lands, using one (the *primary*) until it + fails, and then falls back to the other (the *secondary*). See + design.mps.failover_. + +.. _design.mps.cbs: cbs +.. _design.mps.freelist: freelist +.. _design.mps.failover: failover + + +Testing +------- + +_`.test`: There is a stress test for implementations of this interface +in impl.c.landtest. 
This allocates a large block of memory and then +simulates the allocation and deallocation of ranges within this block +using both a ``Land`` and a ``BT``. It makes both valid and invalid +requests, and compares the ``Land`` response to the correct behaviour +as determined by the ``BT``. It iterates the ranges in the ``Land``, +comparing them to the ``BT``. It invokes the ``LandDescribe()`` +generic function, but makes no automatic test of the resulting output. + + +Document History +---------------- + +- 2014-04-01 GDR_ Created based on design.mps.cbs_. + +.. _GDR: http://www.ravenbrook.com/consultants/gdr/ + + +Copyright and License +--------------------- + +Copyright © 2014 Ravenbrook Limited. All rights reserved. +. This is an open source license. Contact +Ravenbrook for commercial licensing options. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +#. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +#. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +#. Redistributions in any form must be accompanied by information on how + to obtain complete source code for this software and any + accompanying software that uses this software. The source code must + either be included in the distribution or be available for no more than + the cost of distribution plus a nominal fee, and must be freely + redistributable under reasonable conditions. For an executable file, + complete source code means the source code for all modules it contains. + It does not include source code for modules or files that typically + accompany the major components of the operating system on which the + executable file runs. + +**This software is provided by the copyright holders and contributors +"as is" and any express or implied warranties, including, but not +limited to, the implied warranties of merchantability, fitness for a +particular purpose, or non-infringement, are disclaimed. In no event +shall the copyright holders and contributors be liable for any direct, +indirect, incidental, special, exemplary, or consequential damages +(including, but not limited to, procurement of substitute goods or +services; loss of use, data, or profits; or business interruption) +however caused and on any theory of liability, whether in contract, +strict liability, or tort (including negligence or otherwise) arising in +any way out of the use of this software, even if advised of the +possibility of such damage.** diff --git a/mps/design/object-debug.txt b/mps/design/object-debug.txt index 300a4e261a8..b99049a9d1d 100644 --- a/mps/design/object-debug.txt +++ b/mps/design/object-debug.txt @@ -70,6 +70,11 @@ an ``AVER()`` has fired. Naturally, if the information required for the dump has been corrupted, it will fail, as softly as possible (source @@@@). +_`.req.portable`: Client code that uses these features must be easily +portable to all the supported platforms. (Source: job003749_.) + +.. _job003749: http://www.ravenbrook.com/project/mps/issue/job003749/ + .. note:: There are more requirements, especially about memory dumps and @@ -90,6 +95,11 @@ specified as a byte/word which used repeatedly to fill the fencepost. 
_`.fence.content.template`: The content could be given as a template which is of the right size and is simply copied onto the fencepost. +_`.fence.content.template.repeat`: The content could be given as a +template which is copied repeatedly until the fencepost is full. (This +would avoid the need to specify different templates on different +architectures, and so help meet `.req.portable`_.) + _`.fence.walk`: `.req.fencepost.check`_ requires the ability to find all the allocated objects. In formatted pools, this is not a problem. In unformatted pools, we could use the walker. It's a feasible @@ -233,14 +243,14 @@ to pools. In particular, clients will be able to use tagging and fenceposting separately on each pool. _`.fence.size`: Having fenceposts of adjustable size and pattern is -quite useful. We feel that restricting the size to an integral -multiple of the [pool or format?] alignment is harmless and simplifies -the implementation enormously. +useful. Restricting the size to an integral multiple of the [pool or +format?] alignment would simplify the implementation but breaks +`.req.portable`_. _`.fence.template`: We use templates (`.fence.content.template`_) to fill in the fenceposts, but we do not give any guarantees about the -location of the fenceposts, only that they're properly aligned. This -leaves us the opportunity to do tail-only fenceposting, if we choose. +location of the fenceposts. This leaves us the opportunity to do +tail-only fenceposting, if we choose. _`.fence.slop`: [see impl.c.dbgpool.FenceAlloc @@@@] @@ -416,6 +426,8 @@ Document History - 2013-04-14 GDR_ Converted to reStructuredText. +- 2014-04-09 GDR_ Added newly discovered requirement `.req.portable`_. + .. _RB: http://www.ravenbrook.com/consultants/rb/ .. _GDR: http://www.ravenbrook.com/consultants/gdr/ diff --git a/mps/design/poolmvff.txt b/mps/design/poolmvff.txt index 2c16b80c4a0..c842fe03a6c 100644 --- a/mps/design/poolmvff.txt +++ b/mps/design/poolmvff.txt @@ -120,11 +120,13 @@ Implementation -------------- _`.impl.free-list`: The pool stores its free list in a CBS (see -//gdr-peewit/info.ravenbrook.com/project/mps/branch/2013-05-17/emergency/design/poolmvff.txt -`design.mps.cbs `_), failing over in emergencies to a Freelist -(see design.mps.freelist) when the CBS cannot allocate new control +design.mps.cbs_), failing over in emergencies to a Freelist (see +design.mps.freelist_) when the CBS cannot allocate new control structures. This is the reason for the alignment restriction above. +.. _design.mps.cbs: cbs +.. _design.mps.freelist: freelist + Details ------- diff --git a/mps/design/range.txt b/mps/design/range.txt index 1b42d69ac22..9e6d4ce34a4 100644 --- a/mps/design/range.txt +++ b/mps/design/range.txt @@ -25,8 +25,8 @@ Requirements ------------ _`.req.range`: A range object must be able to represent an arbitrary -range of addresses that does not include the top grain of the address -space. +range of addresses that neither starts at ``NULL`` nor includes the +top grain of the address space. _`.req.empty`: A range object must be able to represent the empty range. @@ -55,6 +55,12 @@ empty. Initialize ``dest`` to be a copy of ``src``. +``void RangeInitSize(Range range, Addr base, Size size)`` + +Initialize a range object to represent the half-open address range +between ``base`` (inclusive) and ``base + size`` (exclusive). If +``size == 0`` then the range is empty. + ``void RangeFinish(Range range)`` Finish a range object. 
Because a range object uses no heap resources diff --git a/mps/design/splay-assemble.svg b/mps/design/splay-assemble.svg new file mode 100644 index 00000000000..45956b496ea --- /dev/null +++ b/mps/design/splay-assemble.svg @@ -0,0 +1,427 @@ [SVG figure: the splay "assemble" step, combining left and right partial trees L and R with nodes x, y and subtrees A, B, R; image markup not reproduced] diff --git a/mps/design/splay-link-left.svg b/mps/design/splay-link-left.svg new file mode 100644 index 00000000000..f94d145743a --- /dev/null +++ b/mps/design/splay-link-left.svg @@ -0,0 +1,437 @@ [SVG figure: the splay "link left" step on nodes x, y with subtrees A, B, L, R; image markup not reproduced] diff --git a/mps/design/splay-link-right.svg b/mps/design/splay-link-right.svg new file mode 100644 index 00000000000..95c8c31e0a0 --- /dev/null +++ b/mps/design/splay-link-right.svg @@ -0,0 +1,437 @@ [SVG figure: the splay "link right" step on nodes x, y with subtrees A, B, L, R; image markup not reproduced] diff --git a/mps/design/splay-rotate-left.svg b/mps/design/splay-rotate-left.svg new file mode 100644 index 00000000000..f5e50922aa8 --- /dev/null +++ b/mps/design/splay-rotate-left.svg @@ -0,0 +1,405 @@ [SVG figure: the splay "rotate left" step on nodes x, y with subtrees A, B, C; image markup not reproduced] diff --git a/mps/design/splay-rotate-right.svg b/mps/design/splay-rotate-right.svg new file mode 100644 index 00000000000..39fea1b2a20 --- /dev/null +++ b/mps/design/splay-rotate-right.svg @@ -0,0 +1,391 @@ [SVG figure: the splay "rotate right" step on nodes x, y with subtrees A, B, C; image markup not reproduced] diff --git a/mps/design/splay.txt b/mps/design/splay.txt index 84116cce54d..05554152ff9 100644 --- a/mps/design/splay.txt +++ b/mps/design/splay.txt @@ -6,7 +6,7 @@ Splay trees :Tag: design.mps.splay :Author: Gavin Matthews :Date: 1998-05-01 -:Status: draft document +:Status: complete design :Revision: $Id$ :Copyright: See `Copyright and License`_. :Index terms: pair: splay trees; design @@ -22,9 +22,13 @@ implementation. _`.readership`: This document is intended for any MM developer. _`.source`: The primary sources for this design are [ST85]_ and -[Sleator96]_. Also as CBS is a client, design.mps.cbs. As -PoolMVFF is an indirect client, design.mps.poolmvff(1). Also, as -PoolMV2 is an (obsolescent?) indirect client, design.mps.poolmv2. +[Sleator96]_. As CBS is a client, design.mps.cbs_. As PoolMVFF is an +indirect client, design.mps.poolmvff_. Also, as PoolMVT is an indirect +client, design.mps.poolmvt_. + +.. _design.mps.cbs: cbs +.. _design.mps.poolmvt: poolmvt +.. _design.mps.poolmvff: poolmvff _`.background`: The following background documents influence the design: guide.impl.c.adt(0). @@ -43,42 +47,46 @@ usage patterns. Unused nodes have essentially no time overhead. Definitions ----------- -_`.def.splay-tree`: A "Splay Tree" is a self-adjusting binary tree as -described in paper.st85(0), paper.sleator96(0). +_`.def.splay-tree`: A *splay tree* is a self-adjusting binary tree as +described in [ST85]_ and [Sleator96]_. -_`.def.node`: A "node" is used in the typical datastructure sense to -mean an element of a tree (see also `.type.splay.node`_). +_`.def.node`: A *node* is used in the typical data structure sense to +mean an element of a tree (see also `.type.tree`_). 
-_`.def.key`: A "key" is a value associated with each node; the keys +_`.def.key`: A *key* is a value associated with each node; the keys are totally ordered by a client provided comparator. -_`.def.comparator`: A "comparator" is a function that compares keys to -determine their ordering (see also `.type.splay.compare.method`_). +_`.def.comparator`: A *comparator* is a function that compares keys to +determine their ordering (see also `.type.tree.compare.method`_). -_`.def.successor`: Node *N1* is the "successor" of node *N2* if *N1* -and *N2* are both in the same tree, and the key of *N1* immediately -follows the key of *N2* in the ordering of all keys for the tree. +_`.def.successor`: Node *N*\ :subscript:`2` is the *successor* of node +*N*\ :subscript:`1` if *N*\ :subscript:`1` and *N*\ :subscript:`2` are +both in the same tree, and the key of *N*\ :subscript:`2` immediately +follows the key of *N*\ :subscript:`1` in the ordering of all keys for +the tree. -_`.def.left-child`: Each node *N* contains a "left child", which is a +_`.def.left-child`: Each node *N* contains a *left child*, which is a (possibly empty) sub-tree of nodes. The key of *N* is ordered after the keys of all nodes in this sub-tree. -_`.def.right-child`: Each node *N* contains a "right child", which is +_`.def.right-child`: Each node *N* contains a *right child*, which is a (possibly empty) sub-tree of nodes. The key of *N* is ordered before the keys of all nodes in this sub-tree. -_`.def.neighbour`: A node *N* which has key *Kn* is a "neighbour" of a -key *K* if either *Kn* is the first key in the total order which -compares greater than *K* or if *Kn* is the last key in the total -order which compares less than *K*. +_`.def.neighbour`: The *left neighbour* of a key *K* is the node *N* +with the largest key that compares less than *K* in the total order. +The *right neighbour* of a key *K* is the node *N* with the smallest +key that compares greater than *K* in the total order. A node is a +*neighbour* of a key if it is either the left or right neighbour of +the key. -_`.def.first`: A node is the "first" node in a set of nodes if its key +_`.def.first`: A node is the *first* node in a set of nodes if its key compares less than the keys of all other nodes in the set. -_`.def.last`: A node is the "last" node in a set of nodes if its key +_`.def.last`: A node is the *last* node in a set of nodes if its key compares greater than the keys of all other nodes in the set. -_`.def.client-property`: A "client property" is a value that the +_`.def.client-property`: A *client property* is a value that the client may associate with each node in addition to the key (a block size, for example). This splay tree implementation provides support for efficiently finding the first or last nodes with suitably large @@ -89,32 +97,27 @@ Requirements ------------ _`.req`: These requirements are drawn from those implied by -design.mps.poolmv2, design.mps.poolmvff(1), design.mps.cbs(2) and +design.mps.poolmvt_, design.mps.poolmvff_, design.mps.cbs_, and general inferred MPS requirements. _`.req.order`: Must maintain a set of abstract keys which is totally ordered for a comparator. -_`.req.tree`: The keys must be associated with nodes arranged in a -Splay Tree. +_`.req.fast`: Common operations must have low amortized cost. -_`.req.splay`: Common operations must balance the tree by splaying it, -to achieve low amortized cost (see paper.st85(0)). - -_`.req.add`: Must be able to add new members. 
This is a common +operation. -_`.req.remove`: Must be able to remove members. This is a common +_`.req.remove`: Must be able to remove nodes. This is a common operation. -_`.req.locate`: Must be able to locate a member, given a key. This is +_`.req.locate`: Must be able to locate a node, given a key. This is a common operation. -_`.req.neighbours`: Must be able to locate the neighbouring members -(in order) of a non-member, given a key (see `.def.neighbour`_). This -is a common operation. +_`.req.neighbours`: Must be able to locate the neighbouring nodes of a +key (see `.def.neighbour`_). This is a common operation. -_`.req.iterate`: Must be able to iterate over all members in order +_`.req.iterate`: Must be able to iterate over all nodes in key order with reasonable efficiency. _`.req.protocol`: Must support detection of protocol violations. @@ -141,10 +144,73 @@ _`.req.root`: Must be able to find the root of a splay tree (if one exists). -External types --------------- +Generic binary tree interface +----------------------------- + +Types +..... + +``typedef struct TreeStruct *Tree`` + +_`.type.tree`: ``Tree`` is the type of a node in a binary tree. +``Tree`` contains no fields to store the key associated with the node, +or the client property. Again, it is intended that the ``TreeStruct`` +can be embedded in another structure, and that this is how the +association will be made (see `.usage.client-node`_ for an example). +No convenience functions are provided for allocation or deallocation. + +``typedef void *TreeKey`` + +_`.type.treekey`: ``TreeKey`` is the type of a key associated with a +node in a binary tree. It is an alias for ``void *`` but expresses the +intention. + +``typedef TreeKey (*TreeKeyMethod)(Tree tree)`` + +_`.type.tree.key.method`: A function of type ``TreeKeyMethod`` returns the +key associated with a node in a binary tree. (Since there is no space +in a ``TreeStruct`` to store a key, it is expected that the +``TreeStruct`` is embedded in another structure from which the key can +be extracted.) + +``typedef Compare (*TreeCompare)(Tree tree, TreeKey key)`` + +_`.type.tree.compare.method`: A function of type ``TreeCompare`` is +required to compare ``key`` with the key the client associates with +that splay tree node ``tree``, and return the appropriate Compare +value (see `.usage.compare`_ for an example). The function compares a +key with a node, rather than a pair of keys or nodes as might seem +more obvious. This is because the details of the mapping between nodes +and keys are left to the client (see `.type.tree`_), and the splaying +operations compare keys with nodes (see `.impl.splay`_). + +``typedef Res (*TreeDescribeMethod)(Tree tree, mps_lib_FILE *stream)`` + +_`.type.tree.describe.method`: A function of type +``TreeDescribeMethod`` is required to write (via ``WriteF()``) a +client-oriented representation of the splay node. The output should be +non-empty, short, and without newline characters. This is provided for +debugging only. + + +Functions +......... + +``Bool TreeCheck(Tree tree)`` + +_`.function.tree.check`: This is a check function for the +``Tree`` type (see guide.impl.c.adt.method.check and +design.mps.check_). + +.. _design.mps.check: check + + +Splay tree interface +-------------------- + +Types +..... 
-``typedef struct SplayTreeStruct SplayTreeStruct`` ``typedef struct SplayTreeStruct *SplayTree`` _`.type.splay.tree`: ``SplayTree`` is the type of the main object at @@ -153,39 +219,7 @@ the root of the splay tree. It is intended that the `.usage.client-tree`_ for an example). No convenience functions are provided for allocation or deallocation. -``typedef struct TreeStruct TreeStruct`` -``typedef struct TreeStruct *Tree`` - -_`.type.splay.node`: ``Tree`` is the type of a binary tree, used as the -representation of the nodes of the splay tree. -``Tree`` contains no fields to store the key -associated with the node, or the client property. Again, it is -intended that the ``TreeStruct`` can be embedded in another -structure, and that this is how the association will be made (see -`.usage.client-node`_ for an example). No convenience functions are -provided for allocation or deallocation. - -``typedef Compare (*TreeCompare)(Tree tree, TreeKey key)`` - -_`.type.splay.compare.method`: A function of type -``TreeCompare`` is required to compare ``key`` with the key the -client associates with that splay tree node ``tree``, and return the -appropriate Compare value (see `.usage.compare`_ for an example). The -function compares a key with a node, rather than a pair of keys or -nodes as might seem more obvious. This is because the details of the -mapping between nodes and keys is left to the client (see -`.type.splay.node`_), and the splaying operations compare keys with -nodes (see `.impl.splay`_). - -``typedef Res (*SplayNodeDescribeMethod)(Tree tree, mps_lib_FILE *stream)`` - -_`.type.splay.node.describe.method`: A function of type -``SplayNodeDescribeMethod`` is required to write (via ``WriteF()``) a -client-oriented representation of the splay node. The output should be -non-empty, short, and without return characters. This is provided for -debugging only. - -``typedef Bool (*SplayTestNodeMethod)(SplayTree splay, Tree tree, void *closureP, unsigned long closureS)`` +``typedef Bool (*SplayTestNodeMethod)(SplayTree splay, Tree tree, void *closureP, Size closureS)`` _`.type.splay.test.node.method`: A function of type ``SplayTestNodeMethod`` required to determine whether the node itself @@ -194,7 +228,7 @@ meets some client determined property (see `.prop`_ and ``closureS`` describe the environment for the function (see `.function.splay.find.first`_ and `.function.splay.find.last`_). -``typedef Bool (*SplayTestTreeMethod)(SplayTree splay, Tree tree, void *closureP, unsigned long closureS)`` +``typedef Bool (*SplayTestTreeMethod)(SplayTree splay, Tree tree, void *closureP, Size closureS)`` _`.type.splay.test.tree.method`: A function of type ``SplayTestTreeMethod`` is required to determine whether any of the @@ -210,46 +244,39 @@ environment for the function (see `.function.splay.find.first`_ and ``typedef void (*SplayUpdateNodeMethod)(SplayTree splay, Tree tree)`` _`.type.splay.update.node.method`: A function of type -``SplayUpdateNodeMethod`` is required to update any client -datastructures associated with a node to maintain some client -determined property (see `.prop`_) given that the children of the node -have changed. (See -`.usage.callback`_ for an example) +``SplayUpdateNodeMethod`` is required to update any client data +structures associated with a node to maintain some client determined +property (see `.prop`_) given that the children of the node have +changed. (See `.usage.callback`_ for an example) -External functions ------------------- +Functions +......... 
_`.function.no-thread`: The interface functions are not designed to be either thread-safe or re-entrant. Clients of the interface are responsible for synchronization, and for ensuring that client-provided -methods invoked by the splay module (`.type.splay.compare.method`_, -`.type.splay.test.node.method`_, `.type.splay.test.tree.method`_, -`.type.splay.update.node.method`_) do not call functions of the splay -module. +methods invoked by the splay module (`.type.tree.compare.method`_, +`.type.tree.key.method`_, `.type.splay.test.node.method`_, +`.type.splay.test.tree.method`_, `.type.splay.update.node.method`_) do +not call functions of the splay module. ``Bool SplayTreeCheck(SplayTree splay)`` _`.function.splay.tree.check`: This is a check function for the -SplayTree type (see guide.impl.c.adt.method.check & -design.mps.check(0)). +``SplayTree`` type (see guide.impl.c.adt.method.check and +design.mps.check_). -``Bool SplayNodeCheck(Tree tree)`` - -_`.function.splay.node.check`: This is a check function for the -``Tree`` type (see guide.impl.c.adt.method.check & -design.mps.check(0)). - -``void SplayTreeInit(SplayTree splay, SplayCompareMethod compare, SplayUpdateNodeMethod updateNode)`` +``void SplayTreeInit(SplayTree splay, TreeCompareMethod compare, TreeKeyMethod nodeKey, SplayUpdateNodeMethod updateNode)`` _`.function.splay.tree.init`: This function initialises a -``SplayTree`` (see guide.impl.c.adt.method.init). It requires a -``compare`` method that defines a total ordering on nodes (see -`.req.order`_); the effect of supplying a compare method that does not -implement a total ordering is undefined. It also requires an -``updateNode`` method, which will be used to keep client properties up -to date when the tree structure changes; the value -``SplayTrivUpdate`` may be used for this method if there is no +``SplayTree`` (see guide.impl.c.adt.method.init). The ``nodeKey`` +function extracts a key from a tree node, and the ``compare`` function +defines a total ordering on keys of nodes (see `.req.order`_). The +effect of supplying a compare method that does not implement a total +ordering is undefined. The ``updateNode`` method is used to keep +client properties up to date when the tree structure changes; the +value ``SplayTrivUpdate`` may be used for this method if there is no need to maintain client properties. (See `.usage.initialization`_ for an example use). @@ -259,7 +286,7 @@ _`.function.splay.tree.finish`: This function clears the fields of a ``SplayTree`` (see guide.impl.c.adt.method.finish). Note that it does not attempt to finish or deallocate any associated ``Tree`` objects; clients wishing to destroy a non-empty ``SplayTree`` must -first explicitly descend the tree and call ``SplayNodeFinish()`` on +first explicitly descend the tree and call ``TreeFinish()`` on each node from the bottom up. ``Bool SplayTreeInsert(SplayTree splay, Tree tree, void *key)`` @@ -281,85 +308,76 @@ given node does not compare ``CompareEQUAL`` with the given key, then function first splays the tree at the given key. (See `.usage.delete`_ for an example use). -``Bool SplayTreeFind(Tree *nodeReturn, SplayTree splay, void *key)`` +``Bool SplayTreeFind(Tree *nodeReturn, SplayTree splay, TreeKey key)`` -_`.function.splay.tree.search`: This function searches the splay tree -for a node that compares ``CompareEQUAL`` to the given key (see -`.req.locate`_). It splays the tree at the key. It returns ``FALSE`` -if there is no such node in the tree, otherwise ``*nodeReturn`` will -be set to the node. 
+_`.function.splay.tree.find`: Search the splay tree for a node that +compares ``CompareEQUAL`` to the given key (see `.req.locate`_), and +splay the tree at the key. Return ``FALSE`` if there is no such node +in the tree, otherwise set ``*nodeReturn`` to the node and return +``TRUE``. -``Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, SplayTree splay, void *key)`` +``Bool SplayTreeNeighbours(Tree *leftReturn, Tree *rightReturn, SplayTree splay, TreeKey key)`` -_`.function.splay.tree.neighbours`: This function searches a splay -tree for the two nodes that are the neighbours of the given key (see -`.req.neighbours`_). It splays the tree at the key. ``*leftReturn`` -will be the neighbour which compares less than the key if such a -neighbour exists; otherwise it will be ``TreeEMPTY``. ``*rightReturn`` will -be the neighbour which compares greater than the key if such a -neighbour exists; otherwise it will be ``TreeEMPTY``. The function returns -``FALSE`` if any node in the tree compares ``CompareEQUAL`` with the -given key. (See `.usage.insert`_ for an example use). +_`.function.splay.tree.neighbours`: Search a splay tree for the two +nodes that are the neighbours of the given key (see +`.req.neighbours`_). Splay the tree at the key. If any node in the +tree compares ``CompareEQUAL`` with the given key, return ``FALSE``. +Otherwise return ``TRUE``, set ``*leftReturn`` to the left neighbour +of the key (or ``TreeEMPTY`` if the key has no left neighbour), and +set ``*rightReturn`` to the right neighbour of the key (or +``TreeEMPTY`` if the key has no right neighbour). See `.usage.insert`_ +for an example of use. -``Tree SplayTreeFirst(SplayTree splay, void *zeroKey)`` +``Tree SplayTreeFirst(SplayTree splay)`` -_`.function.splay.tree.first`: This function splays the tree at the -first node, and returns that node (see `.req.iterate`_). The supplied -key should compare ``CompareLESS`` with all nodes in the tree. It will -return ``TreeEMPTY`` if the tree has no nodes. +_`.function.splay.tree.first`: If the tree has no nodes, return +``TreeEMPTY``. Otherwise, splay the tree at the first node, and return +that node (see `.req.iterate`_). -``Tree SplayTreeNext(SplayTree splay, Tree oldNode, void *oldKey)`` +``Tree SplayTreeNext(SplayTree splay, TreeKey key)`` -_`.function.splay.tree.next`: This function receives a node and key -and returns the successor node to that node (see `.req.iterate`_). -This function is intended for use in iteration when the received node -will be the current root of the tree, but is robust against being -interspersed with other splay operations (provided the old node still -exists). The supplied key must compare ``CompareEQUAL`` to the -supplied node. Note that use of this function rebalances the tree for -each node accessed. If many nodes are accessed as a result of multiple -uses, the resultant tree will be generally well balanced. But if the -tree was previously beneficially balanced for a small working set of -accesses, then this local optimization will be lost. (see -`.future.parent`_). +_`.function.splay.tree.next`: If the tree contains a right neighbour +for ``key``, splay the tree at that node and return it. Otherwise +return ``TreeEMPTY``. See `.req.iterate`_. 
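
As an illustrative sketch only (``visit`` stands for an arbitrary
client function, ``nodeKey`` for the client's key method as passed to
``SplayTreeInit()``, and ``splay`` for the ``SplayTree`` being
traversed), an in-order traversal can be built from
``SplayTreeFirst()`` and ``SplayTreeNext()`` like this::

    /* Visit every node in key order.  Each step splays the tree at
       the node just returned, so the amortized cost of the whole
       traversal remains low. */
    Tree node;
    for (node = SplayTreeFirst(splay);
         node != TreeEMPTY;
         node = SplayTreeNext(splay, nodeKey(node)))
      visit(node);
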
-``Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, Count depth, SplayNodeDescribeMethod nodeDescribe)`` +``Res SplayTreeDescribe(SplayTree splay, mps_lib_FILE *stream, Count depth, TreeDescribeMethod nodeDescribe)`` _`.function.splay.tree.describe`: This function prints (using ``WriteF()``) to the stream a textual representation of the given -splay tree, using ``nodeDescribe()`` to print client-oriented +splay tree, using ``nodeDescribe`` to print client-oriented representations of the nodes (see `.req.debug`_). Provided for debugging only. -``Bool SplayFindFirst(Tree *nodeReturn, SplayTree splay, SplayTestNodeMethod testNode, SplayTestTreeMethod testTree, void *closureP, unsigned long closureS)`` +``Bool SplayFindFirst(Tree *nodeReturn, SplayTree splay, SplayTestNodeMethod testNode, SplayTestTreeMethod testTree, void *closureP, Size closureS)`` -_`.function.splay.find.first`: ``SplayFindFirst()`` finds the first node -in the tree that satisfies some client property (as determined by the -``testNode`` and ``testTree`` methods) (see `.req.property.find`_). -``closureP`` and ``closureS`` are arbitrary values, and are passed to -the ``testNode`` and ``testTree`` methods which may use the values as -closure environments. If there is no satisfactory node, then ``FALSE`` -is returned, otherwise ``*nodeReturn`` is set to the node. (See -`.usage.delete`_ for an example use). +_`.function.splay.find.first`: Find the first node in the tree that +satisfies some client property, as determined by the ``testNode`` and +``testTree`` methods (see `.req.property.find`_). ``closureP`` and +``closureS`` are arbitrary values, and are passed to the ``testNode`` +and ``testTree`` methods which may use the values as closure +environments. If there is no satisfactory node, return ``FALSE``; +otherwise set ``*nodeReturn`` to the node and return ``TRUE``. See +`.usage.delete`_ for an example. -``Bool SplayFindFirst(Tree *nodeReturn, SplayTree splay, SplayTestNodeMethod testNode, SplayTestTreeMethod testTree, void *closureP, unsigned long closureS)`` +``Bool SplayFindLast(Tree *nodeReturn, SplayTree splay, SplayTestNodeMethod testNode, SplayTestTreeMethod testTree, void *closureP, Size closureS)`` -_`.function.splay.find.last`: ``SplayFindLast()`` finds the last node -in the tree that satisfies some client property (as determined by the -``testNode`` and ``testTree`` methods) (see `.req.property.find`_). -``closureP`` and ``closureS`` are arbitrary values, and are passed to -the ``testNode`` and ``testTree`` methods which may use the values as -closure environments. If there is no satisfactory node, then ``FALSE`` -is returned, otherwise ``*nodeReturn`` is set to the node. +_`.function.splay.find.last`: As ``SplayFindFirst()``, but find the +last node in the tree that satisfies the client property. -``void SplayNodeRefresh(SplayTree splay, Tree tree, void *key)`` +``void SplayNodeRefresh(SplayTree splay, Tree tree, TreeKey key)`` -_`.function.splay.node.refresh`: ``SplayNodeRefresh()`` must be called -whenever the client property (see `.prop`_) at a node changes (see -`.req.property.change`_). It will call the ``updateNode`` method on -the given node, and any other nodes that may require update. The +_`.function.splay.node.refresh`: Call the ``updateNode`` method on the +given node, and on any other nodes that may require updating. The client key for the node must also be supplied; the function splays the -tree at this key. (See `.usage.insert`_ for an example use). +tree at this key. 
(See `.usage.insert`_ for an example use). This +function must be called whenever the client property (see `.prop`_) at +a node changes (see `.req.property.change`_). + +``void SplayNodeUpdate(SplayTree splay, Tree node)`` + +_`.function.splay.node.update`: Call the ``updateNode`` method on the +given node, but leave other nodes unchanged. This may be called when a +new node is created, to get the client property off the ground. Client-determined properties @@ -387,7 +405,7 @@ tree at the specified node, which may provoke calls to the ``updateNode`` method will also be called whenever a new splay node is inserted into the tree. -_`.prop.example`: For example, if implementing an address ordered tree +_`.prop.example`: For example, if implementing an address-ordered tree of free blocks using a splay tree, a client might choose to use the base address of each block as the key for each node, and the size of each block as the client property. The client can then maintain as a @@ -397,7 +415,7 @@ last block of at least a given size. See `.usage.callback`_ for an example ``updateNode`` method for such a client. _`.prop.ops`: The splay operations must cause client properties for -nodes to be updated in the following circumstances:- (see `.impl`_ for +nodes to be updated in the following circumstances (see `.impl`_ for details): _`.prop.ops.rotate`: rotate left, rotate right -- We need to update @@ -425,8 +443,7 @@ right trees. For the left tree, we traverse the right child line, reversing pointers, until we reach the node that was the last node prior to the transplantation of the root's children. Then we update from that node back to the left tree's root, restoring pointers. -Updating the right tree is the same, mutatis mutandis. (See -`.future.reverse`_ for an alternative approach). +Updating the right tree is the same, mutatis mutandis. 
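
To make the shape of such a client property concrete, here is a sketch
only (it relies on the hypothetical ``FreeBlock`` client structure and
``FreeBlockOfTree()`` accessor used in the usage examples below, and
the checking function itself is not part of the MPS sources) of the
invariant that the cached value must satisfy at every node::

    /* Check the cached subtree maximum at one node: it must equal the
       larger of the node's own size and the cached maxima of its
       children. */
    static Bool FreeBlockCheckMaxSize(Tree tree)
    {
      FreeBlock block = FreeBlockOfTree(tree);
      Size maxSize = block->size;
      if (TreeHasLeft(tree)
          && FreeBlockOfTree(TreeLeft(tree))->maxSize > maxSize)
        maxSize = FreeBlockOfTree(TreeLeft(tree))->maxSize;
      if (TreeHasRight(tree)
          && FreeBlockOfTree(TreeRight(tree))->maxSize > maxSize)
        maxSize = FreeBlockOfTree(TreeRight(tree))->maxSize;
      return block->maxSize == maxSize;
    }
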
Usage @@ -444,16 +461,16 @@ _`.usage.client-tree`: Tree structure to embed a ``SplayTree`` (see /* no obvious client fields for this simple example */ } FreeTreeStruct; -_`.usage.client-node`: Node structure to embed a Tree (see `.type.splay.node`_):: +_`.usage.client-node`: Node structure to embed a ``Tree`` (see `.type.tree`_):: typedef struct FreeBlockStruct { - TreeStruct treeStruct; /* embedded splay node */ - Addr base; /* base address of block is also the key */ - Size size; /* size of block is also the client property */ - Size maxSize; /* cached value for maximum size in subtree */ + TreeStruct treeStruct; /* embedded splay node */ + Addr base; /* base address of block is also the key */ + Size size; /* size of block is also the client property */ + Size maxSize; /* cached value for maximum size in subtree */ } FreeBlockStruct; -_`.usage.callback`: updateNode callback method (see +_`.usage.callback`: ``updateNode`` callback method (see `.type.splay.update.node.method`_):: void FreeBlockUpdateNode(SplayTree splay, Tree tree) @@ -463,18 +480,18 @@ _`.usage.callback`: updateNode callback method (see /* the cached value for the left subtree (if any) and the cached */ /* value of the right subtree (if any) */ - FreeBlock freeNode = FreeBlockOfSplayNode(tree); + FreeBlock freeNode = FreeBlockOfTree(tree); Size maxSize = freeNode.size; if (TreeHasLeft(tree)) { - FreeBlock leftNode = FreeBlockOfSplayNode(TreeLeft(tree)); + FreeBlock leftNode = FreeBlockOfTree(TreeLeft(tree)); if(leftNode.maxSize > maxSize) maxSize = leftNode->maxSize; } if (TreeHasRight(tree)) { - FreeBlock rightNode = FreeBlockOfSplayNode(TreeRight(tree)); + FreeBlock rightNode = FreeBlockOfTree(TreeRight(tree)); if(rightNode.maxSize > maxSize) maxSize = rightNode->maxSize; } @@ -482,13 +499,13 @@ _`.usage.callback`: updateNode callback method (see freeNode->maxSize = maxSize; } -_`.usage.compare`: Comparison function (see `.type.splay.compare.method`_):: +_`.usage.compare`: Comparison function (see `.type.tree.compare.method`_):: Compare FreeBlockCompare(Tree tree, TreeKey key) { Addr base1, base2, limit2; - FreeBlock freeNode = FreeBlockOfSplayNode(tree); + FreeBlock freeNode = FreeBlockOfTree(tree); - base1 = (Addr *)key; + base1 = (Addr)key; base2 = freeNode->base; limit2 = AddrAdd(base2, freeNode->size); @@ -504,13 +521,13 @@ _`.usage.test.tree`: Test tree function (see `.type.splay.test.tree.method`_):: Bool FreeBlockTestTree(SplayTree splay, Tree tree - void *closureP, unsigned long closureS) { + void *closureP, Size closureS) { /* Closure environment has wanted size as value of closureS. */ /* Look at the cached value for the node to see if any */ /* blocks in the subtree are big enough. */ - Size size = (Size)closureS; - FreeBlock freeNode = FreeBlockOfSplayNode(tree); + Size size = closureS; + FreeBlock freeNode = FreeBlockOfTree(tree); return freeNode->maxSize >= size; } @@ -518,30 +535,30 @@ _`.usage.test.node`: Test node function (see `.type.splay.test.node.method`_):: Bool FreeBlockTestNode(SplayTree splay, Tree tree - void *closureP, unsigned long closureS) { + void *closureP, Size closureS) { /* Closure environment has wanted size as value of closureS. */ /* Look at the size of the node to see if is big enough. 
*/ - Size size = (Size)closureS; - FreeBlock freeNode = FreeBlockOfSplayNode(tree); + Size size = closureS; + FreeBlock freeNode = FreeBlockOfTree(tree); return freeNode->size >= size; } _`.usage.initialization`: Client's initialization function (see `.function.splay.tree.init`_):: - void FreeTreeInit(FreeTree tree) { + void FreeTreeInit(FreeTree freeTree) { /* Initialize the embedded splay tree. */ - SplayTreeInit(&tree->splayTree, FreeBlockCompare, FreeBlockUpdateNode); + SplayTreeInit(&freeTree->splayTree, FreeBlockCompare, FreeBlockUpdateNode); } _`.usage.insert`: Client function to add a new free block into the tree, merging it with an existing block if possible:: - void FreeTreeInsert(FreeTree tree, Addr base, Addr limit) { - SplayTree splayTree = &tree->splayTree; + void FreeTreeInsert(FreeTree freeTree, Addr base, Addr limit) { + SplayTree splayTree = &freeTree->splayTree; Tree leftNeighbour, rightNeighbour; - void *key = (void *)base; /* use the base of the block as the key */ + TreeKey key = base; /* use the base of the block as the key */ Res res; /* Look for any neighbouring blocks. (.function.splay.tree.neighbours) */ @@ -557,7 +574,7 @@ tree, merging it with an existing block if possible:: /* The client housekeeping is left as an exercise to the reader. */ /* This changes the size of a block, which is the client */ /* property of the splay node. See `.function.splay.node.refresh`_ */ - SplayNodeRefresh(tree, leftNeighbour, key); + SplayNodeRefresh(splayTree, leftNeighbour, key); } else if (rightNeighbour != TreeEMPTY && FreeBlockBaseOfSplayNode(rightNeighbour) == limit) { @@ -565,18 +582,19 @@ tree, merging it with an existing block if possible:: /* The client housekeeping is left as an exercise to the reader. */ /* This changes the size of a block, which is the client */ /* property of the splay node. See `.function.splay.node.refresh`_ */ - SplayNodeRefresh(tree, rightNeighbour, key); + SplayNodeRefresh(splayTree, rightNeighbour, key); } else { /* Not contiguous - so insert a new node */ FreeBlock newBlock = (FreeBlock)allocate(sizeof(FreeBlockStruct)); - splayNode = &newBlock->splayNode; + Tree newTree = &newBlock->treeStruct; newBlock->base = base; newBlock->size = AddrOffset(base, limit); - SplayNodeInit(splayNode); /* `.function.splay.node.init`_ */ + TreeInit(newTree); /* `.function.tree.init`_ */ + SplayNodeUpdate(splayTree, newTree); /* `.function.splay.node.update`_ */ /* `.function.splay.tree.insert`_ */ - res = SplayTreeInsert(splayTree, splayNode, key); + res = SplayTreeInsert(splayTree, newTree, key); AVER(res == ResOK); /* this client doesn't duplicate free blocks */ } } @@ -586,8 +604,8 @@ given size in address order. For simplicity, this allocates the entire block:: Bool FreeTreeAllocate(Addr *baseReturn, Size *sizeReturn, - FreeTree tree, Size size) { - SplayTree splayTree = &tree->splayTree; + FreeTree freeTree, Size size) { + SplayTree splayTree = &freeTree->splayTree; Tree splayNode; Bool found; @@ -595,10 +613,10 @@ block:: /* closureP parameter is not used. 
See `.function.splay.find.first.`_ */ found = SplayFindFirst(&splayNode, splayTree, FreeBlockTestNode, FreeBlockTestTree, - NULL, (unsigned long)size); + NULL, size); if (found) { - FreeBlock freeNode = FreeBlockOfSplayNode(splayNode); + FreeBlock freeNode = FreeBlockOfTree(splayNode); Void *key = (void *)freeNode->base; /* use base of block as the key */ Res res; @@ -606,7 +624,7 @@ block:: *baseReturn = freeNode->base; *sizeReturn = freeNode->size; - /* remove the node from the splay tree - `.function.splay.tree.delete`_ */ + /* `.function.splay.tree.delete`_ */ res = SplayTreeDelete(splayTree, splayNode, key); AVER(res == ResOK); /* Must be possible to delete node */ @@ -625,9 +643,9 @@ block:: Implementation -------------- -_`.impl`: For more details of how splay trees work, see paper.st85(0). +_`.impl`: For more details of how splay trees work, see [ST85]_. For more details of how to implement operations on splay trees, see -paper.sleator96(0). Here we describe the operations involved. +[Sleator96]_. Here we describe the operations involved. Top-down splaying @@ -635,22 +653,21 @@ Top-down splaying _`.impl.top-down`: The method chosen to implement the splaying operation is called "top-down splay". This is described as "procedure -top-down splay" in paper.st85(0) - although the implementation here -additionally permits attempts to access items which are not known to -be in the tree. Top-down splaying is particularly efficient for the -common case where the location of the node in a tree is not known at -the start of an operation. Tree restructuring happens as the tree is -descended, whilst looking for the node. +top-down splay" in [ST85]_, but the implementation here additionally +permits attempts to access items which are not known to be in the +tree. Top-down splaying is particularly efficient for the common case +where the location of the node in a tree is not known at the start of +an operation. Tree restructuring happens as the tree is descended, +whilst looking for the node. _`.impl.splay`: The key to the operation of the splay tree is the internal function ``SplaySplay()``. It searches the tree for a node -with a given key. In the process, it -brings the found node, or an arbitrary neighbour if not found, to the -root of the tree. This "bring-to-root" operation is performed top-down -during the search, and it is not the simplest possible bring-to-root -operation, but the resulting tree is well-balanced, and will give good -amortised cost for future calls to ``SplaySplay()``. (See -paper.st85(0)) +with a given key. In the process, it brings the found node, or an +arbitrary neighbour if not found, to the root of the tree. This +"bring-to-root" operation is performed top-down during the search, and +it is not the simplest possible bring-to-root operation, but the +resulting tree is well-balanced, and will give good amortised cost for +future calls to ``SplaySplay()``. See [ST85]_. _`.impl.splay.how`: To perform this top-down splay, the tree is broken into three parts, a left tree, a middle tree and a right tree. We @@ -664,25 +681,28 @@ they form a partition with the ordering left, middle, right. The splay is then performed by comparing the middle tree with the following six cases, and performing the indicated operations, until none apply. -_`.impl.splay.cases`: Note that paper.st85(0)(Fig. 3) describes only 3 -cases: zig, zig-zig and zig-zag. The additional cases described here -are the symmetric variants which are respectively called zag, zag-zag -and zag-zig. 
In the descriptions of these cases, ``root`` is the root -of the middle tree; ``node->left`` is the left child of ``node``; -``node->right`` is the right child of ``node``. The comparison -operators (``<``, ``>``, ``==``) are defined to compare a key and a -node in the obvious way by comparing the supplied key with the node's -associated key. +_`.impl.splay.cases`: Note that figure 3 of [ST85]_ describes only 3 +cases: *zig*, *zig-zig* and *zig-zag*. The additional cases described +here are the symmetric variants which are respectively called *zag*, +*zag-zag* and *zag-zig*. In the descriptions of these cases, ``root`` +is the root of the middle tree; ``node->left`` is the left child of +``node``; ``node->right`` is the right child of ``node``. The +comparison operators (``<``, ``>``, ``==``) are defined to compare a +key and a node in the obvious way by comparing the supplied key with +the node's associated key. -_`.impl.splay.zig`: The "zig" case is where ``key < root``, and either: +_`.impl.splay.zig`: The "zig" case is where ``key < root``, and +either: - ``key == root->left``; - ``key < root->left && root->left->left == NULL``; or - ``key > root->left && root->left->right == NULL``. -The operation for the zig case is: link right (see `.impl.link.right`_). +The operation for the zig case is: link right (see +`.impl.link.right`_). -_`.impl.splay.zag`: The "zag" case is where ``key > root``, and either: +_`.impl.splay.zag`: The "zag" case is where ``key > root``, and +either: - ``key == root->right``; - ``key < root->right && root->right->left == NULL``; or @@ -741,48 +761,58 @@ _`.impl.splay.terminal.not-found`: The other typical terminal cases are: - ``key < root && root->left == NULL``; and - ``key > root && root->right == NULL``. -In these cases, the splay operation is complete, the three trees are assembled -(see `.impl.assemble`_), and "not found" is returned. +In these cases, the splay operation is complete, the three trees are +assembled (see `.impl.assemble`_), and "not found" is returned. -_`.impl.rotate.left`: The "rotate left" operation (see paper.st85(0) -Fig. 1) rearranges the middle tree as follows (where any of sub-trees +_`.impl.rotate.left`: The "rotate left" operation (see [ST85]_ +figure 1) rearranges the middle tree as follows (where any of sub-trees A, B and C may be empty): -[missing diagram] +.. figure:: splay-rotate-left.svg + :align: center + :alt: Diagram: the rotate left operation. -_`.impl.rotate.right`: The "rotate right" operation (see paper.st85(0) -Fig. 1) rearranges the middle tree as follows (where any of sub-trees +_`.impl.rotate.right`: The "rotate right" operation (see [ST85]_ +figure 1) rearranges the middle tree as follows (where any of sub-trees A, B and C may be empty): -[missing diagram] +.. figure:: splay-rotate-right.svg + :align: center + :alt: Diagram: the rotate right operation. -_`.impl.link.left`: The "link left" operation (see paper.st85(0) Fig. +_`.impl.link.left`: The "link left" operation (see [ST85]_ figure 11a for symmetric variant) rearranges the left and middle trees as follows (where any of sub-trees A, B, L and R may be empty): -[missing diagram] +.. figure:: splay-link-left.svg + :align: center + :alt: Diagram: the link left operation. The last node of the left tree is now x. -_`.impl.link.right`: The "link right" operation (see paper.st85(0) -Fig. 
11a) rearranges the middle and right trees as follows (where any
-of sub-trees A, B, L and R may be empty):
+_`.impl.link.right`: The "link right" operation (see [ST85]_ figure
+11a) rearranges the middle and right trees as follows (where any of
+sub-trees A, B, L and R may be empty):
 
-[missing diagram]
+.. figure:: splay-link-right.svg
+   :align: center
+   :alt: Diagram: the link right operation.
 
 The first node of the right tree is now x.
 
-_`.impl.assemble`: The "assemble" operation (see paper.st85(0)
-Fig. 12) merges the left and right trees with the middle tree as
-follows (where any of sub-trees A, B, L and R may be empty):
+_`.impl.assemble`: The "assemble" operation (see [ST85]_ figure 12)
+merges the left and right trees with the middle tree as follows (where
+any of sub-trees A, B, L and R may be empty):
 
-[missing diagram]
+.. figure:: splay-assemble.svg
+   :align: center
+   :alt: Diagram: the assemble operation.
 
 
 Top-level operations
 ....................
 
-_`.impl.insert`: ``SplayTreeInsert()``: (See paper.sleator96(0), chapter
+_`.impl.insert`: ``SplayTreeInsert()``: (See [Sleator96]_, chapter
 4, function insert). If the tree has no nodes,
 add the inserted node and we're done; otherwise splay the tree around
 the supplied key. If the splay successfully found a matching node,
@@ -791,7 +821,7 @@ the old (newly splayed, but non-matching) root as its left or right
 child as appropriate, and the opposite child of the old root as the
 other child of the new root.
 
-_`.impl.delete`: ``SplayTreeDelete()``: (See paper.sleator96(0), chapter
+_`.impl.delete`: ``SplayTreeDelete()``: (See [Sleator96]_, chapter
 4, function delete). Splay the tree around the supplied key. Check
 that the newly splayed root is the same node as given by the caller,
 and that it matches the key; return failure if not. If the given node
@@ -878,8 +908,8 @@ _`.future.parent`: The iterator could be made more efficient (in an
 amortized sense) if it didn't splay at each node. To implement this
 (whilst meeting `.req.stack`_) we really need parent pointers from the
 nodes. We could use the (first-child, right-sibling/parent) trick
-described in paper.st85 to implement this, at a slight cost to all
-other tree operations, and an increase in code complexity. paper.st85
+described in [ST85]_ to implement this, at a slight cost to all
+other tree operations, and an increase in code complexity. [ST85]_
 doesn't describe how to distinguish the first-child between left-child
 and right-child, and the right-sibling/parent between right-sibling
 and parent. One could either use the comparator to make these
diff --git a/mps/design/strategy.txt b/mps/design/strategy.txt
index 4b09c3cc004..40b88863af9 100644
--- a/mps/design/strategy.txt
+++ b/mps/design/strategy.txt
@@ -81,140 +81,218 @@ the client to specify preferred relative object locations ("this
 object should be kept in the same cache line as that one"), to
 improve cache locality.
 
+
 Generations
 -----------
 
 The largest part of the current MPS strategy implementation is the
-support for generational GC. Generations are only fully supported for
-AMC (and AMCZ) pools. See under "Non-AMC Pools", below, for more
-information.
+support for generational garbage collection.
 
-Data Structures
-...............
 
-The fundamental structure of generational GC is the ``Chain``,
-which describes a set of generations. A chain is created by client
-code calling ``mps_chain_create()``, specifying the "size" and
-"mortality" for each generation.
When creating an AMC pool, the
-client code must specify the chain which will control collections for
-that pool. The same chain may be used for multiple pools.
 
+General data structures
+.......................
 
-Each generation in a chain has a ``GenDesc`` structure,
-allocated in an array pointed to from the chain. Each AMC pool has a
-set of ``PoolGen`` structures, one per generation. The PoolGens
-for each generation point to the GenDesc and are linked together in a
-ring on the GenDesc. These structures are (solely?) used to gather
+The fundamental structure of generational garbage collection is the
+``Chain``, which describes a sequence of generations.
+
+A chain specifies the "capacity" and "mortality" for each generation.
+When creating an automatically collected pool, the client code may
+specify the chain which will control collections for that pool. The
+same chain may be used for multiple pools. If no chain is specified,
+the pool uses the arena's default generation chain.
+
+Each generation in a chain has a ``GenDesc`` structure, allocated in
+an array pointed to from the chain. In addition to the generations in
+the chains, the arena has a unique ``GenDesc`` structure, named
+``topGen`` and described in comments as "the dynamic generation"
+(misleadingly: in fact it is the *least* dynamic generation).
+
+Each automatically collected pool has a set of ``PoolGen`` structures,
+one for each generation that it can allocate or promote into. The
+``PoolGen`` structures for each generation point to the ``GenDesc``
+for that generation, and are linked together in a ring on the
+``GenDesc``. These structures are used to gather
 accounting information for strategy decisions.
 
-The arena has a unique ``GenDesc`` structure, named
-``topGen`` and described in comments as "the dynamic generation"
-(although in fact it is the *least* dynamic generation). Each AMC
-pool has one more PoolGen than there are GenDescs in the chain. The
-extra PoolGen refers to this topGen.
+The non-moving automatic pool classes (AMS, AWL and LO) do not support
+generational collection, so they allocate into a single generation.
+The moving automatic pool classes (AMC and AMCZ) have one pool
+generation for each generation in the chain, plus one pool generation
+for the arena's "top generation".
 
-AMC segments have a segment descriptor ``amcSegStruct`` which is
-a ``GCSegStruct`` with two additional fields. One field
-``segTypeP`` is a pointer either to the per-generation per-pool
-``amcGen`` structure (a subclass of ``PoolGen``), or to a
-nailboard (which then points to an amcGen). The other field
-``new`` is a boolean used for keeping track of memory usage for
-strategy reasons (see below under 'Accounting'). The ``amcGen``
-is used for statistics (``->segs``) and forwarding buffers
-(``->forward``).
 
-The AMC pool class only ever allocates a segment in order to fill a
-buffer: either the buffer for a client Allocation Point, or a
-forwarding buffer. In order to support generational collection, there
-is a subclass ``amcBuf`` of ``SegBuf``, with a
-``gen`` field (pointing to a ``amcGen``). So in
-``AMCBufferFill()`` the generation of the new segment can be
-determined.
+AMC data structures
+...................
-When an AMC pool is created, these ``amcGen`` and
-``amcBuf`` structures are all created, and the
-``amcBuf->gen`` fields initialized so that the forwarding buffer
-of each amcGen knows that it belongs to the next "older" amcGen (apart
-from the "oldest" amcGen - that which refers to the topGen - whose
-forwarding buffer belongs to itself).
+An AMC pool creates an array of pool generation structures of type
+``amcGen`` (a subclass of ``PoolGen``). Each pool generation points to
+the *forwarding buffer* for that generation: this is the buffer that
+surviving objects are copied into.
 
-When copying an object in ``AMCFix()``, the object's current
-generation is determined (``amcSegGen()``), and the object is
-copied to that amcGen's forwarding buffer, using the buffer protocol.
-Thus, objects are "promoted" up the chain of generations until they
-end up in the topGen, which is shared between all chains and all
-pools.
+AMC segments point to the AMC pool generation that the segment belongs
+to, and AMC buffers point to the AMC pool generation that the buffer
+will be allocating into.
 
-For statistics and reporting purposes, when ``STATISTICS`` is
-on, each AMC pool has an array of ``PageRetStruct``s, one per
-trace. This structure has many ``Count`` fields, and is
-intended to help to assess AMC page retention code. See job001811.
+The forwarding buffers are set up during AMC pool creation. Each
+generation forwards into the next higher generation in the chain,
+except for the top generation, which forwards to itself. Thus, objects
+are "promoted" up the chain of generations until they end up in the
+top generation, which is shared between all generational pools.
+
+
+Collections
+...........
+
+Collections in the MPS start in one of two ways:
+
+1. A collection of the world starts via ``traceCondemnAll()``. This
+   simply condemns all segments in all automatic pools.
+
+2. A collection of some set of generations starts via ``TracePoll()``.
+   This calls ``ChainDeferral()`` for each chain; this function
+   indicates if the chain needs collecting, and if so, how urgent it
+   is to collect that chain. The most urgent chain in need of
+   collection (if any) is then condemned by calling
+   ``ChainCondemnAuto()``.
+
+   This function chooses the set of generations to condemn, computes
+   the zoneset corresponding to the union of those generations, and
+   condemns those zones by calling ``TraceCondemnZones()``.
+
+   Note that the condemnation is of every segment in an automatic pool
+   in any zone in the zoneset. It is not limited to the segments
+   actually associated with the condemned generations.
+
 
 Zones
 .....
 
-All collections in the MPS start with condemnation of a complete
-``ZoneSet``. Each generation in each chain has a zoneset
-associated with it (``chain->gen[N].zones``); the condemned
-zoneset is the union of some number of generation's zonesets. It is
-condemned by code in the chain system calling
-``TraceCondemnZones()``. This is either for all chains
-(``ChainCondemnAll()`` called for every chain from
-``traceCondemnAll()``) or for some number of generations in a
-single chain (``ChainCondemnAuto()`` called from
-``TracePoll()``). Note that the condemnation is of every
-automatic-pool segment in any zone in the zoneset. It is not limited
-to the segments actually associated with the condemned generations.
+Each generation in each chain has a zoneset associated with it
+(``gen->zones``); the condemned zoneset is the union of some number of
+generations' zonesets.
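
As a rough sketch of that computation (this is not the MPS source
code: ``gen`` follows the informal ``gen->zones`` notation above for
the chain's array of ``GenDesc`` structures, ``nCondemned`` stands for
the number of generations chosen for condemnation, and ``trace`` is
the trace being started)::

    /* Accumulate the zones used by the condemned generations, then
       condemn every automatic-pool segment in those zones. */
    ZoneSet condemned = ZoneSetEMPTY;
    Index i;
    Res res;
    for (i = 0; i < nCondemned; ++i)
      condemned = ZoneSetUnion(condemned, gen[i].zones);
    res = TraceCondemnZones(trace, condemned);
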
An attempt is made to use distinct zonesets for different generations. -Segments are allocated from ``AMCBufferFill()`` using ``ChainAlloc()`` +Segments in automatic pools are allocated using ``PoolGenAlloc()`` which creates a ``SegPref`` using the zoneset from the generation's -``GenDesc``. The zoneset for each generation number starts out -empty. If the zoneset is empty, an attempt is made to allocate from a -free zone. The ``GenDesc`` zoneset is augmented with whichever zones the -new segment occupies. +``GenDesc``. The zoneset for each generation starts out empty. If the +zoneset is empty, an attempt is made to allocate from a free zone. The +``GenDesc`` zoneset is augmented with whichever zones the new segment +occupies. Note that this zoneset can never shrink. + +Parameters +.......... + +_`.param.intro`: A generation has two parameters, *capacity* and +*mortality*, specified by the client program. + +_`.param.capacity`: The *capacity* of a generation is the amount of +*new* allocation in that generation (that is, allocation since the +last time the generation was condemned) that will cause the generation +to be collected by ``TracePoll()``. + +_`.param.capacity.misnamed`: The name *capacity* is unfortunate since +it suggests that the total amount of memory in the generation will not +exceed this value. But that will only be the case for pool classes +that always promote survivors to another generation. When there is +*old* allocation in the generation (that is, prior to the last time +the generation was condemned), as there is in the case of non-moving +pool classes, the size of a generation is unrelated to its capacity. + +_`.param.mortality`: The *mortality* of a generation is the proportion +(between 0 and 1) of memory in the generation that is expected to be +dead when the generation is collected. It is used in ``TraceStart()`` +to estimate the amount of data that will have to be scanned in order +to complete the trace. + + Accounting .......... -- ``gen[N].mortality`` +_`.accounting.intro`: Pool generations maintain the sizes of various +categories of data allocated in that generation for that pool. This +accounting information is reported via the event system, but also used +in two places: - - Specified by the client. - - TODO: fill in how this is used. +_`.accounting.poll`: ``ChainDeferral()`` uses the *new size* of each +generation to determine which generations in the chain are over +capacity and so might need to be collected via ``TracePoll()``. -- ``gen[N].capacity`` +_`.accounting.condemn`: ``ChainCondemnAuto()`` uses the *new size* of +each generation to determine which generations in the chain will be +collected; it also uses the *total size* of the generation to compute +the mortality. - - Specified by the client. - - TODO: fill in how this is used. +_`.accounting.check`: Computing the new size for a pool generation is +far from straightforward: see job003772_ for some (former) errors in +this code. In order to assist with checking that this has been +computed correctly, the locus module uses a double-entry book-keeping +system to account for every byte in each pool generation. This uses +six accounts: -- ``amcSeg->new`` +.. _job003772: http://www.ravenbrook.com/project/mps/issue/job003772/ - - TODO: fill this in +_`.account.total`: Memory acquired from the arena. 
-- ``pgen->totalSize``: +_`.account.total.negated`: From the point of view of the double-entry +system, the *total* should be negative as it is owing to the arena, +but it is inconvenient to represent negative sizes, and so the +positive value is stored instead. - - incremented by ``AMCBufferFill()``; - - decremented by ``amcReclaimNailed()`` and ``AMCReclaim()``; - - added up by ``GenDescTotalSize(gen)``. +_`.account.total.negated.justification`: We don't have a type for +signed sizes; but if we represented it in two's complement using the +unsigned ``Size`` type then Clang's unsigned integer overflow detector +would complain. -- ``pgen->newSize``: +_`.account.free`: Memory that is not in use (free or lost to +fragmentation). - - incremented by ``AMCBufferFill()`` (*when not ramping*) and ``AMCRampEnd()``; - - decremented by ``AMCWhiten()``, - - added up by ``GenDescNewSize(gen)``. +_`.account.new`: Memory in use by the client program, allocated +since the last time the generation was condemned. -- ``gen[N].proflow``: +_`.account.old`: Memory in use by the client program, allocated +prior to the last time the generation was condemned. - - set to 1.0 by ``ChainCreate()``; - - ``arena->topGen.proflow`` set to 0.0 by ``LocusInit(arena)``; - - *The value of this field is never used*. +_`.account.newDeferred`: Memory in use by the client program, +allocated since the last time the generation was condemned, but which +should not cause collections via ``TracePoll()``. (Due to ramping; see +below.) +_`.account.oldDeferred`: Memory in use by the client program, +allocated prior to the last time the generation was condemned, but +which should not cause collections via ``TracePoll()``. (Due to +ramping; see below.) -- ``pgen->newSizeAtCreate``: +_`.accounting.op`: The following operations are provided: + +_`.accounting.op.alloc`: Allocate a segment in a pool generation. +Debit *total*, credit *free*. (But see `.account.total.negated`_.) + +_`.accounting.op.free`: Free a segment. First, ensure that the +contents of the segment are accounted as free, by artificially ageing +any memory accounted as *new* or *newDeferred* (see +`.accounting.op.age`_) and then artifically reclaiming any memory +accounted as *old* or *oldDeferred* (see `.accounting.op.reclaim`_). +Finally, debit *free*, credit *total*. (But see +`.account.total.negated`_.) + +_`.accounting.op.fill`: Allocate memory, for example by filling a +buffer. Debit *free*, credit *new* or *newDeferred*. + +_`.accounting.op.empty`: Deallocate memory, for example by emptying +the unused portion of a buffer. Debit *new* or *newDeferred*, credit +*free*. + +_`.accounting.op.age`: Condemn memory. Debit *new* or *newDeferred*, +credit *old* or *oldDeferred*. + +_`.accounting.op.reclaim`: Reclaim dead memory. Debit *old* or +*oldDeferred*, credit *free*. + +_`.accounting.op.undefer`: Stop deferring the accounting of memory. Debit *oldDeferred*, credit *old*. Debit *newDeferred*, credit *new*. - - set by ``traceCopySizes()`` (that is its purpose); - - output in the ``TraceStartPoolGen`` telemetry event. Ramps ..... @@ -289,29 +367,31 @@ Reclaiming any AMC segment: Now, some deductions: #. When OUTSIDE, the count is always zero, because (a) it starts that -way, and the only ways to go OUTSIDE are (b) by leaving an outermost -ramp (count goes to zero) or (c) by reclaiming when the count is zero. + way, and the only ways to go OUTSIDE are (b) by leaving an + outermost ramp (count goes to zero) or (c) by reclaiming when the + count is zero. #. 
When BEGIN, the count is never zero (consider the transitions to -BEGIN and the transition to zero). + BEGIN and the transition to zero). -#. When RAMPING, the count is never zero (again consider transitions to -RAMPING and the transition to zero). +#. When RAMPING, the count is never zero (again consider transitions + to RAMPING and the transition to zero). -#. When FINISH, the count can be anything (the transition to FINISH has -zero count, but the Enter transition when FINISH can change that and -then it can increment to any value). +#. When FINISH, the count can be anything (the transition to FINISH + has zero count, but the Enter transition when FINISH can change + that and then it can increment to any value). #. When COLLECTING, the count can be anything (from the previous fact, -and the transition to COLLECTING). + and the transition to COLLECTING). -#. *This is a bug!!* The ramp generation is not always reset (to forward -to the after-ramp generation). If we get into FINISH and then see -another ramp before the next condemnation of the ramp generation, we -will Enter followed by Leave. The Enter will keep us in FINISH, and -the Leave will take us back to OUTSIDE, skipping the transition to the -COLLECTING state which is what resets the ramp generation forwarding -buffer. [TODO: check whether I made an issue and/or fixed it; NB 2013-06-04] +#. *This is a bug!!* The ramp generation is not always reset (to + forward to the after-ramp generation). If we get into FINISH and + then see another ramp before the next condemnation of the ramp + generation, we will Enter followed by Leave. The Enter will keep us + in FINISH, and the Leave will take us back to OUTSIDE, skipping the + transition to the COLLECTING state which is what resets the ramp + generation forwarding buffer. [TODO: check whether I made an issue + and/or fixed it; NB 2013-06-04] The simplest change to fix this is to change the behaviour of the Leave transition, which should only take us OUTSIDE if we are in BEGIN or @@ -349,25 +429,6 @@ other uses of that: - in ``AMCWhiten()``, if new is TRUE, the segment size is deducted from ``poolGen.newSize`` and new is set to FALSE. -Non-AMC Pools -............. - -The implementations of AMS, AWL, and LO pool classes are all aware of -generations (this is necessary because all tracing is driven by the -generational data structures described above), but do not make use of -them. For LO and AWL, when a pool is created, a chain with a single -generation is also created, with size and mortality parameters -hard-wired into the pool-creation function (LOInit, AWLInit). For -AMS, a chain is passed as a pool creation parameter into -``mps_pool_create()``, but this chain must also have only a -single generation (otherwise ``ResPARAM`` is returned). - -Note that these chains are separate from any chain used by an AMC pool -(except in the trivial case when a single-generation chain is used for -both AMC and AMS). Note also that these pools do not use or point to -the ``arena->topGen``, which applies only to AMC. - -Non-AMC pools have no support for ramps. Starting a Trace ................ @@ -378,15 +439,18 @@ Trace Progress .............. TODO: When do we do some tracing work? How much tracing work do we do? + Document History ---------------- - 2013-06-04 NB_ Checked this in although it's far from complete. Pasted in my 'ramping notes' from email, which mention some bugs which I may have fixed (TODO: check this). -- 2014-01-29 RB_ The arena no longer manages generation zonesets. 
+- 2014-01-29 RB_ The arena no longer manages generation zonesets. +- 2014-05-17 GDR_ Bring data structures and condemn logic up to date. -.. _RB: http://www.ravenbrook.com/consultants/rb +.. _GDR: http://www.ravenbrook.com/consultants/gdr/ .. _NB: http://www.ravenbrook.com/consultants/nb/ +.. _RB: http://www.ravenbrook.com/consultants/rb Copyright and License diff --git a/mps/design/type.txt b/mps/design/type.txt index baee04ef3d2..d405ee229e9 100644 --- a/mps/design/type.txt +++ b/mps/design/type.txt @@ -72,6 +72,8 @@ Interface. ``mps_addr_t`` is defined to be the same as ``void *``, so using the MPS C Interface confines the memory manager to the same address space as the client data. +_`.addr.readonly`: For read-only addresses, see `.readonlyaddr`_. + ``typedef Word Align`` @@ -89,28 +91,26 @@ C Interface. ``typedef unsigned Attr`` -_`.attr`: Pool attributes. A bitset of pool or pool class -attributes, which are: +_`.attr`: Pool attributes. A bitset of pool class attributes, which +are: =================== =================================================== Attribute Description =================== =================================================== -``AttrALLOC`` Supports the ``PoolAlloc`` interface. -``AttrBUF`` Supports the buffer interface. ``AttrFMT`` Contains formatted objects. Used to decide which pools to walk. -``AttrFREE`` Supports the ``PoolFree`` interface. ``AttrGC`` Is garbage collecting, that is, parts may be reclaimed. Used to decide which segments are condemned. ``AttrMOVINGGC`` Is moving, that is, objects may move in memory. Used to update the set of zones that might have moved and so implement location dependency. -``AttrSCAN`` Contains references and must be scanned. =================== =================================================== There is an attribute field in the pool class (``PoolClassStruct``) -which declares the attributes of that class. +which declares the attributes of that class. See `design.mps.class-interface.field.attr`_. + +.. _design.mps.class-interface.field.attr: class-interface ``typedef int Bool`` @@ -155,9 +155,17 @@ _`.bool.bitfield`: When a Boolean needs to be stored in a bitfield, the type of the bitfield must be ``unsigned:1``, not ``Bool:1``. (That's because the two values of the type ``Bool:1`` are ``0`` and ``-1``, which means that assigning ``TRUE`` would require a sign -conversion.) To avoid warnings about loss of data from GCC with the -``-Wconversion`` option, ``misc.h`` provides the ``BOOLOF`` macro for -coercing a value to an unsigned single-bit field. +conversion.) To make it clear why this is done, ``misc.h`` provides +the ``BOOLFIELD`` macro. + +_`.bool.bitfield.assign`: To avoid warnings about loss of data from +GCC with the ``-Wconversion`` option, ``misc.h`` provides the +``BOOLOF`` macro for coercing a value to an unsigned single-bit field. + +_`.bool.bitfield.check`: A Boolean bitfield cannot have an incorrect +value, and if you call ``BoolCheck()`` on such a bitfield then GCC 4.2 +issues the warning "comparison is always true due to limited range of +data type". When avoiding such a warning, reference this tag. ``typedef unsigned BufferMode`` @@ -373,6 +381,13 @@ their integer values. _`.rankset`: ``RankSet`` is a set of ranks, represented as a bitset. +``typedef const struct AddrStruct *ReadonlyAddr`` + +_`.readonlyaddr`: ``ReadonlyAddr`` is the type used for managed +addresses that an interface promises it will only read through, never +write. Otherwise it is identical to ``Addr``. 
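
For example (a hypothetical declaration, not one taken from the MPS
sources), an interface that only inspects blocks of managed memory
could advertise that promise in its prototype::

    /* Hypothetical: compare two managed blocks without writing to
       either of them. */
    extern Compare BlockCompare(ReadonlyAddr base1, ReadonlyAddr base2,
                                Size size);
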
+ + ``typedef Addr Ref`` _`.ref`: ``Ref`` is a reference to a managed object (as opposed to any diff --git a/mps/example/scheme/scheme-advanced.c b/mps/example/scheme/scheme-advanced.c index 476b790f0a4..57ee56c9510 100644 --- a/mps/example/scheme/scheme-advanced.c +++ b/mps/example/scheme/scheme-advanced.c @@ -409,6 +409,7 @@ static void error(const char *format, ...) if (error_handler) { longjmp(*error_handler, 1); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); abort(); @@ -4003,6 +4004,7 @@ static mps_res_t obj_scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) break; default: assert(0); + fflush(stdout); fprintf(stderr, "Unexpected object on the heap\n"); abort(); } @@ -4073,6 +4075,7 @@ static mps_addr_t obj_skip(mps_addr_t base) break; default: assert(0); + fflush(stdout); fprintf(stderr, "Unexpected object on the heap\n"); abort(); } @@ -4366,6 +4369,7 @@ static int start(int argc, char *argv[]) make_operator(optab[i].name, optab[i].entry, obj_empty, obj_empty, env, op_env)); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); @@ -4375,7 +4379,9 @@ static int start(int argc, char *argv[]) if(argc >= 2) { /* Non-interactive file execution */ if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); + fflush(stderr); exit_code = EXIT_FAILURE; } else { load(env, op_env, make_string(strlen(argv[1]), argv[1])); @@ -4394,12 +4400,15 @@ static int start(int argc, char *argv[]) "If you recurse too much the interpreter may crash from using too much C stack."); for(;;) { if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); + fflush(stderr); } mps_chat(); printf("%lu, %lu> ", (unsigned long)total, (unsigned long)mps_collections(arena)); + fflush(stdout); obj = read(input); if(obj == obj_eof) break; obj = eval(env, op_env, obj); diff --git a/mps/example/scheme/scheme-boehm.c b/mps/example/scheme/scheme-boehm.c index 8d039433bb4..f912adfb38a 100644 --- a/mps/example/scheme/scheme-boehm.c +++ b/mps/example/scheme/scheme-boehm.c @@ -281,6 +281,7 @@ static void error(char *format, ...) if (error_handler) { longjmp(*error_handler, 1); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); abort(); @@ -3599,6 +3600,7 @@ int main(int argc, char *argv[]) make_operator(optab[i].name, optab[i].entry, obj_empty, obj_empty, env, op_env)); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); @@ -3608,6 +3610,7 @@ int main(int argc, char *argv[]) if(argc >= 2) { /* Non-interactive file execution */ if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); return EXIT_FAILURE; } @@ -3617,9 +3620,13 @@ int main(int argc, char *argv[]) /* Interactive read-eval-print loop */ puts("Scheme Test Harness"); for(;;) { - if(setjmp(*error_handler) != 0) + if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); + fflush(stderr); + } printf("%lu> ", (unsigned long)total); + fflush(stdout); obj = read(input); if(obj == obj_eof) break; obj = eval(env, op_env, obj); diff --git a/mps/example/scheme/scheme-malloc.c b/mps/example/scheme/scheme-malloc.c index 1333ce73aef..3f3d55994a8 100644 --- a/mps/example/scheme/scheme-malloc.c +++ b/mps/example/scheme/scheme-malloc.c @@ -279,6 +279,7 @@ static void error(char *format, ...) 
if (error_handler) { longjmp(*error_handler, 1); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); abort(); @@ -3596,6 +3597,7 @@ int main(int argc, char *argv[]) make_operator(optab[i].name, optab[i].entry, obj_empty, obj_empty, env, op_env)); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); @@ -3605,6 +3607,7 @@ int main(int argc, char *argv[]) if(argc >= 2) { /* Non-interactive file execution */ if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); return EXIT_FAILURE; } @@ -3614,9 +3617,13 @@ int main(int argc, char *argv[]) /* Interactive read-eval-print loop */ puts("Scheme Test Harness"); for(;;) { - if(setjmp(*error_handler) != 0) + if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); + fflush(stderr); + } printf("%lu> ", (unsigned long)total); + fflush(stdout); obj = read(input); if(obj == obj_eof) break; obj = eval(env, op_env, obj); diff --git a/mps/example/scheme/scheme.c b/mps/example/scheme/scheme.c index 8a8dcf48ed7..62ec16f2f63 100644 --- a/mps/example/scheme/scheme.c +++ b/mps/example/scheme/scheme.c @@ -401,6 +401,7 @@ static void error(const char *format, ...) if (error_handler) { longjmp(*error_handler, 1); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); abort(); @@ -3990,6 +3991,7 @@ static mps_res_t obj_scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) break; default: assert(0); + fflush(stdout); fprintf(stderr, "Unexpected object on the heap\n"); abort(); } @@ -4066,6 +4068,7 @@ static mps_addr_t obj_skip(mps_addr_t base) break; default: assert(0); + fflush(stdout); fprintf(stderr, "Unexpected object on the heap\n"); abort(); } @@ -4296,6 +4299,7 @@ static int start(int argc, char *argv[]) make_operator(optab[i].name, optab[i].entry, obj_empty, obj_empty, env, op_env)); } else { + fflush(stdout); fprintf(stderr, "Fatal error during initialization: %s\n", error_message); @@ -4305,7 +4309,9 @@ static int start(int argc, char *argv[]) if(argc >= 2) { /* Non-interactive file execution */ if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); + fflush(stderr); exit_code = EXIT_FAILURE; } else { load(env, op_env, make_string(strlen(argv[1]), argv[1])); @@ -4324,12 +4330,15 @@ static int start(int argc, char *argv[]) "If you recurse too much the interpreter may crash from using too much C stack."); for(;;) { if(setjmp(*error_handler) != 0) { + fflush(stdout); fprintf(stderr, "%s\n", error_message); + fflush(stderr); } mps_chat(); printf("%lu, %lu> ", (unsigned long)total, (unsigned long)mps_collections(arena)); + fflush(stdout); obj = read(input); if(obj == obj_eof) break; obj = eval(env, op_env, obj); diff --git a/mps/manual/source/bib.rst b/mps/manual/source/bib.rst new file mode 100644 index 00000000000..f32b6d33422 --- /dev/null +++ b/mps/manual/source/bib.rst @@ -0,0 +1,3877 @@ +.. _bibliography: + +Bibliography +************ + +* .. _AD97: + + Ole Agesen, David L. Detlefs. 1997. "`Finding References in Java Stacks `_". Sun Labs. OOPSLA97 Workshop on Garbage Collection and Memory Management. + + .. admonition:: Abstract + + Exact garbage collection for the strongly-typed Java language may + seem straightforward. Unfortunately, a single pair of bytecodes in + the Java Virtual Machine instruction set presents an obstacle that + has thus far not been discussed in the literature. 
We explain the + problem, outline the space of possible solutions, and present a + solution utilizing bytecode-preprocessing to enable exact garbage + collection while maintaining compatibility with existing compiled + Java class files. + +* .. _ADM98: + + Ole Agesen, David L. Detlefs, J. Eliot B. Moss. 1998. "`Garbage Collection and Local Variable Type-precision and Liveness in Java Virtual Machines `_". ACM. Proceedings of the ACM SIGPLAN '98 conference on Programming language design and implementation, pp. 269--279. + + .. admonition:: Abstract + + Full precision in garbage collection implies retaining only those + heap allocated objects that will actually be used in the future. + Since full precision is not computable in general, garbage + collectors use safe (i.e., conservative) approximations such as + reachability from a set of root references. Ambiguous roots + collectors (commonly called "conservative") can be overly + conservative because they overestimate the root set, and thereby + retain unexpectedly large amounts of garbage. We consider two more + precise collection schemes for Java virtual machines (JVMs). One + uses a type analysis to obtain a type-precise root set (only those + variables that contain references); the other adds a live variable + analysis to reduce the root set to only the live reference + variables. Even with the Java programming language's strong + typing, it turns out that the JVM specification has a feature that + makes type-precise root sets difficult to compute. We explain the + problem and ways in which it can be solved. + + Our experimental results include measurements of the costs of the + type and liveness analyses at load time, of the incremental + benefits at run time of the liveness analysis over the + type-analysis alone, and of various map sixes and counts. We find + that the liveness analysis often produces little or no improvement + in heap size, sometimes modest improvements, and occasionally the + improvement is dramatic. While further study is in order, we + conclude that the main benefit of the liveness analysis is + preventing bad surprises. + +* .. _AEL88: + + Andrew Appel, John R. Ellis, Kai Li. 1988. "`Real-time Concurrent Collection on Stock Multiprocessors `_". ACM, SIGPLAN. ACM PLDI 88, SIGPLAN Notices 23, 7 (July 88), pp. 11--20. + + .. admonition:: Abstract + + We've designed and implemented a copying garbage-collection + algorithm that is efficient, real-time, concurrent, runs on + commercial uniprocessors and shared-memory multiprocessors, and + requires no change to compilers. The algorithm uses standard + virtual-memory hardware to detect references to "from space" + objects and to synchronize the collector and mutator threads. + We've implemented and measured a prototype running on SRC's + 5-processor Firefly. It will be straightforward to merge our + techniques with generational collection. An incremental, + non-concurrent version could be implemented easily on many + versions of Unix. + +* .. _APPLE94: + + Apple Computer, Inc. 1994. *Inside Macintosh: Memory*. Addison-Wesley. ISBN 0-201-63240-3. + + .. admonition:: Abstract + + Inside Macintosh: Memory describes the parts of the Macintosh® + Operating System that allow you to directly allocate, release, or + otherwise manipulate memory. Everyone who programs Macintosh + computers should read this book. + + Inside Macintosh: Memory shows in detail how your application can + manage the memory partition it is allocated and perform other + memory-related operations. 
It also provides a complete technical + reference for the Memory Manager, the Virtual Memory Manager, and + other memory-related utilities provided by the system software. + +* .. _ATTARDI94: + + Giuseppe Attardi & Tito Flagella. 1994. "`A Customisable Memory Management Framework `_". TR-94-010. + + .. admonition:: Abstract + + Memory management is a critical issue for many large + object-oriented applications, but in C++ only explicit memory + reclamation through the delete operator is generally available. We + analyse different possibilities for memory management in C++ and + present a dynamic memory management framework which can be + customised to the need of specific applications. The framework + allows full integration and coexistence of different memory + management techniques. The Customisable Memory Management (CMM) is + based on a primary collector which exploits an evolution of + Bartlett's mostly copying garbage collector. Specialised + collectors can be built for separate memory heaps. A Heap class + encapsulates the allocation strategy for each heap. We show how to + emulate different garbage collection styles or user-specific + memory management techniques. The CMM is implemented in C++ + without any special support in the language or the compiler. The + techniques used in the CMM are general enough to be applicable + also to other languages. + +* .. _AFI98: + + Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. "`A customisable memory management framework for C++ `_". Software -- Practice and Experience. 28(11), 1143--1183. + + .. admonition:: Abstract + + Automatic garbage collection relieves programmers from the burden + of managing memory themselves and several techniques have been + developed that make garbage collection feasible in many + situations, including real time applications or within traditional + programming languages. However optimal performance cannot always + be achieved by a uniform general purpose solution. Sometimes an + algorithm exhibits a predictable pattern of memory usage that + could be better handled specifically, delaying as much as possible + the intervention of the general purpose collector. This leads to + the requirement for algorithm specific customisation of the + collector strategies. We present a dynamic memory management + framework which can be customised to the needs of an algorithm, + while preserving the convenience of automatic collection in the + normal case. The Customisable Memory Manager (CMM) organises + memory in multiple heaps. Each heap is an instance of a C++ class + which abstracts and encapsulates a particular storage discipline. + The default heap for collectable objects uses the technique of + mostly copying garbage collection, providing good performance and + memory compaction. Customisation of the collector is achieved + exploiting object orientation by defining specialised versions of + the collector methods for each heap class. The object oriented + interface to the collector enables coexistence and coordination + among the various collectors as well as integration with + traditional code unaware of garbage collection. The CMM is + implemented in C++ without any special support in the language or + the compiler. The techniques used in the CMM are general enough to + be applicable also to other languages. The performance of the CMM + is analysed and compared to other conservative collectors for + C/C++ in various configurations. + +* .. _AKPY98: + + Alain Azagury, Elliot K. Kolodner, Erez Petrank, Zvi Yehudai. 
1998. "`Combining Card Marking with Remembered Sets: How to Save Scanning Time `_". ACM. ISMM'98 pp. 10--19. + + .. admonition:: Abstract + + We consider the combination of card marking with remembered sets + for generational garbage collection as suggested by Hosking and + Moss. When more than two generations are used, a naive + implementation may cause excessive and wasteful scanning of the + cards and thus increase the collection time. We offer a simple + data structure and a corresponding algorithm to keep track of + which cards need be scanned for which generation. We then extend + these ideas for the Train Algorithm of Hudson and Moss. Here, the + solution is more involved, and allows tracking of which card + should be scanned for which car-collection in the train. + +* .. _BAKER77: + + Henry G. Baker, Carl Hewitt. 1977. "`The Incremental Garbage Collection of Processes `_". ACM. SIGPLAN Notices 12, 8 (August 1977), pp. 55--59. + + .. admonition:: Abstract + + This paper investigates some problems associated with an argument + evaluation order that we call "future" order, which is different + from both call-by-name and call-by-value. In call-by-future, each + formal parameter of a function is bound to a separate process + (called a "future") dedicated to the evaluation of the + corresponding argument. This mechanism allows the fully parallel + evaluation of arguments to a function, and has been shown to + augment the expressive power of a language. + + We discuss an approach to a problem that arises in this context: + futures which were thought to be relevant when they were created + become irrelevant through being ignored in the body of the + expression where they were bound. The problem of irrelevant + processes also appears in multiprocessing problem-solving systems + which start several processors working on the same problem but + with different methods, and return with the solution which + finishes first. This "parallel method strategy" has the drawback + that the processes which are investigating the losing methods must + be identified, stopped, and reassigned to more useful tasks. + + The solution we propose is that of garbage collection. We propose + that the goal structure of the solution plan be explicitly + represented in memory as part of the graph memory (like Lisp's + heap) so that a garbage collection algorithm can discover which + processes are performing useful work, and which can be recycled + for a new task. An incremental algorithm for the unified garbage + collection of storage and processes is described. + +* .. _BAKER78: + + Henry G. Baker. 1978. "`List Processing in Real Time on a Serial Computer `_". ACM. Communications of the ACM 21, 4 (April 1978), pp. 280--294. + + .. admonition:: Abstract + + A real-time list processing system is one in which the time + required by the elementary list operations (e.g. CONS, CAR, CDR, + RPLACA, RPLACD, EQ, and ATOM in LISP) is bounded by a (small) + constant. Classical implementations of list processing systems + lack this property because allocating a list cell from the heap + may cause a garbage collection, which process requires time + proportional to the heap size to finish. A real-time list + processing system is presented which continuously reclaims + garbage, including directed cycles, while linearizing and + compacting the accessible cells into contiguous locations to avoid + fragmenting the free storage pool. The program is small and + requires no time-sharing interrupts, making it suitable for + microcode. 
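The card-marking scheme summarized in AKPY98 above rests on a cheap write barrier: every store of a reference dirties a small "card" covering the updated slot, so a minor collection need scan only dirty cards for old-to-young pointers. A minimal sketch of such a barrier follows; the card size, heap layout and scanning callback are assumptions of the illustration, not the paper's design::

    /* Card-marking write barrier sketch (see AKPY98).  All sizes are
     * invented for the example. */
    #include <stddef.h>

    #define CARD_SHIFT 9                          /* 512-byte cards */
    #define HEAP_SIZE  (1u << 20)
    #define N_CARDS    (HEAP_SIZE >> CARD_SHIFT)

    static unsigned char heap[HEAP_SIZE];
    static unsigned char card_table[N_CARDS];     /* 1 = card may hold old-to-young refs */

    static size_t card_index(void *addr)
    {
        return (size_t)((unsigned char *)addr - heap) >> CARD_SHIFT;
    }

    /* Store a reference into a heap slot and dirty the card holding the slot. */
    static void write_ref(void **slot, void *ref)
    {
        *slot = ref;
        card_table[card_index(slot)] = 1;
    }

    /* At minor collection time only the dirty cards are scanned for roots
     * into the young generation; clean cards are skipped entirely. */
    static void scan_dirty_cards(void (*scan_card)(void *base, size_t len))
    {
        size_t i;
        for (i = 0; i < N_CARDS; ++i)
            if (card_table[i]) {
                scan_card(heap + (i << CARD_SHIFT), (size_t)1 << CARD_SHIFT);
                card_table[i] = 0;
            }
    }

The trade-off the paper then refines is precision: a coarser card makes the barrier cheaper but forces more scanning per dirty card, and with more than two generations some record of which generation each dirty card matters to is needed to avoid wasteful rescanning.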
Finally, the system requires the same average time, and + not more than twice the space, of a classical implementation, and + those space requirements can be reduced to approximately classical + proportions by compact list representation. Arrays of different + sizes, a program stack, and hash linking are simple extensions to + our system, and reference counting is found to be inferior for + many applications. + +* .. _BAKER79: + + Henry G. Baker. 1979. "`Optimizing Allocation and Garbage Collection of Spaces `_". In Winston and Brown, eds. *Artificial Intelligence: An MIT Perspective.* MIT Press. + + .. admonition:: Abstract + + MACLISP, unlike some other implementations of LISP, allocates + storage for different types of objects in noncontiguous areas + called "spaces". These spaces partition the active storage into + disjoint areas, each of which holds a different type of object. + For example, "list cells" are stored in one space, "full-word + integers" reside in another space, "full-word floating point + numbers" in another, and so on. + + Allocating space in this manner has several advantages. An + object's type can easily be computed from a pointer to it, without + any memory references to the object itself. Thus, the LISP + primitive ATOM(x) can easily compute its result without even + paging in x. Another advantage is that the type of an object does + not require any storage within the object, so that arithmetic with + hardware data types such as full-word integers can use hardware + instructions directly. + + There are problems associated with this method of storage and type + management, however. When all data types are allocated from the + same heap, there is no problem with varying demand for the + different data types; all data types require storage from the same + pool, so that only the total amount of storage is important. Once + different data types must be allocated from different spaces, + however, the relative sizes of the spaces becomes important. + +* .. _BAKER91: + + Henry G. Baker. 1991. "`Cache-Conscious Copying Collectors `_". OOPSLA'91/GC'91 Workshop on Garbage Collection. + + .. admonition:: Abstract + + Garbage collectors must minimize the scarce resources of cache + space and off-chip communications bandwidth to optimize + performance on modern single-chip computer architectures. + Strategies for achieving these goals in the context of copying + garbage collection are discussed. A multi-processor + mutator/collector system is analyzed. Finally, the Intel 80860XP + architecture is studied. + +* .. _BAKER92A: + + Henry G. Baker. 1992. "`Lively Linear Lisp -- 'Look Ma, No Garbage!' `_". ACM. SIGPLAN Notices 27, 8 (August 1992), pp. 89--98. + + .. admonition:: Abstract + + Linear logic has been proposed as one solution to the problem of + garbage collection and providing efficient "update-in-place" + capabilities within a more functional language. Linear logic + conserves accessibility, and hence provides a "mechanical + metaphor" which is more appropriate for a distributed-memory + parallel processor in which copying is explicit. However, linear + logic's lack of sharing may introduce significant inefficiencies + of its own. + + We show an efficient implementation of linear logic called "Linear + Lisp" that runs within a constant factor of non-linear logic. This + Linear Lisp allows RPLACX operations, and manages storage as + safely as a non-linear Lisp, but does not need a garbage + collector. 
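BAKER79 above turns on one concrete mechanism: because each type is allocated from its own contiguous "space", an object's type can be computed from its address alone, without touching the object. A toy illustration, with an invented set of types and region sizes::

    /* Type-from-address lookup in the style of the "spaces" described in
     * BAKER79.  Regions and types are invented for the example. */
    #include <stddef.h>

    typedef enum { TYPE_CONS, TYPE_FIXNUM, TYPE_FLONUM, TYPE_UNKNOWN } type_t;

    #define SPACE_SIZE (64 * 1024)

    static char cons_space[SPACE_SIZE];
    static char fixnum_space[SPACE_SIZE];
    static char flonum_space[SPACE_SIZE];

    static int in_space(const void *p, const char *space)
    {
        const char *c = p;
        return c >= space && c < space + SPACE_SIZE;
    }

    /* No memory reference to *p is needed -- only address comparisons. */
    static type_t type_of(const void *p)
    {
        if (in_space(p, cons_space))   return TYPE_CONS;
        if (in_space(p, fixnum_space)) return TYPE_FIXNUM;
        if (in_space(p, flonum_space)) return TYPE_FLONUM;
        return TYPE_UNKNOWN;
    }

This is also where the drawback mentioned in the abstract shows up: the relative sizes of the spaces are fixed choices, so demand for one type can exhaust its space while the others sit largely empty.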
Since it offers assignments but no sharing, it occupies + a twilight zone between functional languages and imperative + languages. Our Linear Lisp Machine offers many of the same + capabilities as combinator/graph reduction machines, but without + their copying and garbage collection problems. + +* .. _BAKER92C: + + Henry G. Baker. 1992. "`The Treadmill: Real-Time Garbage Collection Without Motion Sickness `_". ACM. SIGPLAN Notices 27, 3 (March 1992), pp. 66--70. + + .. admonition:: Abstract + + A simple real-time garbage collection algorithm is presented which + does not copy, thereby avoiding some of the problems caused by the + asynchronous motion of objects. This in-place "treadmill" garbage + collection scheme has approximately the same complexity as other + non-moving garbage collectors, thus making it usable in a + high-level language implementation where some pointers cannot be + traced. The treadmill is currently being used in a Lisp system + built in Ada. + +* .. _BAKER92: + + Henry G. Baker. 1992. "`CONS Should not CONS its Arguments, or, a Lazy Alloc is a Smart Alloc `_". ACM. SIGPLAN Notices 27, 3 (March 1992), 24--34. + + .. admonition:: Abstract + + "Lazy allocation" is a model for allocating objects on the + execution stack of a high-level language which does not create + dangling references. Our model provides safe transportation into + the heap for objects that may survive the deallocation of the + surrounding stack frame. Space for objects that do not survive the + deallocation of the surrounding stack frame is reclaimed without + additional effort when the stack is popped. Lazy allocation thus + performs a first-level garbage collection, and if the language + supports garbage collection of the heap, then our model can reduce + the amortized cost of allocation in such a heap by filtering out + the short-lived objects that can be more efficiently managed in + LIFO order. A run-time mechanism called "result expectation" + further filters out unneeded results from functions called only + for their effects. In a shared-memory multi-processor environment, + this filtering reduces contention for the allocation and + management of global memory. + + Our model performs simple local operations, and is therefore + suitable for an interpreter or a hardware implementation. Its + overheads for functional data are associated only with + *assignments*, making lazy allocation attractive for "mostly + functional" programming styles. Many existing stack allocation + optimizations can be seen as instances of this generic model, in + which some portion of these local operations have been optimized + away through static analysis techniques. + + Important applications of our model include the efficient + allocation of temporary data structures that are passed as + arguments to anonymous procedures which may or may not use these + data structures in a stack-like fashion. The most important of + these objects are functional arguments (funargs), which require + some run-time allocation to preserve the local environment. Since + a funarg is sometimes returned as a first-class value, its + lifetime can survive the stack frame in which it was created. + Arguments which are evaluated in a lazy fashion (Scheme "delays" + or "suspensions") are similarly handled. Variable-length argument + "lists" themselves can be allocated in this fashion, allowing + these objects to become "first-class". 
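The treadmill of BAKER92C above keeps every object on one cyclic doubly-linked list and does all of its "copying" by relinking, so no object ever moves in memory. The sketch below shows only the list surgery used to grey a white object when the scanner finds a reference to it; allocation, the colour boundaries and the flip are omitted, and the names are invented for the illustration::

    /* Treadmill list surgery sketch (see BAKER92C). */
    #include <stddef.h>

    typedef struct node {
        struct node *prev, *next;
        /* ... client object data ... */
    } node_t;

    /* Unlink a node from wherever it sits on the cycle. */
    static void unsnap(node_t *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* Relink n immediately after pos. */
    static void snap_after(node_t *pos, node_t *n)
    {
        n->next = pos->next;
        n->prev = pos;
        pos->next->prev = n;
        pos->next = n;
    }

    /* Greying: move a white object (on the list, not in memory) to just
     * after the scan pointer, where the collector will visit it later. */
    static void shade_grey(node_t *white_obj, node_t *scan)
    {
        unsnap(white_obj);
        snap_after(scan, white_obj);
    }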
Finally, lazy allocation + correctly handles the allocation of a Scheme control stack, + allowing Scheme continuations to become first-class values. + +* .. _BAKER92B: + + Henry G. Baker. 1992. "`NREVERSAL of Fortune -- The Thermodynamics of Garbage Collection `_". Springer-Verlag. LNCS Vol. 637. + + .. admonition:: Abstract + + The need to *reverse* a computation arises in many contexts -- + debugging, editor undoing, optimistic concurrency undoing, + speculative computation undoing, trace scheduling, exception + handling undoing, database recovery, optimistic discrete event + simulations, subjunctive computing, etc. The need to *analyze* a + reversed computation arises in the context of static analysis -- + liveness analysis, strictness analysis, type inference, etc. + Traditional means for restoring a computation to a previous state + involve checkpoints; checkpoints require time to copy, as well as + space to store, the copied material. Traditional reverse abstract + interpretation produces relatively poor information due to its + inability to guess the previous values of assigned-to variables. + + We propose an abstract computer model and a programming language + -- Psi-Lisp -- whose primitive operations are injective and hence + reversible, thus allowing arbitrary undoing without the overheads + of checkpointing. Such a computer can be built from reversible + conservative logic circuits, with the serendipitous advantage of + dissipating far less heat than traditional Boolean AND/OR/NOT + circuits. Unlike functional languages, which have one "state" for + all times, Psi-Lisp has at all times one "state", with unique + predecessor and successor states. + + Compiling into a reversible pseudocode can have benefits even when + targeting a traditional computer. Certain optimizations, e.g., + update-in-place, and compile-time garbage collection may be more + easily performed, because the information may be elicited without + the difficult and time-consuming iterative abstract interpretation + required for most non-reversible models. + + In a reversible machine, garbage collection for recycling storage + can always be performed by a reversed (sub)computation. While this + "collection is reversed mutation" insight does not reduce space + requirements when used for the computation as a whole, it does + save space when used to recycle at finer scales. This insight also + provides an explanation for the fundamental importance of the + push-down stack both for recognizing palindromes and for managing + storage. + + Reversible computers are related to *Prolog*, *linear logic* and + *chemical abstract machines*. + +* .. _BAKER93: + + Henry G. Baker. 1993. "`'Infant Mortality' and Generational Garbage Collection `_". ACM. SIGPLAN Notices 28, 4 (April 1993), pp. 55--57. + + .. admonition:: Abstract + + Generation-based garbage collection has been advocated by + appealing to the intuitive but vague notion that "young objects + are more likely to die than old objects". The intuition is, that + if a generation-based garbage collection scheme focuses its effort + on scanning recently created objects, then its scanning efforts + will pay off more in the form of more recovered garbage, than if + it scanned older objects. In this note, we show a counterexample + of a system in which "infant mortality" is as high as you please, + but for which generational garbage collection is ineffective for + improving the average mark/cons ratio. 
Other benefits, such as + better locality and a smaller number of large delays, may still + make generational garbage collection attractive for such a system, + however. + +* .. _BAKER93A: + + Henry G. Baker. 1993. "`Equal Rights for Functional Objects or, The More Things Change, The More They Are the Same `_". ACM. OOPS Messenger 4, 4 (October 1993), pp. 2--27. + + .. admonition:: Abstract + + We argue that intensional object identity in object-oriented + programming languages and databases is best defined operationally + by side-effect semantics. A corollary is that "functional" objects + have extensional semantics. This model of object identity, which + is analogous to the normal forms of relational algebra, provides + cleaner semantics for the value-transmission operations and + built-in primitive equality predicate of a programming language, + and eliminates the confusion surrounding "call-by-value" and + "call-by-reference" as well as the confusion of multiple equality + predicates. + + Implementation issues are discussed, and this model is shown to + have significant performance advantages in persistent, parallel, + distributed and multilingual processing environments. This model + also provides insight into the "type equivalence" problem of + Algol-68, Pascal and Ada. + +* .. _BAKER94: + + Henry G. Baker. 1994. "`Minimizing Reference Count Updating with Deferred and Anchored Pointers for Functional Data Structures `_". ACM. SIGPLAN Notices 29, 9 (September 1994), pp. 38--43. + + .. admonition:: Abstract + + "Reference counting" can be an attractive form of dynamic storage + management. It recovers storage promptly and (with a garbage stack + instead of a free list) it can be made "real-time" -- i.e., all + accesses can be performed in constant time. Its major drawbacks + are its inability to reclaim cycles, its count storage, and its + count update overhead. Update overhead is especially irritating + for functional (read-only) data where updates may dirty pristine + cache lines and pages. + + We show how reference count updating can be largely eliminated for + functional data structures by using the "linear style" of + programming that is inspired by Girard's linear logic, and by + distinguishing normal pointers from "anchored pointers", which + indicate not only the object itself, but also the depth of the + stack frame that anchors the object. An "anchor" for a pointer is + essentially an enclosing data structure that is temporarily locked + from being collected for the duration of the anchored pointer's + existence by a deferred reference count. An "anchored pointer" + thus implies a reference count increment that has been deferred + until it is either cancelled or performed. + + Anchored pointers are generalizations of "borrowed" pointers and + "phantom" pointers. Anchored pointers can provide a solution to + the "derived pointer problem" in garbage collection. + +* .. _BAKER94A: + + Henry G. Baker. 1994. "`Thermodynamics and Garbage Collection `_". ACM. SIGPLAN Notices 29, 4 (April 1994), pp. 58--63. + + .. admonition:: Abstract + + We discuss the principles of statistical thermodynamics and their + application to storage management problems. We point out problems + which result from imprecise usage of the terms "information", + "state", "reversible", "conservative", etc. + +* .. _BAKER95A: + + Henry G. Baker. 1995. "`'Use-Once' Variables and Linear Objects -- Storage Management, Reflection and Multi-Threading `_". ACM. SIGPLAN Notices 30, 1 (January 1995), pp. 45--52. + + .. 
admonition:: Abstract + + Programming languages should have 'use-once' variables in addition + to the usual 'multiple-use' variables. 'Use-once' variables are + bound to linear (unshared, unaliased, or singly-referenced) + objects. Linear objects are cheap to access and manage, because + they require no synchronization or tracing garbage collection. + Linear objects can elegantly and efficiently solve otherwise + difficult problems of functional/mostly-functional systems -- + e.g., in-place updating and the efficient initialization of + functional objects. Use-once variables are ideal for directly + manipulating resources which are inherently linear such as + freelists and 'engine ticks' in reflective languages. + + A 'use-once' variable must be dynamically referenced exactly once + within its scope. Unreferenced use-once variables must be + explicitly killed, and multiply-referenced use-once variables must + be explicitly copied; this duplication and deletion is subject to + the constraint that some linear datatypes do not support + duplication and deletion methods. Use-once variables are bound + only to linear objects, which may reference other linear or + non-linear objects. Non-linear objects can reference other + non-linear objects, but can reference a linear object only in a + way that ensures mutual exclusion. + + Although implementations have long had implicit use-once variables + and linear objects, most languages do not provide the programmer + any help for their utilization. For example, use-once variables + allow for the safe/controlled use of reified language + implementation objects like single-use continuations. + + Linear objects and use-once variables map elegantly into dataflow + models of concurrent computation, and the graphical + representations of dataflow models make an appealing visual linear + programming language. + +* .. _BAKER95: + + Henry G. Baker. 1995. *Memory Management: International Workshop IWMM'95*. Springer-Verlag. ISBN 3-540-60368-9. + + .. admonition:: From the Preface + + The International Workshop on Memory Management 1995 (IWMM'95) is + a continuation of the excellent series started by Yves Bekkers and + Jacques Cohen with IWMM'92. The present volume assembles the + refereed and invited technical papers which were presented during + this year's workshop. + +* .. _BBW97: + + Nick Barnes, Richard Brooksby, David Jones, Gavin Matthews, Pekka P. Pirinen, Nick Dalton, P. Tucker Withington. 1997. "`A Proposal for a Standard Memory Management Interface `_". OOPSLA97 Workshop on Garbage Collection and Memory Management. + + .. admonition:: From the notes + + There is no well-defined memory-management library API which would + allow programmers to easily choose the best memory management + implementation for their application. + + Some languages allow replacement of their memory management + functions, but usually only the program API is specified, hence + replacement of the entire program interface is required. + + Few languages support multiple memory management policies within a + single program. Those that do use proprietary memory management + policies. + + We believe that the design of an abstract program API is a + prerequisite to the design of a “server” API and eventually an API + that would permit multiple cooperating memory “servers”. If the + interface is simple yet powerful enough to encompass most memory + management systems, it stands a good chance of being widely + adopted. + +* .. _ZORN93B: + + David A. Barrett, Benjamin Zorn. 1993. 
"`Using Lifetime Predictors to Improve Memory Allocation Performance `_". ACM. SIGPLAN'93 Conference on Programming Language Design and Implementation, pp. 187--196. + + .. admonition:: Abstract + + Dynamic storage allocation is used heavily in many application + areas including interpreters, simulators, optimizers, and + translators. We describe research that can improve all aspects of + the performance of dynamic storage allocation by predicting the + lifetimes of short-lived objects when they are allocated. Using + five significant, allocation-intensive C programs, we show that a + great fraction of all bytes allocated are short-lived (> 90% in + all cases). Furthermore, we describe an algorithm for lifetime + prediction that accurately predicts the lifetimes of 42--99% of all + objects allocated. We describe and simulate a storage allocator + that takes advantage of lifetime prediction of short-lived objects + and show that it can significantly improve a program's memory + overhead and reference locality, and even, at times, improve CPU + performance as well. + +* .. _BARRETT93: + + David A. Barrett, Benjamin Zorn. 1995. "`Garbage Collection using a Dynamic Threatening Boundary `_". ACM. SIGPLAN'95 Conference on Programming Language Design and Implementation, pp. 301--314. + + .. admonition:: Abstract + + Generational techniques have been very successful in reducing the + impact of garbage collection algorithms upon the performance of + programs. However, it is impossible for designers of collection + algorithms to anticipate the memory allocation behavior of all + applications in advance. Existing generational collectors rely + upon the applications programmer to tune the behavior of the + collector to achieve maximum performance for each application. + Unfortunately, because the many tuning parameters require detailed + knowledge of both the collection algorithm and the program + allocation behavior in order to be used effectively, such tuning + is difficult and error prone. We propose a new garbage collection + algorithm that uses just two easily understood tuning parameters + that directly reflect the maximum memory and pause time + constraints familiar to application programmers and users. + + Like generational collectors, ours divides memory into two spaces, + one for short-lived, and another for long-lived objects. Unlike + previous work, our collector dynamically adjusts the boundary + between these two spaces in order to directly meet the resource + constraints specified by the user. We describe two methods for + adjusting this boundary, compare them with several existing + algorithms, and show how effectively ours meets the specified + constraints. Our pause time collector saved memory by holding + median pause times closer to the constraint than the other pause + time constrained algorithm and, when not over-constrained, our + memory constrained collector exhibited the lowest CPU overhead of + the algorithms we measured yet was capable of maintaining a + maximum memory constraint. + +* .. _BARTLETT88: + + Joel F. Bartlett. 1988. "`Compacting Garbage Collection with Ambiguous Roots `_". Digital Equipment Corporation. + + .. admonition:: Abstract + + This paper introduces a copying garbage collection algorithm which + is able to compact most of the accessible storage in the heap + without having an explicitly defined set of pointers that contain + all the roots of all accessible storage. 
Using "hints" found in + the processor's registers and stack, the algorithm is able to + divide heap allocated objects into two groups: those that might be + referenced by a pointer in the stack or registers, and those that + are not. The objects which might be referenced are left in place, + and the other objects are copied into a more compact + representation. + + A Lisp compiler and runtime system which uses such a collector + need not have complete control of the processor in order to force + a certain discipline on the stack and registers. A Scheme + implementation has been done for the Digital WRL Titan processor + which uses a garbage collector based on this "mostly copying" + algorithm. Like other languages for the Titan, it uses the Mahler + intermediate language as its target. This simplifies the compiler + and allows it to take advantage of the significant machine + dependent optimizations provided by Mahler. The common + intermediate language also simplifies call-outs from Scheme + programs to functions written in other languages and call-backs + from functions in other languages. + + Measurements of the Scheme implementation show that the algorithm + is efficient, as little unneeded storage is retained and only a + very small fraction of the heap is left in place. + + Simple pointer manipulation protocols also mean that compiler + support is not needed in order to correctly handle pointers. Thus + it is reasonable to provide garbage collected storage in languages + such as C. A collector written in C which uses this algorithm is + included in the Appendix. + +* .. _BARTLETT89: + + Joel F. Bartlett. 1989. "`Mostly-Copying Garbage Collection Picks Up Generations and C++ `_". Digital Equipment Corporation. + + .. admonition:: Abstract + + The "mostly-copying" garbage collection algorithm provides a way + to perform compacting garbage collection in spite of the presence + of ambiguous pointers in the root set. As originally defined, each + collection required almost all accessible objects to be moved. + While adequate for many applications, programs that retained a + large amount of storage spent a significant amount of time garbage + collecting. To improve performance of these applications, a + generational version of the algorithm has been designed. This note + reports on this extension of the algorithm, and its application in + collectors for Scheme and C++. + +* .. _BC92: + + Yves Bekkers & Jacques Cohen. 1992. "`Memory Management, International Workshop IWMM 92 `_". Springer-Verlag. LNCS Vol. 637, ISBN 3-540-55940-X. + +* .. _BB99: + + Emery D. Berger, Robert D. Blumofe. 1999. "`Hoard: A Fast, Scalable, and Memory-Efficient Allocator for Shared-Memory Multiprocessors `_". University of Texas at Austin. UTCS TR99-22. + + .. admonition:: Abstract + + In this paper, we present Hoard, a memory allocator for + shared-memory multiprocessors. We prove that its worst-case memory + fragmentation is asymptotically equivalent to that of an optimal + uniprocessor allocator. We present experiments that demonstrate + its speed and scalability. + +* .. _BERGER01: + + Emery D. Berger, Benjamin G. Zorn, Kathryn S. McKinley. 2001. "`Composing high-performance memory allocators `_" ACM SIGPLAN Conference on Programming Language Design and Implementation 2001, pp. 114--124. + + .. admonition:: Abstract + + Current general-purpose memory allocators do not provide + sufficient speed or flexibility for modern high-performance + applications. 
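The mostly-copying collectors of BARTLETT88 and BARTLETT89 above hinge on an ambiguous-root scan: any word on the stack or in registers that might be a heap pointer pins the storage it appears to refer to, and only unpinned objects are compacted. Here is a minimal sketch of that scan, assuming a contiguous heap and page-granularity pinning (assumptions of the illustration, not of the papers)::

    /* Ambiguous (conservative) root scan sketch (see BARTLETT88). */
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define HEAP_PAGES 256

    static char heap[HEAP_PAGES << PAGE_SHIFT];
    static unsigned char pinned[HEAP_PAGES];    /* pages that must not be moved */

    static int looks_like_heap_pointer(uintptr_t word)
    {
        return word >= (uintptr_t)heap
            && word <  (uintptr_t)heap + sizeof heap;
    }

    /* Conservatively scan the word range [base, limit) -- typically the C
     * stack between a marker frame and the stack bottom. */
    static void scan_ambiguous_range(void *base, void *limit)
    {
        uintptr_t *p;
        for (p = base; (void *)p < limit; ++p)
            if (looks_like_heap_pointer(*p))
                pinned[(*p - (uintptr_t)heap) >> PAGE_SHIFT] = 1;
    }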
Highly-tuned general purpose allocators have + per-operation costs around one hundred cycles, while the cost of + an operation in a custom memory allocator can be just a handful of + cycles. To achieve high performance, programmers often write + custom memory allocators from scratch -- a difficult and + error-prone process. + + In this paper, we present a flexible and efficient infrastructure + for building memory allocators that is based on C++ templates and + inheritance. This novel approach allows programmers to build + custom and general-purpose allocators as “heap layers” that can be + composed without incurring any additional runtime overhead or + additional programming cost. We show that this infrastructure + simplifies allocator construction and results in allocators that + either match or improve the performance of heavily-tuned + allocators written in C, including the Kingsley allocator and the + GNU obstack library. We further show this infrastructure can be + used to rapidly build a general-purpose allocator that has + performance comparable to the Lea allocator, one of the best + uniprocessor allocators available. We thus demonstrate a clean, + easy-to-use allocator interface that seamlessly combines the power + and efficiency of any number of general and custom allocators + within a single application. + +* .. _BW88: + + Hans-J. Boehm, Mark Weiser. 1988. "`Garbage collection in an uncooperative environment `_". Software -- Practice and Experience. 18(9):807--820. + + .. admonition:: Abstract + + We describe a technique for storage allocation and garbage + collection in the absence of significant co-operation from the + code using the allocator. This limits garbage collection overhead + to the time actually required for garbage collection. In + particular, application programs that rarely or never make use of + the collector no longer encounter a substantial performance + penalty. This approach greatly simplifies the implementation of + languages supporting garbage collection. It further allows + conventional compilers to be used with a garbage collector, either + as the primary means of storage reclamation, or as a debugging + tool. + +* .. _BDS91: + + Hans-J. Boehm, Alan J. Demers, Scott Shenker. 1991. "`Mostly Parallel Garbage Collection `_". Xerox PARC. ACM PLDI 91, SIGPLAN Notices 26, 6 (June 1991), pp. 157--164. + + .. admonition:: Abstract + + We present a method for adapting garbage collectors designed to + run sequentially with the client, so that they may run + concurrently with it. We rely on virtual memory hardware to + provide information about pages that have been updated or + "dirtied" during a given period of time. This method has been used + to construct a mostly parallel trace-and-sweep collector that + exhibits very short pause times. Performance measurements are + given. + +* .. _BC92A: + + Hans-J. Boehm, David Chase. 1992. "`A Proposal for Garbage-Collector-Safe C Compilation `_". *Journal of C Language Translation.* vol. 4, 2 (December 1992), pp. 126--141. + + .. admonition:: Abstract + + Conservative garbage collectors are commonly used in combination + with conventional C programs. Empirically, this usually works + well. However, there are no guarantees that this is safe in the + presence of "improved" compiler optimization. We propose that C + compilers provide a facility to suppress optimizations that are + unsafe in the presence of conservative garbage collection. 
Such a + facility can be added to an existing compiler at very minimal + cost, provided the additional analysis is done in a + machine-independent source-to-source prepass. Such a prepass may + also check the source code for garbage-collector-safety. + +* .. _BOEHM93: + + Hans-J. Boehm. 1993. "`Space Efficient Conservative Garbage Collection `_". ACM, SIGPLAN. Proceedings of the ACM SIGPLAN '91 Conference on Programming Language Design and Implementation, SIGPLAN Notices 28, 6, pp 197--206. + + .. admonition:: Abstract + + We call a garbage collector conservative if it has only partial + information about the location of pointers, and is thus forced to + treat arbitrary bit patterns as though they might be pointers, in + at least some cases. We show that some very inexpensive, but + previously unused techniques can have dramatic impact on the + effectiveness of conservative garbage collectors in reclaiming + memory. Our most significant observation is that static data that + appears to point to the heap should not result in misidentified + reference to the heap. The garbage collector has enough + information to allocate around such references. We also observe + that programming style has a significantly impact on the amount of + spuriously retained storage, typically even if the collector is + not terribly conservative. Some fairly common C and C++ + programming styles significantly decrease the effectiveness of any + garbage collector. These observations suffice to explain some of + the different assessments of conservative collection that have + appeared in the literature. + +* .. _BOEHM00: + + Hans-J. Boehm. 2000. "`Reducing Garbage Collector Cache Misses `_". ACM. ISMM'00 pp. 59--64. + + .. admonition:: Abstract + + Cache misses are currently a major factor in the cost of garbage + collection, and we expect them to dominate in the future. + Traditional garbage collection algorithms exhibit relatively litle + temporal locality; each live object in the heap is likely to be + touched exactly once during each garbage collection. We measure + two techniques for dealing with this issue: prefetch-on-grey, and + lazy sweeping. The first of these is new in this context. Lazy + sweeping has been in common use for a decade. It was introduced as + a mechanism for reducing paging and pause times; we argue that it + is also crucial for eliminating cache misses during the sweep + phase. + + Our measurements are obtained in the context of a non-moving + garbage collector. Fully copying garbage collection inherently + requires more traffic through the cache, and thus probably also + stands to benefit substantially from something like the + prefetch-on-grey technique. Generational garbage collection may + reduce the benefit of these techniques for some applications, but + experiments with a non-moving generational collector suggest that + they remain quite useful. + +* .. _BOEHM02: + + Hans-J. Boehm. 2002. "`Destructors, Finalizers, and Synchronization `_". HP Labs technical report HPL-2002-335. + + .. admonition:: Abstract + + We compare two different facilities for running cleanup actions + for objects that are about to reach the end of their life. + Destructors, such as we find in C++, are invoked synchronously + when an object goes out of scope. They make it easier to implement + cleanup actions for objects of well-known lifetime, especially in + the presence of exceptions. 
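BC92A above asks C compilers to suppress transformations that are unsafe in the presence of conservative collection. The hypothetical fragment below illustrates the underlying hazard rather than the paper's proposal: if the only reference to an object survives solely in a "disguised" form that no longer looks like its address, a conservative collector can reclaim the object while it is still in use. ``GC_malloc`` stands in for any conservatively-collected allocator; the XOR key and helper names are invented::

    /* Disguised-pointer hazard -- a hypothetical illustration only. */
    #include <stddef.h>
    #include <stdint.h>

    extern void *GC_malloc(size_t n);        /* assumed conservative allocator */

    #define KEY ((uintptr_t)0x5a5a5a5a)

    static uintptr_t disguise(void *p)    { return (uintptr_t)p ^ KEY; }
    static void *undisguise(uintptr_t d)  { return (void *)(d ^ KEY); }

    static void unsafe_example(void)
    {
        int *counter = GC_malloc(sizeof *counter);
        *counter = 0;

        /* Once the original pointer is dead, no word equal to the object's
         * address remains anywhere the collector scans, so the object looks
         * unreachable even though it will be used again below. */
        uintptr_t hidden = disguise(counter);
        counter = NULL;

        /* ... a collection here may reclaim the object ... */

        int *p = undisguise(hidden);
        *p += 1;                             /* potential use after reclaim */
    }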
Languages like Java, Modula-3, and C# + provide a different kind of "finalization" facility: Cleanup + methods may be run when the garbage collector discovers a heap + object to be otherwise inaccessible. Unlike C++ destructors, such + methods run in a separate thread at some much less well-defined + time. We argue that these are fundamentally different, and + potentially complementary, language facilities. We also try to + resolve some common misunderstandings about finalization in the + process. In particular: 1. The asynchronous nature of finalizers + is not just an accident of implementation or a shortcoming of + tracing collectors; it is necessary for correctness of client + code, fundamentally affects how finalizers must be written, and + how finalization facilities should be presented to the user. 2. An + object may legitimately be finalized while one of its methods are + still running. This should and can be addressed by the language + specification and client code. + +* .. _BM77: + + Robert S. Boyer and J. Strother Moore. 1977. "`A Fast String Searching Algorithm `_". *Communications of the ACM* 20(10):762--772. + + .. admonition:: Abstract + + An algorithm is presented that searches for the location, "*i*," + of the first occurrence of a character string, "*pat*," in another + string, "*string*." During the search operation, the characters of + *pat* are matched starting with the last character of *pat*. The + information gained by starting the match at the end of the pattern + often allows the algorithm to proceed in large jumps through the + text being searched. Thus the algorithm has the unusual property + that, in most cases, not all of the first *i* characters of + *string* are inspected. The number of characters actually + inspected (on the average) decreases as a function of the length + of *pat*. For a random English pattern of length 5, the algorithm + will typically inspect *i*/4 characters of string before finding a + match at *i*. Furthermore, the algorithm has been implemented so + that (on the average) fewer than *i* + *patlen* machine + instructions are executed. These conclusions are supported with + empirical evidence and a theoretical analysis of the average + behavior of the algorithm. The worst case behavior of the + algorithm is linear in *i* + *patlen*, assuming the availability + of array space for tables linear in *patlen* plus the size of the + alphabet. + +* .. _BL72: + + P. Branquart, J. Lewi. 1972. "A scheme of storage allocation and garbage collection for ALGOL 68". Elsevier/North-Holland. ALGOL 68 Implementation -- Proceedings of the IFIP Working Conference on ALGOL 68 Implementation, July 1970. + +* .. _BROOKSBY02: + + Richard Brooksby. 2002. "`The Memory Pool System: Thirty person-years of memory management development goes Open Source `_". ISMM'02. + + .. admonition:: Abstract + + The Memory Pool System (MPS) is a very general, adaptable, + flexible, reliable, and efficient memory management system. It + permits the flexible combination of memory management techniques, + supporting manual and automatic memory management, in-line + allocation, finalization, weakness, and multiple simultaneous + co-operating incremental generational garbage collections. It also + includes a library of memory pool classes implementing specialized + memory management policies. + + Between 1994 and 2001, Harlequin (now part of Global Graphics) + invested about thirty person-years of effort developing the MPS. 
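BM77 above is the classic right-to-left string search. As a concrete reminder of why matching from the end of the pattern lets the search skip through the text, here is a compact sketch of the Boyer-Moore-Horspool simplification, which keeps only the bad-character shift table; full Boyer-Moore adds the second, good-suffix rule described in the paper::

    /* Boyer-Moore-Horspool search sketch (a simplification of BM77). */
    #include <stddef.h>
    #include <limits.h>

    /* Return the index of the first occurrence of pat in text, or -1. */
    static long bmh_search(const char *text, size_t n, const char *pat, size_t m)
    {
        size_t shift[UCHAR_MAX + 1];
        size_t i, k;

        if (m == 0 || m > n)
            return m == 0 ? 0 : -1;

        for (i = 0; i <= UCHAR_MAX; ++i)
            shift[i] = m;                    /* default: skip the whole pattern */
        for (i = 0; i + 1 < m; ++i)
            shift[(unsigned char)pat[i]] = m - 1 - i;

        for (k = m - 1; k < n; ) {           /* k: text position under pat's last char */
            for (i = 0; i < m && pat[m - 1 - i] == text[k - i]; ++i)
                ;
            if (i == m)
                return (long)(k - (m - 1));
            k += shift[(unsigned char)text[k]];
        }
        return -1;
    }

A mismatch on the very first comparison usually advances the window by the full pattern length, which is where the sub-linear average behaviour described in the abstract comes from.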
+ The system contained many innovative techniques and abstractions + which were kept secret. In 1997 Richard Brooksby, the manager and + chief architect of the project, and Nicholas Barnes, a senior + developer, left Harlequin to form their own consultancy company, + Ravenbrook, and in 2001, Ravenbrook acquired the MPS technology + from Global Graphics. We are happy to announce that we are + publishing the source code and documentation under an open source + licence. This paper gives an overview of the system. + +* .. _C1990: + + International Standard ISO/IEC 9899:1990. "Programming languages — C". + +* .. _C1999: + + International Standard ISO/IEC 9899:1999. "`Programming languages — C `_". + +* .. _CGZ94: + + Brad Calder, Dirk Grunwald, Benjamin Zorn. 1994. "`Quantifying Behavioral Differences Between C and C++ Programs `_". *Journal of Programming Languages.* 2(4):313--351. + + .. admonition:: Abstract + + Improving the performance of C programs has been a topic of great + interest for many years. Both hardware technology and compiler + optimization research has been applied in an effort to make C + programs execute faster. In many application domains, the C++ + language is replacing C as the programming language of choice. In + this paper, we measure the empirical behavior of a group of + significant C and C++ programs and attempt to identify and + quantify behavioral differences between them. Our goal is to + determine whether optimization technology that has been successful + for C programs will also be successful in C++ programs. We + furthermore identify behavioral characteristics of C++ programs + that suggest optimizations that should be applied in those + programs. Our results show that C++ programs exhibit behavior that + is significantly different than C programs. These results should + be of interest to compiler writers and architecture designers who + are designing systems to execute object-oriented programs. + +* .. _CPC00: + + Dante J. Cannarozzi, Michael P. Plezbert, Ron K. Cytron. 2000. "`Contaminated garbage collection `_". ACM. Proceedings of the ACM SIGPLAN '00 conference on on Programming language design and implementation, pp. 264--273. + + .. admonition:: Abstract + + We describe a new method for determining when an object can be + garbage collected. The method does not require marking live + objects. Instead, each object *X* is *dynamically* associated with + a stack frame *M*, such that *X* is collectable when *M* pops. + Because *X* could have been dead earlier, our method is + conservative. Our results demonstrate that the methos nonetheless + idenitifies a large percentage of collectable objects. The method + has been implemented in Sun's Java™ Virtual Machine interpreter, + and results are presented based on this implementation. + +* .. _CW86: + + Patrick J. Caudill, Allen Wirfs-Brock. 1986. "A Third-Generation Smalltalk-80 Implementation". ACM. SIGPLAN Notices. 21(11), OOPSLA'86 ACM Conference on Object-Oriented Systems, Languages and Applications. + + .. admonition:: Abstract + + A new, high performance Smalltalk-80™ implementation is described + which builds directly upon two previous implementation efforts. + This implementation supports a large object space while retaining + compatibility with previous Smalltalk-80™ images. The + implementation utilizes a interpreter which incorporates a + generation based garbage collector and which does not have an + object table. 
This paper describes the design decisions which lead + to this implementation and reports preliminary performance + results. + +* .. _CHENEY70: + + C. J. Cheney. 1970. "`A non-recursive list compacting algorithm `_". CACM. 13-11 pp. 677--678. + + .. admonition:: Abstract + + A simple nonrecursive list structure compacting scheme or garbage + collector suitable for both compact and LISP-like list structures + is presented. The algorithm avoids the need for recursion by using + the partial structure as it is built up to keep track of those + lists that have been copied. + +* .. _CHL98: + + Perry Cheng, Robert Harper, Peter Lee. 1998. "`Generational stack collection and profile-driven pretenuring `_". ACM. Proceedings of SIGPLAN'98 Conference on Programming Language Design and Implementation, pp. 162--173. + + .. admonition:: Abstract + + This paper presents two techniques for improving garbage + collection performance: generational stack collection and + profile-driven pretenuring. The first is applicable to stack-based + implementations of functional languages while the second is useful + for any generational collector. We have implemented both + techniques in a generational collector used by the TIL compiler, + and have observed decreases in garbage collection times of as much + as 70% and 30%, respectively. + + Functional languages encourage the use of recursion which can lead + to a long chain of activation records. When a collection occurs, + these activation records must be scanned for roots. We show that + scanning many activation records can take so long as to become the + dominant cost of garbage collection. However, most deep stacks + unwind very infrequently, so most of the root information obtained + from the stack remains unchanged across successive garbage + collections. *Generational stack collection* greatly reduces the + stack scan cost by reusing information from previous scans. + + Generational techniques have been successful in reducing the cost + of garbage collection. Various complex heap arrangements and + tenuring policies have been proposed to increase the effectiveness + of generational techniques by reducing the cost and frequency of + scanning and copying. In contrast, we show that by using profile + information to make lifetime predictions, *pretenuring* can avoid + copying data altogether. In essence, this technique uses a + refinement of the generational hypothesis (most data die young) + with a locality principle concerning the age of data: most + allocations sites produce data that immediately dies, while a few + allocation sites consistently produce data that survives many + collections. + +* .. _CL98: + + Trishul M. Chilimbi, James R. Larus. 1998. "`Using Generational Garbage Collection To Implement Cache-Conscious Data Placement `_". ACM. ISMM'98 pp. 37--48. + + .. admonition:: Abstract + + Processor and memory technology trends show a continual increase + in the cost of accessing main memory. Machine designers have tried + to mitigate the effect of this trend through a variety of + techniques that attempt to reduce or tolerate memory latency. + These techniques, unfortunately, have only been partially + successful for pointer-manipulating programs. Recent research has + demonstrated that these programs can benefit greatly from the + complementary approach of reorganizing pointer data structures to + improve cache locality. 
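CHENEY70 above is short enough to restate in code: the scan and free pointers into to-space double as the work queue, so the breadth-first copy needs neither recursion nor an explicit stack. The object layout below (a slot count plus a forwarding field) and the bump-pointer bookkeeping are assumptions of this sketch, not of the paper::

    /* Cheney-style non-recursive copying sketch (see CHENEY70). */
    #include <stddef.h>
    #include <string.h>

    typedef struct obj {
        size_t nslots;               /* number of reference slots */
        struct obj *forward;         /* set once copied to to-space */
        struct obj *slot[];          /* the references themselves */
    } obj_t;

    static char *scan, *free_ptr;    /* both point into to-space */

    static size_t obj_bytes(const obj_t *o)
    {
        return sizeof(obj_t) + o->nslots * sizeof(obj_t *);
    }

    static obj_t *copy(obj_t *o)
    {
        obj_t *to;
        if (o == NULL) return NULL;
        if (o->forward) return o->forward;       /* already copied */
        to = (obj_t *)free_ptr;
        memcpy(to, o, obj_bytes(o));             /* bump-allocate in to-space */
        free_ptr += obj_bytes(o);
        o->forward = to;                         /* leave a forwarding pointer */
        return to;
    }

    static void collect(obj_t **roots, size_t nroots, char *tospace)
    {
        size_t i;
        scan = free_ptr = tospace;               /* bounds check omitted */
        for (i = 0; i < nroots; ++i)
            roots[i] = copy(roots[i]);
        while (scan < free_ptr) {                /* grey region = [scan, free_ptr) */
            obj_t *o = (obj_t *)scan;
            for (i = 0; i < o->nslots; ++i)
                o->slot[i] = copy(o->slot[i]);
            scan += obj_bytes(o);
        }
    }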
This paper describes how a generational + garbage collector can be used to achieve a cache-conscious data + layout, in which objects with high temporal affinity are placed + next to each other, so they are likely to reside in the same cache + block. The paper demonstrates the feasibility of collecting low + overhead, real-time profiling information about data access + patterns for object-oriented languages, and describes a new + copying algorithm that utilizes this information to produce a + cache-conscious object layout. Preliminary results indicate that + this technique reduces cache miss rates by 21-42\%, and improves + program performance by 14-37\%. + +* .. _CH97: + + William D Clinger & Lars T Hansen. 1997. "`Generational Garbage Collection and the Radioactive Decay Model `_". ACM. Proceedings of PLDI 1997. + + .. admonition:: Abstract + + If a fixed exponentially decreasing probability distribution + function is used to model every object's lifetime, then the age of + an object gives no information about its future life expectancy. + This *radioactive decay model* implies that there can be no + rational basis for deciding which live objects should be promoted + to another generation. Yet there remains a rational basis for + deciding how many objects to promote, when to collect garbage, and + which generations to collect. + + Analysis of the model leads to a new kind of generational garbage + collector whose effectiveness does not depend upon heuristics that + predict which objects will live longer than others. + + This result provides insight into the computational advantages of + generational garbage collection, with implications for the + management of objects whose life expectancies are difficult to + predict. + +* .. _COHEN81: + + Jacques Cohen. 1981. "Garbage collection of linked data structures". Computing Surveys. Vol. 13, no. 3. + + .. admonition:: Abstract + + A concise and unified view of the numerous existing algorithms for + performing garbage collection of linked data structures is + presented. The emphasis is on garbage collection proper, rather + than on storage allocation. + + First, the classical garbage collection algorithms and their + marking and collecting phases, with and without compacting, are + discussed. + + Algorithms describing these phases are classified according to the + type of cells to be collected: those for collecting single-sized + cells are simpler than those for varisized cells. Recently + proposed algorithms are presented and compared with the classical + ones. Special topics in garbage collection are also covered. A + bibliography with topical annotations is included. + +* .. _CCZ98: + + Dominique Colnet, Philippe Coucaud, Olivier Zendra. 1998. "`Compiler Support to Customize the Mark and Sweep Algorithm `_". ACM. ISMM'98 pp. 154--165. + + .. admonition:: Abstract + + Mark and sweep garbage collectors (GC) are classical but still + very efficient automatic memory management systems. Although + challenged by other kinds of systems, such as copying collectors, + mark and sweep collectors remain among the best in terms of + performance. + + This paper describes our implementation of an efficient mark and + sweep garbage collector tailored to each program. Compiler support + provides the type information required to statically and + automatically generate this customized garbage collector. The + segregation of object by type allows the production of a more + efficient GC code. 
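The "radioactive decay model" in CH97 above is precisely the memorylessness of the exponential distribution: once object lifetimes decay exponentially at some rate :math:`\lambda`, an object's age tells the collector nothing about its remaining life expectancy, which is why no promotion policy can be justified under that model:

.. math::

    \Pr[T > s + t \mid T > s]
      = \frac{\Pr[T > s + t]}{\Pr[T > s]}
      = \frac{e^{-\lambda (s + t)}}{e^{-\lambda s}}
      = e^{-\lambda t}
      = \Pr[T > t].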
This technique, implemented in SmallEiffel, our + compiler for the object-oriented language Eiffel, is applicable to + other languages and other garbage collection algorithms, be they + distributed or not. + + We present the results obtained on programs featuring a variety of + programming styles and compare our results to a well-known and + high-quality garbage collector. + +* .. _CWZ93: + + Jonathan E. Cook, Alexander L. Wolf, Benjamin Zorn. 1994. "`Partition Selection Policies in Object Database Garbage Collection `_". ACM. SIGMOD. International Conference on the Management of Data (SIGMOD'94), pp. 371--382. + + .. admonition:: Abstract + + The automatic reclamation of storage for unreferenced objects is + very important in object databases. Existing language system + algorithms for automatic storage reclamation have been shown to be + inappropriate. In this paper, we investigate methods to improve + the performance of algorithms for automatic storage reclamation of + object databases. These algorithms are based on a technique called + partitioned garbage collection, in which a subset of the entire + database is collected independently of the rest. Specifically, we + investigate the policy that is used to select what partition in + the database should be collected. The new partition selection + policies that we propose and investigate are based on the + intuition that the values of overwritten pointers provide good + hints about where to find garbage. Using trace-driven simulation, + we show that one of our policies requires less I/O to collect more + garbage than any existing implementable policy and performs close + to an impractical-to-implement but near-optimal policy over a wide + range of database sizes and connectivities. + +* .. _CKWZ96: + + Jonathan E. Cook, Artur Klauser, Alexander L. Wolf, Benjamin Zorn. 1996. "`Semi-automatic, Self-adaptive Control of Garbage Collection Rates in Object Databases `_". ACM, SIGMOD. International Conference on the Management of Data (SIGMOD'96), pp. 377--388. + + .. admonition:: Abstract + + A fundamental problem in automating object database storage + reclamation is determining how often to perform garbage + collection. We show that the choice of collection rate can have a + significant impact on application performance and that the "best" + rate depends on the dynamic behavior of the application, tempered + by the particular performance goals of the user. We describe two + semi-automatic, self-adaptive policies for controlling collection + rate that we have developed to address the problem. Using + trace-driven simulations, we evaluate the performance of the + policies on a test database application that demonstrates two + distinct reclustering behaviors. Our results show that the + policies are effective at achieving user-specified levels of I/O + operations and database garbage percentage. We also investigate + the sensitivity of the policies over a range of object + connectivities. The evaluation demonstrates that semi-automatic, + self-adaptive policies are a practical means for flexibly + controlling garbage collection rate. + +* .. _CNS92: + + Eric Cooper, Scott Nettles, Indira Subramanian. 1992. "Improving the Performance of SML Garbage Collection using Application-Specific Virtual Memory Management". ACM Conference on LISP and Functional Programming, pp. 43--52. + + .. 
admonition:: Abstract + + We improved the performance of garbage collection in the Standard ML of + New Jersey system by using the virtual memory facilities provided by + the Mach kernel. We took advantage of Mach's support for large sparse + address spaces and user-defined paging servers. We decreased the + elapsed time for realistic applications by as much as a factor of 4. + +* .. _DACONTA93: + + Michael C. Daconta. 1993. *C Pointers and Dynamic Memory Management.* Wiley. ISBN 0-471-56152-5. + +* .. _DACONTA95: + + Michael C. Daconta. 1995. *C++ Pointers and Dynamic Memory Management.* Wiley. ISBN 0-471-04998-0. + + .. admonition:: From the back cover + + Using techniques developed in the classroom at America Online's + Programmer's University, Michael Daconta deftly pilots programmers + through the intricacies of the two most difficult aspects of C++ + programming: pointers and dynamic memory management. Written by a + programmer for programmers, this no-nonsense, nuts-and-bolts guide + shows you how to fully exploit advanced C++ programming features, + such as creating class-specific allocators, understanding + references versus pointers, manipulating multidimensional arrays + with pointers, and how pointers and dynamic memory are the core of + object-oriented constructs like inheritance, name-mangling, and + virtual functions. + +* .. _DAHL63: + + O.-J. Dahl. 1963. "The SIMULA Storage Allocation Scheme". Norsk Regnesentral. NCC Document no. 162. + +* .. _DENNING68: + + P. J. Denning. 1968. "`Thrashing: Its Causes and Prevention `_". Proceedings AFIPS,1968 Fall Joint Computer Conference, vol. 33, pp. 915--922. + + .. admonition:: From the introduction + + A particularly troublesome phenomenon, thrashing, may seriously + interfere with the performance of paged memory systems, reducing + computing giants (Multics, IBM System 360, and others not + necessarily excepted) to computing dwarfs. The term thrashing + denotes excessive overhead and severe performance degradation or + collapse caused by too much paging. Thrashing inevitably turns a + shortage of memory space into a surplus of processor time. + +* .. _DENNING70: + + P. J. Denning. 1970. "`Virtual Memory `_". ACM. ACM Computing Surveys, vol. 2, no. 3, pp. 153--190, Sept. 1970. + + .. admonition:: Abstract + + The need for automatic storage allocation arises from desires for + program modularity, machine independence, and resource sharing. + Virtual memory is an elegant way of achieving these objectives. In + a virtual memory, the addresses a program may use to identify + information are distinguished from the addresses the memory system + uses to identify physical storage sites, and program-generated + addresses are translated automatically to the corresponding + machine addresses. Two principal methods for implementing virtual + memory, segmentation and paging, are compared and contrasted. Many + contemporary implementations have experienced one or more of these + problems: poor utilization of storage, thrashing, and high costs + associated with loading information into memory. These and + subsidiary problems are studied from a theoretic view, and are + shown to be controllable by a proper combination of hardware and + memory management policies. + +* .. _DS72: + + P. J. Denning, S. C. Schwartz. 1972. "`Properties of the Working-set Model `_". CACM. vol. 15, no. 3, pp. 191--198. + + .. 
admonition:: Abstract + + A program's working set *W*\ (*t*, *T*) at time *t* is the set of + distinct pages among the *T* most recently referenced pages. + Relations between the average working-set size, the missing-page + rate, and the interreference-interval distribution may be derived + both from time-average definitions and from ensemble-average + (statistical) definitions. An efficient algorithm for estimating + these quantities is given. The relation to LRU (least recently + used) paging is characterized. The independent-reference model, in + which page references are statistically independent, is used to + assess the effects of interpage dependencies on working-set size + observations. Under general assumptions, working-set size is shown + to be normally distributed. + +* .. _DETLEFS92: + + David L. Detlefs. 1992. "`Garbage collection and runtime typing as a C++ library `_". USENIX C++ Conference. + + .. admonition:: From the introduction + + Automatic storage management, or *garbage collection*, is a + feature that can ease program development and enhance program + reliability. Many high-level languages other than C++ provide + garbage collection. This paper proposes the use of "smart pointer" + template classes as an interface for the use of garbage collection + in C++. Template classes and operator overloading are techniques + allowing language extension at the level of user code; I claim + that using these techniques to create smart pointer classes + provides a syntax for manipulating garbage-collected storage safely + and conveniently. Further, the use of a smart-pointer template + class offers the possibility of implementing the collector at the + user-level, without requiring support from the compiler. If such a + compiler-independent implementation is possible with adequate + performance, then programmers can start to write code using + garbage collection without waiting for language and compiler + modifications. If the use of such a garbage collection interface + becomes widespread, then C++ compilation systems can be built to + specially support that garbage collection interface, thereby + allowing the use of collection algorithms with enhanced + performance. + +* .. _ZORN93: + + David L. Detlefs, Al Dosser, Benjamin Zorn. 1994. "`Memory Allocation Costs in Large C and C++ Programs `_". Software -- Practice and Experience. 24(6):527--542. + + .. admonition:: Abstract + + Dynamic storage allocation is an important part of a large class + of computer programs written in C and C++. High-performance + algorithms for dynamic storage allocation have been, and will + continue to be, of considerable interest. This paper presents + detailed measurements of the cost of dynamic storage allocation in + 11 diverse C and C++ programs using five very different dynamic + storage allocation implementations, including a conservative + garbage collection algorithm. Four of the allocator + implementations measured are publicly-available on the Internet. A + number of the programs used in these measurements are also + available on the Internet to facilitate further research in + dynamic storage allocation. Finally, the data presented in this + paper is an abbreviated version of more extensive statistics that + are also publicly-available on the Internet. + +* .. _DB76: + + L. Peter Deutsch, Daniel G. Bobrow. 1976. "`An Efficient, Incremental, Automatic Garbage Collector `_". CACM. vol. 19, no. 9, pp. 522--526. + + ..
admonition:: Abstract + + This paper describes a new way of solving the storage reclamation + problem for a system such as Lisp that allocates storage + automatically from a heap, and does not require the programmer to + give any indication that particular items are no longer useful or + accessible. A reference count scheme for reclaiming + non-self-referential structures, and a linearizing, compacting, + copying scheme to reorganize all storage at the user's discretion + are proposed. The algorithms are designed to work well in systems + which use multiple levels of storage, and large virtual address + space. They depend on the fact that most cells are referenced + exactly once, and that reference counts need only be accurate when + storage is about to be reclaimed. A transaction file stores + changes to reference counts, and a multiple reference table stores + the count for items which are referenced more than once. + +* .. _DLMSS76: + + E. W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, E. F. M. Steffens. 1976. "`On-the-fly Garbage Collection: An Exercise in Cooperation `_". Springer-Verlag. Lecture Notes in Computer Science, Vol. 46. + + .. admonition:: Abstract + + As an example of cooperation between sequential processes with + very little mutual interference despite frequent manipulations of + a large shared data space, a technique is developed which allows + nearly all of the activity needed for garbage detection and + collection to be performed by an additional processor operating + concurrently with the processor devoted to the computation + proper. Exclusion and synchronization constraints have been kept + as weak as could be achieved; the severe complexities engendered + by doing so are illustrated. + +* .. _DMH92: + + Amer Diwan, Richard L. Hudson, J. Eliot B. Moss. 1992. "`Compiler Support for Garbage Collection in a Statically Typed Language `_". ACM. Proceedings of the 5th ACM SIGPLAN conference on Programming language design and implementation, pp. 273--282. + + .. admonition:: Abstract + + We consider the problem of supporting compacting garbage + collection in the presence of modern compiler optimizations. Since + our collector may move any heap object, it must accurately locate, + follow, and update all pointers and values derived from pointers. + To assist the collector, we extend the compiler to emit tables + describing live pointers, and values derived from pointers, at + each program location where collection may occur. Significant + results include identification of a number of problems posed by + optimizations, solutions to those problems, a working compiler, + and experimental data concerning table sizes, table compression, + and time overhead of decoding tables during collection. While gc + support can affect the code produced, our sample programs show no + significant changes, the table sizes are a modest fraction of the + size of the optimized code, and stack tracing is a small fraction + of total gc time. Since the compiler enhancements are also modest, + we conclude that the approach is practical. + +* .. _DTM93: + + Amer Diwan, David Tarditi, J. Eliot B. Moss. 1993. "`Memory Subsystem Performance of Programs with Intensive Heap Allocation `_". Carnegie Mellon University. CMU-CS-93-227. + + .. admonition:: Abstract + + Heap allocation with copying garbage collection is a general + storage management technique for modern programming languages. It + is believed to have poor memory subsystem performance.
To + investigate this, we conducted an in-depth study of the memory + subsystem performance of heap allocation for memory subsystems + found on many machines. We studied the performance of + mostly-functional Standard ML programs which made heavy use of + heap allocation. We found that most machines support heap + allocation poorly. However, with the appropriate memory subsystem + organization, heap allocation can have good performance. The + memory subsystem property crucial for achieving good performance + was the ability to allocate and initialize a new object into the + cache without a penalty. This can be achieved by having subblock + placement with a subblock size of one word with a write allocate + policy, along with fast page-mode writes or a write buffer. For + caches with subblock placement, the data cache overhead was under + 9% for a 64k or larger data cache; without subblock placement the + overhead was often higher than 50%. + +* .. _DTM93A: + + Amer Diwan, David Tarditi, J. Eliot B. Moss. 1994. "`Memory Subsystem Performance of Programs Using Copying Garbage Collection `_". ACM. CMU-CS-93-210, also in POPL '94. + + .. admonition:: Abstract + + Heap allocation with copying garbage collection is believed to + have poor memory subsystem performance. We conducted a study of + the memory subsystem performance of heap allocation for memory + subsystems found on many machines. We found that many machines + support heap allocation poorly. However, with the appropriate + memory subsystem organization, heap allocation can have good + memory subsystem performance. + +* .. _DOLIGEZ93: + + Damien Doligez & Xavier Leroy. 1993. "`A concurrent, generational garbage collector for a multithreaded implementation of ML `_". ACM. POPL '93, 113--123. + + .. admonition:: Abstract + + This paper presents the design and implementation of a "quasi + real-time" garbage collector for Concurrent Caml Light, an + implementation of ML with threads. This two-generation system + combines a fast, asynchronous copying collector on the young + generation with a non-disruptive concurrent marking collector on + the old generation. This design crucially relies on the ML + compile-time distinction between mutable and immutable objects. + +* .. _DOLIGEZ94: + + Damien Doligez & Georges Gonthier. 1994. "`Portable, unobtrusive garbage collection for multiprocessor systems `_". ACM. POPL '94, 70--83. + + .. admonition:: Abstract + + We describe and prove the correctness of a new concurrent + mark-and-sweep garbage collection algorithm. This algorithm + derives from the classical on-the-fly algorithm from Dijkstra et + al. A distinguishing feature of our algorithm is that it supports + multiprocessor environments where the registers of running + processes are not readily accessible, without imposing any + overhead on the elementary operations of loading a register or + reading or initializing a field. Furthermore our collector never + blocks running mutator processes except possibly on requests for + free memory; in particular, updating a field or creating or + marking or sweeping a heap object does not involve + system-dependent synchronization primitives such as locks. We also + provide support for process creation and deletion, and for + managing an extensible heap of variable-sized objects. + +* .. _DBE93: + + R. Kent Dybvig, Carl Bruggeman, David Eby. 1993. "`Guardians in a Generation-Based Garbage Collector `_". SIGPLAN. 
Proceedings of the ACM SIGPLAN '93 Conference on Programming Language Design and Implementation, June 1993. + + .. admonition:: Abstract + + This paper describes a new language feature that allows + dynamically allocated objects to be saved from deallocation by an + automatic storage management system so that clean-up or other + actions can be performed using the data stored within the objects. + The program has full control over the timing of clean-up actions, + which eliminates several potential problems and often eliminates + the need for critical sections in code that interacts with + clean-up actions. Our implementation is "generation-friendly" in + the sense that the additional overhead within the mutator is + proportional to the number of clean-up actions actually performed. + +* .. _EDELSON92A: + + Daniel R. Edelson. 1992. "`Smart pointers: They're smart, but they're not pointers `_". USENIX C++ Conference. + + .. admonition:: From the introduction + + This paper shows how the behaviour of smart pointers diverges + from that of pointers in certain common C++ constructs. Given + this, we conclude that the C++ programming language does not + support seamless smart pointers: smart pointers cannot + transparently replace raw pointers in all ways except declaration + syntax. We show that this conclusion also applies to *accessors*. + +* .. _EDELSON92: + + Daniel R. Edelson. 1992. "`Comparing Two Garbage Collectors for C++ `_". University of California at Santa Cruz. Technical Report UCSC-CRL-93-20. + + .. admonition:: Abstract + + Our research is concerned with compiler-independent, tag-free + garbage collection for the C++ programming language. This paper + presents a mark-and-sweep collector, and explains how it + ameliorates shortcomings of a previous copy collector. The new + collector, like the old, uses C++'s facilities for creating + abstract data types to define a *tracked reference* type, called + *roots*, at the level of the application program. A programmer + wishing to utilize the garbage collection service uses these roots + in place of normal, raw pointers. We present a detailed study of + the cost of using roots, as compared to both normal pointers and + reference counted pointers, in terms of instruction counts. We + examine the efficiency of a small C++ application using roots, + reference counting, manual reclamation, and conservative + collection. Coding the application to use garbage collection, and + analyzing the resulting efficiency, helped us identify a number of + memory leaks and inefficiencies in the original, manually + reclaimed version. We find that for this program, garbage + collection using roots is much more efficient than reference + counting, though less efficient than manual reclamation. It is + hard to directly compare our collector to the conservative + collector because of the differing efficiencies of their + respective memory allocators. + +* .. _EDWARDS: + + Daniel J. Edwards. n.d. "`Lisp II Garbage Collector `_". MIT. AI Memo 19 (AIM-19). + + .. admonition:: Our summary + + (This short memo doesn't have an abstract. Basically, it describes + the plan for the LISP II Relocating Garbage Collector. It has four + phases: marking, collection, relocation and moving. Marking is by + recursive descent using a bit table. The remaining phases are + linear sweeps through the bit table. The collection phase + calculates how much everything needs to move, storing this + information in the free blocks.
The relocation phase updates all + relocatable addresses. The moving phase moves the surviving + objects into one contiguous block.) + +* .. _ELLIS93: + + John R. Ellis, David L. Detlefs. 1993. "`Safe, Efficient Garbage Collection for C++ `_". Xerox PARC. + + .. admonition:: Abstract + + We propose adding safe, efficient garbage collection to C++, + eliminating the possibility of storage-management bugs and making + the design of complex, object-oriented systems much easier. This + can be accomplished with almost no change to the language itself + and only small changes to existing implementations, while + retaining compatibility with existing class libraries. + +* .. _FERREIRA96: + + Paulo Ferreira. 1996. "`Larchant: garbage collection in a cached distributed shared store with persistence by reachability `_". Université Paris VI. Thése de doctorat. + + .. admonition:: Abstract + + The model of Larchant is that of a *Shared Address Space* + (spanning every site in a network including secondary storage) + with *Persistence By Reachability*. To provide the illusion of a + shared address space across the network, despite the fact that + site memories are disjoint, Larchant implements a *distributed + shared memory* mechanism. Reachability is accessed by tracing the + pointer graph, starting from the persistent root, and reclaiming + unreachable objects. This is the task of *Garbage Collection* + (GC). + + GC was until recently thought to be intractable in a large-scale + system, due to problems of scale, incoherence, asynchrony, and + performance. This thesis presents the solutions that Larchant + proposes to these problems. + + The GC algorithm in Larchant combines tracing and + reference-listing. It traces whenever economically feasible, i.e., + as long as the memory subset being collected remains local to a + site, and counts references that would cost I/O traffic to trace. + GC is orthogonal to coherence, i.e., makes progress even if only + incoherent replicas are locally available. The garbage collector + runs concurrently and asynchronously to applications. The + reference-listing boundary changes dynamically and seamlessly, and + independently at each site, in order to collect cycles of + unreachable objects. + + We prove formally that our GC algorithm is correct, i.e., it is + safe and live. The performance results from our Larchant prototype + show that our design goals (scalability, coherence orthogonality, + and good performance) are fulfilled. + +* .. _FS98: + + Paulo Ferreira & Marc Shapiro. 1998. "`Modelling a Distributed Cached Store for Garbage Collection `_". Springer-Verlag. Proceedings of 12th European Conference on Object-Oriented Programming, ECOOP98, LNCS 1445. + + .. admonition:: Abstract + + Caching and persistence support efficient, convenient and + transparent distributed data sharing. The most natural model of + persistence is persistence by reachability, managed automatically + by a garbage collector (GC). We propose a very general model of + such a system (based on distributed shared memory) and a scalable, + asynchronous distributed GC algorithm. Within this model, we show + sufficient and widely applicable correctness conditions for the + interactions between applications, store, memory, coherence, and + GC. + + The GC runs as a set of processes (local to each participating + machine) communicating by asynchronous messages. 
Collection does + not interfere with applications by setting locks, polluting + caches, or causing I/O; this requirement raised some novel and + interesting challenges which we address in this article. The + algorithm is safe and live; it is not complete, i.e. it collects + some distributed cycles of garbage but not necessarily all. + +* .. _FW76: + + Daniel P Friedman, David S. Wise. 1976. "`Garbage collecting a heap which includes a scatter table `_". *Information Processing Letters.* 5, 6 (December 1976): 161--164. + + .. admonition:: Abstract + + A new algorithm is introduced for garbage collecting a heap which + contains shared data structures accessed from a scatter table. The + scheme provides for the purging of useless entries from the + scatter table with no traversals beyond the two required by + classic collection schemes. For languages which use scatter tables + to sustain unique existence of complex structures, like natural + variables of SNOBOL, it indirectly allows liberal use of a single + scatter table by ensuring efficient deletion of useless entries. + Since the scatter table is completely restructured during the + course of execution, the hashing scheme itself is easily altered + during garbage collection whenever skewed loading of the scatter + table warrants abandonment of the old hashing. This procedure is + applicable to the maintenance of dynamic structures such as those + in information retrieval schemes or in languages like LISP and + SNOBOL. + +* .. _FW77: + + Daniel P Friedman, David S. Wise. 1977. "`The One Bit Reference Count `_". *BIT.* (17)3: 351--359. + + .. admonition:: Abstract + + Deutsch and Bobrow propose a storage reclamation scheme for a heap + which is a hybrid of garbage collection and reference counting. + The point of the hybrid scheme is to keep track of very low + reference counts between necessary invocation of garbage + collection so that nodes which are allocated and rather quickly + abandoned can be returned to available space, delaying necessity + for garbage collection. We show how such a scheme may be + implemented using the mark bit already required in every node by + the garbage collector. Between garbage collections that bit is + used to distinguish nodes with a reference count known to be one. + A significant feature of our scheme is a small cache of references + to nodes whose implemented counts "ought to be higher" which + prevents the loss of logical count information in simple + manipulations of uniquely referenced structures. + +* .. _FW79: + + Daniel P Friedman, David S. Wise. 1979. "`Reference counting can manage the circular environments of mutual recursion `_". *Information Processing Letters.* 8, 1 (January 1979): 41--45. + + .. admonition:: From the introduction + + In this note we advance reference counting as a storage management + technique viable for implementing recursive languages like ISWIM + or pure LISP with the ``labels`` construct for implementing mutual + recursion from SCHEME. ``Labels`` is derived from ``letrec`` and + displaces the ``label`` operator, a version of the paradoxical + Y-combinator. The key observation is that the requisite circular + structure (which ordinarily cripples reference counts) occurs only + within the language--rather than the user--structure, and that the + references into this structure are well-controlled. + +* .. _GZH93: + + Dirk Grunwald, Benjamin Zorn, R. Henderson. 1993. "`Improving the Cache Locality of Memory Allocation `_". SIGPLAN.
SIGPLAN '93, Conference on PLDI, June 1993, Albuquerque, New Mexico. + + .. admonition:: Abstract + + The allocation and disposal of memory is a ubiquitous operation in + most programs. Rarely do programmers concern themselves with + details of memory allocators; most assume that memory allocators + provided by the system perform well. This paper presents a + performance evaluation of the reference locality of dynamic + storage allocation algorithms based on trace-driven simulation of + five large allocation-intensive C programs. In this paper, we show + how the design of a memory allocator can significantly affect the + reference locality for various applications. Our measurements show + that poor locality in sequential-fit algorithms reduces program + performance, both by increasing paging and cache miss rates. While + increased paging can be debilitating on any architecture, cache + misses rates are also important for modern computer architectures. + We show that algorithms attempting to be space-efficient, by + coalescing adjacent free objects show poor reference locality, + possibly negating the benefits of space efficiency. At the other + extreme, algorithms can expend considerable effort to increase + reference locality yet gain little in total execution performance. + Our measurements suggest an allocator design that is both very + fast and has good locality of reference. + +* .. _GRUN92: + + Dirk Grunwald & Benjamin Zorn. 1993. "`CustoMalloc: Efficient Synthesized Memory Allocators `_". Software -- Practice and Experience. 23(8):851--869. + + .. admonition:: Abstract + + The allocation and disposal of memory is a ubiquitous operation in + most programs. Rarely do programmers concern themselves with + details of memory allocators; most assume that memory allocators + provided by the system perform well. Yet, in some applications, + programmers use domain-specific knowledge in an attempt to improve + the speed or memory utilization of memory allocators. In this + paper, we describe a program (CustoMalloc) that synthesizes a + memory allocator customized for a specific application. Our + experiments show that the synthesized allocators are uniformly + faster than the common binary-buddy (BSD) allocator, and are more + space efficient. Constructing a custom allocator requires little + programmer effort. The process can usually be accomplished in a + few minutes, and yields results superior even to domain-specific + allocators designed by programmers. Our measurements show the + synthesized allocators are from two to ten times faster than + widely used allocators. + +* .. _GUDEMAN93: + + David Gudeman. 1993. "`Representing Type Information in Dynamically Typed Languages `_". University of Arizona at Tucson. Technical Report TR 93-27. + + .. admonition:: Abstract + + This report is a discussion of various techniques for representing + type information in dynamically typed languages, as implemented on + general-purpose machines (and costs are discussed in terms of + modern RISC machines). It is intended to make readily available a + large body of knowledge that currently has to be absorbed + piecemeal from the literature or re-invented by each language + implementor. This discussion covers not only tagging schemes but + other forms of representation as well, although the discussion is + strictly limited to the representation of type information. It + should also be noted that this report does not purport to contain + a survey of the relevant literature. 
Instead, this report gathers + together a body of folklore, organizes it into a logical + structure, makes some generalizations, and then discusses the + results in terms of modern hardware. + +* .. _HARRIS99: + + Timothy Harris. 1999. "`Early storage reclamation in a tracing garbage collector `_". ACM. ACM SIG-PLAN Notices 34:4, pp. 46--53. + + .. admonition:: Abstract + + This article presents a technique for allowing the early recovery + of storage space occupied by garbage data. The idea is similar to + that of generational garbage collection, except that the heap is + partitioned based on a static analysis of data type definitions + rather than on the approximate age of allocated objects. A + prototype implementation is presented, along with initial results + and ideas for future work. + +* .. _HENRIK94: + + Roger Henriksson. 1994. "Scheduling Real Time Garbage Collection". Department of Computer Science at Lund University. LU-CS-TR:94-129. + + .. admonition:: Abstract + + This paper presents a new model for scheduling the work of an + incremental garbage collector in a system with hard real time + requirements. The method utilizes the fact that just some of the + processes in the system have to meet hard real time requirements + and that these processes typically run periodically, a fact that + we can make use of when scheduling the garbage collection. The + work of the collector is scheduled to be performed in the pauses + between the critical processes and is suspended when the processes + with hard real time requirements run. It is shown that this + approach is feasible for many real time systems and that it leaves + the time-critical parts of the system undisturbed from garbage + collection induced delays. + +* .. _HENRIK96: + + Roger Henriksson. 1996. "`Adaptive Scheduling of Incremental Copying Garbage Collection for Interactive Applications `_". NWPER96. + + .. admonition:: Abstract + + Incremental algorithms are often used to interleave the work of a + garbage collector with the execution of an application program, + the intention being to avoid long pauses. However, overestimating + the worst-case storage needs of the program often causes all the + garbage collection work to be performed in the beginning of the + garbage collection cycles, slowing down the application program to + an unwanted degree. This paper explores an approach to + distributing the work more evenly over the garbage collection + cycle. + +* .. _HENRIKSSON98: + + Roger Henriksson. 1998. "`Scheduling Garbage Collection in Embedded Systems `_". Department of Computer Science at Lund University. Ph.D. thesis. + + .. admonition:: Abstract + + The complexity of systems for automatic control and other + safety-critical applications grows rapidly. Computer software + represents an increasing part of the complexity. As larger systems + are developed, we need to find scalable techniques to manage the + complexity in order to guarantee high product quality. Memory + management is a key quality factor for these systems. Automatic + memory management, or garbage collection, is a technique that + significantly reduces the complex problem of correct memory + management. The risk of software errors decreases and development + time is reduced. + + Garbage collection techniques suitable for interactive and soft + real-time systems exist, but few approaches are suitable for + systems with hard real-time requirements, such as control systems + (embedded systems). 
One part of the problem is solved by + incremental garbage collection algorithms, which have been + presented before. We focus on the scheduling problem which forms + the second part of the problem, i.e. how the work of a garbage + collector should be scheduled in order to disturb the application + program as little as possible. It is studied how a priori + scheduling analysis of systems with automatic memory management + can be made. The field of garbage collection research is thus + joined with the field of scheduling analysis in order to produce a + practical synthesis of the two fields. + + A scheduling strategy is presented that employs the properties of + control systems to ensure that no garbage collection work is + performed during the execution of critical processes. The hard + real-time part of the system is thus never disturbed by garbage + collection work. Existing incremental garbage collection + algorithms are adapted to the presented strategy. Necessary + modifications of the algorithms and the real-time kernel are + discussed. A standard scheduling analysis technique, rate + monotonic analysis, is extended in order to make a priori analysis + of the schedulability of the garbage collector possible. + + The scheduling algorithm has been implemented in an industrially + relevant real-time environment in order to show that the strategy + is feasible in practice. The experimental evaluation shows that + predictable behaviour and sub-millisecond worst-case delays can be + achieved on standard hardware even by a non-optimized prototype + garbage collector. + +* .. _HOSKING91: + + Antony L. Hosking. 1991. "`Main memory management for persistence `_". ACM. Proceedings of the ACM OOPSLA'91 Workshop on Garbage Collection. + + .. admonition:: Abstract + + Reachability-based persistence imposes new requirements for main + memory management in general, and garbage collection in + particular. After a brief introduction to the characteristics and + requirements of reachability-based persistence, we present the + design of a run-time storage manager for Persistent Smalltalk and + Persistent Modula-3, which allows the reclamation of storage from + both temporary objects and buffered persistent objects. + +* .. _HMS92: + + Antony L. Hosking, J. Eliot B. Moss, Darko Stefanovic. 1992. "`A comparative performance evaluation of write barrier implementations `_". ACM. OOPSLA'92 Conference Proceedings, ACM SIGPLAN Notices 27(10), pp 92--109. + + .. admonition:: Abstract + + Generational garbage collectors are able to achieve very small + pause times by concentrating on the youngest (most recently + allocated) objects when collecting, since objects have been + observed to die young in many systems. Generational collectors + must keep track of all pointers from older to younger generations, + by “monitoring” all stores into the heap. This *write barrier* has + been implemented in a number of ways, varying essentially in the + granularity of the information observed and stored. Here we + examine a range of write barrier implementations and evaluate + their relative performance within a generation scavenging garbage + collector for Smalltalk. + +* .. _HH93: + + Antony L. Hosking, Richard L. Hudson. 1993. "`Remembered sets can also play cards `_". ACM. Proceedings of the ACM OOPSLA'93 Workshop on Memory Management and Garbage Collection. + + .. admonition:: Abstract + + Remembered sets and dirty bits have been proposed as alternative + implementations of the write barrier for garbage collection. 
There + are advantages to both approaches. Dirty bits can be efficiently + maintained with minimal, bounded overhead per store operation, + while remembered sets concisely, and accurately record the + necessary information. Here we present evidence to show that + hybrids can combine the virtues of both schemes and offer + competitive performance. Moreover, we argue that a hybrid can + better avoid the devils that are the downfall of the separate + alternatives. + +* .. _HM93: + + Antony L. Hosking, J. Eliot B. Moss. 1993. "`Protection traps and alternatives for memory management of an object-oriented language `_". ACM. Proceedings of the Fourteenth ACM Symposium on Operating Systems Principles, ACM Operating Systems Review 27(5), pp 106--119. + + .. admonition:: Abstract + + Many operating systems allow user programs to specify the + protection level (inaccessible, read-only, read-write) of pages in + their virtual memory address space, and to handle any protection + violations that may occur. Such page-protection techniques have + been exploited by several user-level algorithms for applications + including generational garbage collection and persistent stores. + Unfortunately, modern hardware has made efficient handling of page + protection faults more difficult. Moreover, page-sized granularity + may not match the natural granularity of a given application. In + light of these problems, we reevaluate the usefulness of + page-protection primitives in such applications, by comparing the + performance of implementations that make use of the primitives + with others that do not. Our results show that for certain + applications software solutions outperform solutions that rely on + page-protection or other related virtual memory primitives. + +* .. _HMDW91: + + Richard L. Hudson, J. Eliot B. Moss, Amer Diwan, Christopher F. Weight. 1991. "`A Language-Independent Garbage Collector Toolkit `_". University of Massachusetts at Amherst. COINS Technical Report 91--47. + + .. admonition:: Abstract + + We describe a memory management toolkit for language implementors. + It offers efficient and flexible generation scavenging garbage + collection. In addition to providing a core of + language-independent algorithms and data structures, the toolkit + includes auxiliary components that ease implementation of garbage + collection for programming languages. We have detailed designs for + Smalltalk and Modula-3 and are confident the toolkit can be used + with a wide variety of languages. The toolkit approach is itself + novel, and our design includes a number of additional innovations + in flexibility, efficiency, accuracy, and cooperation between the + compiler and the collector. + +* .. _HM92: + + Richard L. Hudson, J. Eliot B. Moss. 1992. "`Incremental Collection of Mature Objects `_". Springer-Verlag. LNCS #637 International Workshop on Memory Management, St. Malo, France, Sept. 1992, pp. 388--403. + + .. admonition:: Abstract + + We present a garbage collection algorithm that extends + generational scavenging to collect large older generations (mature + objects) non-disruptively. The algorithm's approach is to process + bounded-size pieces of mature object space at each collection; the + subtleties lie in guaranteeing that it eventually collects any and + all garbage. The algorithm does not assume any special hardware or + operating system support, e.g., for forwarding pointers or + protection traps. The algorithm copies objects, so it naturally + supports compaction and reclustering. + +* .. 
_HMMM97: + + Richard L. Hudson, Ron Morrison, J. Eliot B. Moss, David S. Munro. 1997. "`Garbage Collecting the World: One Car at a Time `_". ACM. Proc. OOPSLA 97, pp. 162--175. + + .. admonition:: Abstract + + A new garbage collection algorithm for distributed object systems, + called DMOS (Distributed Mature Object Space), is presented. It is + derived from two previous algorithms, MOS (Mature Object Space), + sometimes called the train algorithm, and PMOS (Persistent Mature + Object Space). The contribution of DMOS is that it provides the + following unique combination of properties for a distributed + collector: safety, completeness, non-disruptiveness, + incrementality, and scalability. Furthermore, the DMOS collector + is non-blocking and does not use global tracing. + +* .. _JOHNSTONE97: + + Mark S. Johnstone. 1997. "`Non-Compacting Memory Allocation and Real-Time Garbage Collection `_". University of Texas at Austin. + + .. admonition:: Abstract + + Dynamic memory use has been widely recognized to have profound + effects on program performance, and has been the topic of many + research studies over the last forty years. In spite of years of + research, there is considerable confusion about the effects of + dynamic memory allocation. Worse, this confusion is often + unrecognized, and memory allocators are widely thought to be + fairly well understood. + + In this research, we attempt to clarify many issues for both + manual and automatic non-moving memory management. We show that + the traditional approaches to studying dynamic memory allocation + are unsound, and develop a sound methodology for studying this + problem. We present experimental evidence that fragmentation costs + are much lower than previously recognized for most programs, and + develop a framework for understanding these results and enabling + further research in this area. For a large class of programs using + well-known allocation policies, we show that fragmentation costs + are near zero. We also study the locality effects of memory + allocation on programs, a research area that has been almost + completely ignored. We show that these effects can be quite + dramatic, and that the best allocation policies in terms of + fragmentation are also among the best in terms of locality at both + the cache and virtual memory levels of the memory hierarchy. + + We extend these fragmentation and locality results to real-time + garbage collection. We have developed a hard real-time, + non-copying generational garbage collector which uses a + write-barrier to coordinate collection work only with + modifications of pointers, therefore making coordination costs + cheaper and more predictable than previous approaches. We combine + this write-barrier approach with implicit non-copying reclamation, + which has most of the advantages of copying collection (notably + avoiding both the sweep phase required by mark-sweep collectors, + and the referencing of garbage objects when reclaiming their + space), without the disadvantage of having to actually copy the + objects. In addition, we present a model for non-copying + implicit-reclamation garbage collection. We use this model to + compare and contrast our work with that of others, and to discuss + the tradeoffs that must be made when developing such a garbage + collector. + +* .. _JW98: + + Mark S. Johnstone, Paul R. Wilson. 1998. "`The Memory Fragmentation Problem: Solved? `_". ACM. ISMM'98 pp. 26--36. + + .. 
admonition:: Abstract + + We show that for 8 real and varied C and C++ programs, several + conventional dynamic storage allocators provide near-zero + fragmentation, once overheads due to implementation details + (headers, alignment, etc.) are properly accounted for. This + substantially strengthens our previous results showing that the + memory fragmentation problem has generally been misunderstood, and + that good allocator policies can provide good memory usage for + most programs. The new results indicate that for most programs, + excellent allocator policies are readily available, and efficiency + of implementation is the major challenge. While we believe that + our experimental results are state-of-the-art and our methodology + is superior to most previous work, more work should be done to + identify and study unusual problematic program behaviors not + represented in our sample. + +* .. _JONES92: + + Richard E. Jones. 1992. "`Tail recursion without space leaks `_". *Journal of Functional Programming.* 2(1):73--79. + + .. admonition:: Abstract + + The G-machine is a compiled graph reduction machine for lazy + functional languages. The G-machine compiler contains many + optimisations to improve performance. One set of such + optimisations is designed to improve the performance of tail + recursive functions. Unfortunately the abstract machine is subject + to a space leak--objects are unnecessarily preserved by the + garbage collector. + + This paper analyses why a particular form of space leak occurs in + the G-machine, and presents some ideas for fixing this problem. + This phenomena in other abstract machines is also examined + briefly. + +* .. _JL92: + + Richard E. Jones, Rafael Lins. 1992. "`Cyclic weighted reference counting without delay `_". Computing Laboratory, The University of Kent at Canterbury. Technical Report 28-92. + + .. admonition:: Abstract + + Weighted Reference Counting is a low-communication distributed + storage reclamation scheme for loosely-coupled multiprocessors. + The algorithm we present herein extends weighted reference + counting to allow the collection of cyclic data structures. To do + so, the algorithm identifies candidate objects that may be part of + cycles and performs a tricolour mark-scan on their subgraph in a + lazy manner to discover whether the subgraph is still in use. The + algorithm is concurrent in the sense that multiple useful + computation processes and garbage collection processes can be + performed simultaneously. + +* .. _JONES96: + + Richard E. Jones, Rafael Lins. 1996. "`Garbage Collection: Algorithms for Automatic Dynamic Memory Management `_". Wiley. ISBN 0-471-94148-4. + + .. admonition:: From the back cover + + The memory storage requirements of complex programs are extremely + difficult to manage correctly by hand. A single error may lead to + indeterminate and inexplicable program crashes. Worse still, + failures are often unrepeatable and may surface only long after + the program has been delivered to the customer. The eradication of + memory errors typically consumes a substantial amount of + development time. And yet the answer is relatively easy -- garbage + collection; removing the clutter of memory management from module + interfaces, which then frees the programmer to concentrate on the + problem at hand rather than low-level book-keeping details. For + this reason, most modern object-oriented languages such as + Smalltalk, Eiffel, Java and Dylan, are supported by garbage + collection. 
Garbage collecting libraries are even available for + such uncooperative languages as C and C++. + + This book considers how dynamic memory can be recycled + automatically to guarantee error-free memory management. There is + an abundant but disparate literature on the subject, largely + confined to research papers. This book sets out to pool this + experience in a single accessible and unified framework. + + Each of the important algorithms is explained in detail, often + with illustrations of its characteristic features and animations + of its use. Techniques are described and compared for declarative + and imperative programming styles, for sequential, concurrent and + distributed architectures. + + For professionals developing programs from simple software tools + to complex systems, as well as for researchers and students + working in compiler construction, functional, logic and + object-oriented programming design, this book will provide not + only a clear introduction but also a convenient reference source + for modern garbage collection techniques. + +* .. _ACM98: + + Richard E. Jones. 1998. "`ISMM'98 International Symposium on Memory Management `_". ACM. ISBN 1-58113-114-3. + + .. admonition:: From the Preface + + The International Symposium on Memory Management is a forum for + research in several related areas of memory management, especially + garbage collectors and dynamic storage allocators. [...] The + nineteen papers selected for publication in this volume cover a + remarkably broad range of memory management topics from explicit + malloc-style allocation to automatic memory management, from + cache-conscious data layout to efficient management of distributed + references, from conservative to type-accurate garbage collection, + for applications ranging from user application to long-running + servers, supporting languages as different as C, C++, Modula-3, + Java, Eiffel, Erlang, Scheme, ML, Haskell and Prolog. + +* .. _JONES12: + + Richard E. Jones, Antony Hosking, and Eliot Moss. 2012. "`The Garbage Collection Handbook `_". Chapman & Hall. + +* .. _JOYNER96: + + Ian Joyner. 1996. "`C++??: A Critique of C++ `_.". + + .. admonition:: Abstract + + The C++?? Critique is an analysis of some of the flaws of C++. It + is by no means exhaustive, nor does it attempt to document every + little niggle with C++, rather concentrating on main themes. The + critique uses Java and Eiffel as comparisons to C++ to give a more + concrete feel to the criticisms, viewing conceptual differences + rather than syntactic ones as being more important. Some C++ + authors realising there are glaring deficiencies in C++ have + chosen to defend C++ by also being critical within their own work. + Most notable are Bjarne Stroustrup's "Design and Evolution of C++," + and Scott Meyers' "Effective" and "More Effective C++." These warn + of many traps and pitfalls, but reach the curious conclusion that + since "good" C++ programmers are aware of these problems and know + how to avoid them, C++ is alright. + + The C++ critique makes many of the same criticisms, but comes to + the different conclusion that these pitfalls are not acceptable, + and should not be in a language used for modern large scale + software engineering. Clean design is more important than after + the fact warnings, and it is inconceivable that purchasers of end + user software would tolerate this tactic on the part of vendors.
The critique also takes a look at C, and concludes that many of + the features of C should be left out of modern languages, and that + C is a flawed base for a language. + +* .. _KANEFSKY89: + + Bob Kanefsky. 1989. "`Recursive Memory Allocation `_". Bob Kanefsky. Songworm 3, p.?. + +* .. _KQH98: + + Jin-Soo Kim, Xiaohan Qin, Yarsun Hsu. 1998. "`Memory Characterization of a Parallel Data Mining Workload `_". IEEE. Proc. Workload Characterization: Methodology and Case Studies, pp. . + + .. admonition:: Abstract + + This paper studies a representative of an important class of + emerging applications, a parallel data mining workload. The + application, extracted from the IBM Intelligent Miner, identifies + groups of records that are mathematically similar based on a + neural network model called self-organizing map. We examine and + compare in detail two implementations of the application: + (1) temporal locality or working set sizes; (2) spatial locality + and memory block utilization; (3) communication characteristics + and scalability; and (4) TLB performance. + + First, we find that the working set hierarchy of the application + is governed by two parameters, namely the size of an input record + and the size of prototype array; it is independent of the number + of input records. Second, the application shows good spatial + locality, with the implementation optimized for sparse data sets + having slightly worse spatial locality. Third, due to the batch + update scheme, the application bears very low communication. + Finally, a 2-way set associative TLB may result in severely skewed + TLB performance in a multiprocessor environment caused by the + large discrepancy in the amount of conflict misses. Increasing the + set associativity is more effective in mitigating the problem than + increasing the TLB size. + +* .. _KH00: + + Jin-Soo Kim & Yarsun Hsu. 2000. "Memory system behavior of Java programs: methodology and analysis". ACM. Proc. International conference on measurements and modeling of computer systems, pp. 264--274. + + .. admonition:: Abstract + + This paper studies the memory system behavior of Java programs by + analyzing memory reference traces of several SPECjvm98 + applications running with a Just-In-Time (JIT) compiler. Trace + information is collected by an exception-based tracing tool called + JTRACE, without any instrumentation to the Java programs or the + JIT compiler. First, we find that the overall cache miss ratio is + increased due to garbage collection, which suffers from higher + cache misses compared to the application. We also note that going + beyond 2-way cache associativity improves the cache miss ratio + marginally. Second, we observe that Java programs generate a + substantial amount of short-lived objects. However, the size of + frequently-referenced long-lived objects is more important to the + cache performance, because it tends to determine the application's + working set size. Finally, we note that the default heap + configuration which starts from a small initial heap size is very + inefficient since it invokes a garbage collector frequently. + Although the direct costs of garbage collection decrease as we + increase the available heap size, there exists an optimal heap + size which minimizes the total execution time due to the + interaction with the virtual memory performance. + +* .. _KOLODNER92: + + Elliot K. Kolodner. 1992. "Atomic Incremental Garbage Collection and Recovery for a Large Stable Heap". Laboratory for Computer Science at MIT.
MIT-LCS-TR-534. + + .. admonition:: Abstract + + A stable heap is a storage that is managed automatically using + garbage collection, manipulated using atomic transactions, and + accessed using a uniform storage model. These features enhance + reliability and simplify programming by preventing errors due to + explicit deallocation, by masking failures and concurrency using + transactions, and by eliminating the distinction between accessing + temporary storage and permanent storage. Stable heap management is + useful for programming language for reliable distributed + computing, programming languages with persistent storage, and + object-oriented database systems. Many applications that could + benefit from a stable heap (e.g., computer-aided design, + computer-aided software engineering, and office information + systems) require large amounts of storage, timely responses for + transactions, and high availability. We present garbage collection + and recovery algorithms for a stable heap implementation that meet + these goals and are appropriate for stock hardware. The collector + is incremental: it does not attempt to collect the whole heap at + once. The collector is also atomic: it is coordinated with the + recovery system to prevent problems when it moves and modifies + objects . The time for recovery is independent of heap size, and + can be shortened using checkpoints. + +* .. _LK98: + + Per-Åke Larson & Murali Krishnan. 1998. "`Memory Allocation for Long-Running Server Applications `_". ACM. ISMM'98 pp. 176--185. + + .. admonition:: Abstract + + Prior work on dynamic memory allocation has largely neglected + long-running server applications, for example, web servers and + mail servers. Their requirements differ from those of one-shot + applications like compilers or text editors. We investigated how + to build an allocator that is not only fast and memory efficient + but also scales well on SMP machines. We found that it is not + sufficient to focus on reducing lock contention. Only limited + improvement can be achieved this way; higher speedups require a + reduction in cache misses and cache invalidation traffic. We then + designed and prototyped a new allocator, called Lkmalloc, targeted + for both traditional applications and server applications. + LKmalloc uses several subheaps, each one with a separate set of + free lists and memory arena. A thread always allocates from the + same subheap but can free a block belonging to any subheap. A + thread is assigned to a subheap by hashing on its thread ID. We + compared its performance with several other allocators on a + server-like, simulated workload and found that it indeed scales + well and is quite fast but could use memory more efficiently. + +* .. _LH83: + + Henry Lieberman & Carl Hewitt. 1983. "`A real-time garbage collector based on the lifetimes of objects `_". ACM. 26(6):419--429. + + .. admonition:: Abstract + + In previous heap storage systems, the cost of creating objects and + garbage collection is independent of the lifetime of the object. + Since objects with short lifetimes account for a large portion of + storage use, it is worth optimizing a garbage collector to reclaim + storage for these objects more quickly. The garbage collector + should spend proportionately less effort reclaiming objects with + longer lifetimes. 
We present a garbage collection algorithm that + (1) makes storage for short-lived objects cheaper than storage for + long-lived objects, (2) that operates in real-time--object + creation and access times are bounded, (3) increases locality of + reference, for better virtual memory performance, (4) works well + with multiple processors and a large address space. + +* .. _MM59: + + J. McCarthy, M. L. Minsky. 1959. "`Artificial Intelligence, Quarterly Progress Report no. 53 `_". Research Laboratory of Electronics at MIT. + +* .. _MCCARTHY60: + + J. McCarthy. 1960. "`Recursive Functions of Symbolic Expressions and Their Computation by Machine `_". CACM. + + .. admonition:: Abstract + + A programming system called LISP (for LISt Processor) has been + developed for the IBM 704 computer by the Artificial Intelligence + group at M.I.T. The system was designed to facilitate experiments + with a proposed system called the Advice Taker, whereby a machine + could be instructed to handle declarative as well as imperative + sentences and could exhibit "common sense" in carrying out its + instructions. The original proposal for the Advice Taker was made + in November 1958. The main requirement was a programming system + for manipulating expressions representing formalized declarative + and imperative sentences so that the Advice Taker could make + deductions. + + In the course of its development the LISP system went through + several stages of simplification and eventually came to be based + on a scheme for representing the partial recursive functions of a + certain class of symbolic expressions. This representation is + independent of the IBM 704 computer, or of any other electronic + computer, and it now seems expedient to expound the system by + starting with the class of expressions called S-expressions and + the functions called S-functions. + +* .. _MCCARTHY79: + + John McCarthy. 1979. "`History of Lisp `_". In *History of programming languages I*, pp. 173--185. ACM. + +* .. _PTM98: + + Veljko Milutinovic, Jelica Protic, Milo Tomasevic. 1997. "`Distributed shared memory: concepts and systems `_". IEEE Computer Society Press. ISBN 0-8186-7737-6. + + .. admonition:: From the publisher's catalog + + Presents a survey of both distributed shared memory (DSM) efforts + and commercial DSM systems. The book discusses relevant issues + that make the concept of DSM one of the most attractive approaches + for building large-scale, high-performance multiprocessor systems. + Its text provides a general introduction to the DSM field as well + as a broad survey of the basic DSM concepts, mechanisms, design + issues, and systems. + + Distributed Shared Memory concentrates on basic DSM algorithms, + their enhancements, and their performance evaluation. In addition, + it details implementations that employ DSM solutions at the + software and the hardware level. The book is a research and + development reference that provides state-of-the-art information + that will be useful to architects, designers, and programmers of + DSM systems. + +* .. _MINSKY63: + + M. L. Minsky. 1963. "`A LISP Garbage Collector Algorithm Using Serial Secondary Storage `_". MIT. Memorandum MAC-M-129, Artificial Intelligence Project, Memo 58 (revised). + + .. admonition:: Abstract + + This paper presents an algorithm for reclaiming unused free + storage memory cells in LISP. It depends on availability of a fast + secondary storage device, or a large block of available temporary + storage. For this price, we get 1.
Packing of free-storage into a + solidly packed block. 2. Smooth packing of arbitrary linear blocks + and arrays. 3. The collector will handle arbitrarily complex + re-entrant list structure with no introduction of spurious copies. + 4. The algorithm is quite efficient; the marking pass visits words + at most twice and usually once, and the loading pass is linear. + 5. The system is easily modified to allow for increase in size of + already fixed consecutive blocks, provided one can afford to + initiate a collection pass or use a modified array while waiting + for such a pass to occur. + +* .. _MOON84: + + David Moon. 1984. "`Garbage Collection in a Large Lisp System `_". ACM. Symposium on Lisp and Functional Programming, August 1984. + + .. admonition:: Abstract + + This paper discusses garbage collection techniques used in a + high-performance Lisp implementation with a large virtual memory, + the Symbolics 3600. Particular attention is paid to practical + issues and experience. In a large system problems of scale appear + and the most straightforward garbage-collection techniques do not + work well. Many of these problems involve the interaction of the + garbage collector with demand-paged virtual memory. Some of the + solutions adopted in the 3600 are presented, including incremental + copying garbage collection, approximately depth-first copying, + ephemeral objects, tagged architecture, and hardware assists. We + discuss techniques for improving the efficiency of garbage + collection by recognizing that objects in the Lisp world have a + variety of lifetimes. The importance of designing the architecture + and the hardware to facilitate garbage collection is stressed. + +* .. _MOON85: + + David Moon. 1985. "Architecture of the Symbolics 3600". IEEE. 12th International Symposium on Computer Architecture, pp. 76--83. + +* .. _MOON87: + + David Moon. 1990. "Symbolics Architecture". Wiley. Chapter 3 of *Computers for Artificial Intelligence Processing*, ISBN 0-471-84811-5. + +* .. _MOON91: + + David Moon. 1991. "Genera Retrospective". IEEE. 1991 International Workshop on Object Orientation in Operating Systems, order #2265. + +* .. _MORDEC84: + + Mordechai Ben-Ari. 1984. "Algorithms for On-the-fly Garbage Collection". *TOPLAS* 6(3): 333--344 (1984). + +* .. _MOREAU98: + + Luc Moreau. 1998. "`Hierarchical Distributed Reference Counting `_". ACM. ISMM'98 pp. 57--67. + + .. admonition:: Abstract + + Massively distributed computing is a challenging problem for + garbage collection algorithm designers as it raises the issue of + scalability. The high number of hosts involved in a computation + can require large tables for reference listing, whereas the lack + of information sharing between hosts in the same locality can entail + redundant GC traffic. In this paper, we argue that a conceptual + hierarchical organisation of massive distributed computations can + solve this problem. By conceptual hierarchical organisation, we + mean that processors are still able to communicate in a peer to + peer manner using their usual communication mechanism, but GC + messages will be routed as if processors were organised in + a hierarchy. We present an extension of a distributed reference + counting algorithm that uses such a hierarchical organisation. It + allows us to bound table sizes by the number of hosts in a domain, + and it allows us to share GC information between hosts in the same + locality in order to reduce cross-network GC traffic. + +* ..
_MFH95: + + Greg Morrisett, Matthias Felleisen, Robert Harper. 1995. "`Abstract Models of Memory Management `_". Carnegie Mellon University. CMU-CS-FOX-95-01. + + .. admonition:: Abstract + + Most specifications of garbage collectors concentrate on the + low-level algorithmic details of how to find and preserve + accessible objects. Often, they focus on bit-level manipulations + such as "scanning stack frames," "marking objects," "tagging + data," etc. While these details are important in some contexts, + they often obscure the more fundamental aspects of memory + management: what objects are garbage and why? + + We develop a series of calculi that are just low-level enough that + we can express allocation and garbage collection, yet are + sufficiently abstract that we may formally prove the correctness + of various memory management strategies. By making the heap of a + program syntactically apparent, we can specify memory actions as + rewriting rules that allocate values on the heap and automatically + dereference pointers to such objects when needed. This formulation + permits the specification of garbage collection as a relation that + removes portions of the heap without affecting the outcome of + evaluation. + + Our high-level approach allows us to specify in a compact manner a + wide variety of memory management techniques, including standard + trace-based garbage collection (i.e., the family of copying and + mark/sweep collection algorithms), generational collection, and + type-based, tag-free collection. Furthermore, since the definition + of garbage is based on the semantics of the underlying language + instead of the conservative approximation of inaccessibility, we + are able to specify and prove the idea that type inference can be + used to collect some objects that are accessible but never used. + +* .. _MBMM99: + + David S. Munro, Alfred Brown, Ron Morrison, J. Eliot B. Moss. 1999. "`Incremental Garbage Collection of a Persistent Object Store using PMOS `_". Morgan Kaufmann. in Advances in Persistent Object Systems, pp. 78--91. + + .. admonition:: Abstract + + PMOS is an incremental garbage collector designed specifically to + reclaim space in a persistent object store. It is complete in that + it will, after a finite number of invocations, reclaim all + unreachable storage. PMOS imposes minimum constraints on the order + of collection and offers techniques to reduce the I/O traffic + induced by the collector. Here we present the first implementation + of the PMOS collector called PMOS#1. The collector has been + incorporated into the stable heap layer of the generic persistent + object store used to support a number of languages including + Napier88. Our main design goals are to maintain the independence + of the language from the store and to retain the existing store + interface. The implementation has been completed and tested using + a Napier88 system. The main results of this work show that the + PMOS collector is implementable in a persistent store and that it + can be built without requiring changes to the language + interpreter. Initial performance measurements are reported. These + results suggest however, that effective use of PMOS requires + greater co-operation between language and store. + +* .. _NOPH92: + + Scott Nettles, James O'Toole, David Pierce, Nickolas Haines. 1992. "`Replication-Based Incremental Copying Collection `_". IWMM'92. + + .. admonition:: Abstract + + We introduce a new replication-based copying garbage collection + technique. 
We have implemented one simple variation of this method + to provide incremental garbage collection on stock hardware with + no special operating system or virtual memory support. The + performance of the prototype implementation is excellent: major + garbage collection pauses are completely eliminated with only a + slight increase in minor collection pause times. + + Unlike the standard copying algorithm, the replication-based + method does not destroy the original replica when a copy is + created. Instead, multiple copies may exist, and various standard + strategies for maintaining consistency may be applied. In our + implementation for Standard ML of New Jersey, the mutator + continues to use the from-space replicas until the collector has + achieved a consistent replica of all live data in to-space. + + We present a design for a concurrent garbage collector using the + replication-based technique. We also expect replication-based GC + methods to be useful in providing services for persistence and + distribution, and briefly discuss these possibilities. + +* .. _NETTLES92: + + Scott Nettles. 1992. "`A Larch Specification of Copying Garbage Collection `_". Carnegie Mellon University. CMU-CS-92-219. + + .. admonition:: Abstract + + Garbage collection (GC) is an important part of many language + implementations. One of the most important garbage collection + techniques is copying GC. This paper consists of an informal but + abstract description of copying collection, a formal specification + of copying collection written in the Larch Shared Language and the + Larch/C Interface Language, a simple implementation of a copying + collector written in C, an informal proof that the implementation + satisfies the specification, and a discussion of how the + specification applies to other types of copying GC such as + generational copying collectors. Limited familiarity with copying + GC or Larch is needed to read the specification. + +* .. _NO93A: + + Scott Nettles & James O'Toole. 1993. "Implementing Orthogonal Persistence: A Simple Optimization Using Replicating Collection". USENIX. IWOOOS'93. + + .. admonition:: Abstract + + Orthogonal persistence provides a safe and convenient model of + object persistence. We have implemented a transaction system which + supports orthogonal persistence in a garbage-collected heap. In + our system, replicating collection provides efficient concurrent + garbage collection of the heap. In this paper, we show how + replicating garbage collection can also be used to reduce commit + operation latencies in our implementation. + + We describe how our system implements transaction commit. We + explain why the presence of non-persistent objects can add to the + cost of this operation. We show how to eliminate these additional + costs by using replicating garbage collection. The resulting + implementation of orthogonal persistence should provide + transaction performance that is independent of the quantity of + non-persistent data in use. We expect efficient support for + orthogonal persistence to be valuable in operating systems + applications which use persistent data. + +* .. _NO93: + + Scott Nettles & James O'Toole. 1993. "`Real-Time Replication Garbage Collection `_". ACM. PLDI'93. + + .. admonition:: Abstract + + We have implemented the first copying garbage collector that + permits continuous unimpeded mutator access to the original + objects during copying. 
The garbage collector incrementally + replicates all accessible objects and uses a mutation log to bring + the replicas up-to-date with changes made by the mutator. An + experimental implementation demonstrates that the costs of using + our algorithm are small and that bounded pause times of 50 + milliseconds can be readily achieved. + +* .. _NIELSEN77: + + Norman R. Nielsen. 1977. "Dynamic Memory Allocation in Computer Simulation". ACM. CACM 20:11. + + .. admonition:: Abstract + + This paper investigates the performance of 35 dynamic memory + allocation algorithms when used to service simulation programs as + represented by 18 test cases. Algorithm performance was measured + in terms of processing time, memory usage, and external memory + fragmentation. Algorithms maintaining separate free space lists + for each size of memory block used tended to perform quite well + compared with other algorithms. Simple algorithms operating on + memory ordered lists (without any free list) performed + surprisingly well. Algorithms employing power-of-two block sizes + had favorable processing requirements but generally unfavorable + memory usage. Algorithms employing LIFO, FIFO, or memory ordered + free lists generally performed poorly compared with others. + +* .. _OTOOLE90: + + James O'Toole. 1990. "Garbage Collecting Locally". + + .. admonition:: Abstract + + Generational garbage collection is a simple technique for + automatic partial memory reclamation. In this paper, I present the + basic mechanics of generational collection and discuss its + characteristics. I compare several published algorithms and argue + that fundamental considerations of locality, as reflected in the + changing relative speeds of processors, memories, and disks, + strongly favor a focus on explicit optimization of I/O + requirements during garbage collection. I show that this focus on + I/O costs due to memory hierarchy debunks a well-known claim about + the relative costs of garbage collection and stack allocation. I + suggest two directions for future research in this area and + discuss some simple architectural changes in virtual memory + interfaces which may enable efficient garbage collector + utilization of standard virtual memory hardware. + +* .. _ON94: + + James O'Toole & Scott Nettles. 1994. "`Concurrent Replicating Garbage Collection `_". ACM. LFP'94. + + .. admonition:: Abstract + + We have implemented a concurrent copying garbage collector that + uses replicating garbage collection. In our design, the client can + continuously access the heap during garbage collection. No + low-level synchronization between the client and the garbage + collector is required on individual object operations. The garbage + collector replicates live heap objects and periodically + synchronizes with the client to obtain the client's current root + set and mutation log. An experimental implementation using the + Standard ML of New Jersey system on a shared-memory multiprocessor + demonstrates excellent pause time performance and moderate + execution time speedups. + +* .. _JRR99: + + Simon Peyton Jones, Norman Ramsey, Fermin Reig. 1999. "`C--: a portable assembly language that supports garbage collection `_". Springer-Verlag. International Conference on Principles and Practice of Declarative Programming 1999, LNCS 1702, pp. 1--28. + + .. admonition:: Abstract + + For a compiler writer, generating good machine code for a variety + of platforms is hard work. 
One might try to reuse a retargetable + code generator, but code generators are complex and difficult to + use, and they limit one's choice of implementation language. One + might try to use C as a portable assembly language, but C limits + the compiler writer's flexibility and the performance of the + resulting code. The wide use of C, despite these drawbacks, argues + for a portable assembly language. C-- is a new language designed + expressly for this purpose. The use of a portable assembly + language introduces new problems in the support of such high-level + run-time services as garbage collection, exception handling, + concurrency, profiling, and debugging. We address these problems + by combining the C-- language with a C-- run-time interface. The + combination is designed to allow the compiler writer a choice of + source-language semantics and implementation techniques, while + still providing good performance. + +* .. _PIEPER93: + + John S. Pieper. 1993. "Compiler Techniques for Managing Data Motion". Carnegie Mellon University. Technical report number CMU-CS-93-217. + + .. admonition:: Abstract + + Software caching, automatic algorithm blocking, and data overlays + are different names for the same problem: compiler management of + data movement throughout the memory hierarchy. Modern + high-performance architectures often omit hardware support for + moving data between levels of the memory hierarchy: iWarp does not + include a data cache, and Cray supercomputers do not have virtual + memory. These systems have effectively traded a more complicated + programming model for performance by replacing a + hardware-controlled memory hierarchy with a simple fast memory. + The simpler memories have less logic in the critical path, so the + cycle time of the memories is improved. + + For programs which fit in the resulting memory, the extra + performance is great. Unfortunately, the driving force behind + supercomputing today is a class of very large scientific problems, + both in terms of computation time and in terms of the amount of + data used. Many of these programs do not fit in the memory of the + machines available. When architects trade hardware support for + data migration to gain performance, control of the memory + hierarchy is left to the programmer. Either the program size must + be cut down to fit into the machine, or every loop which accesses + more data than will fit into memory must be restructured by hand. + This thesis describes how a compiler can relieve the programmer of + this burden, and automate data motion throughout the memory + hierarchy without direct hardware support. + + This work develops a model of how data is accessed within a + nested loop by typical scientific programs. It describes + techniques which can be used by compilers faced with the task of + managing data motion. The concentration is on nested loops which + process large data arrays using linear array subscripts. Because + the array subscripts are linear functions of the loop indices and + the loop indices form an integer lattice, linear algebra can be + applied to solve many compilation problems. + + The approach is to tile the iteration space of the loop nest. + Tiling allows the compiler to improve locality of reference. The + tiling basis matrix is chosen from a set of candidate vectors + which neatly divide the data set. The execution order of the tiles + is selected to maximize locality between tiles. Finally, the tile + sizes are chosen to minimize execution time.
+ + The approach has been applied to several common scientific loop + nests: matrix-matrix multiplication, QR-decomposition, and + LU-decomposition. In addition, an illustrative example from the + Livermore Loop benchmark set is examined. Although more compiler + time can be required in some cases, this technique produces better + code at no cost for most programs. + +* .. _PIRINEN98: + + Pekka P. Pirinen. 1998. "Barrier techniques for incremental tracing". ACM. ISMM'98 pp. 20--25. + + .. admonition:: Abstract + + This paper presents a classification of barrier techniques for + interleaving tracing with mutator operation during an incremental + garbage collection. The two useful tricolour invariants are + derived from more elementary considerations of graph traversal. + Barrier techniques for maintaining these invariants are classified + according to the action taken at the barrier (such as scanning an + object or changing its colour), and it is shown that the + algorithms described in the literature cover all the possibilities + except one. Unfortunately, the new technique is impractical. Ways + of combining barrier techniques are also discussed. + +* .. _PRINTEZIS96: + + Tony Printezis. 1996. "Disk Garbage Collection Strategies for Persistent Java". Proceedings of the First International Workshop on Persistence and Java. + + .. admonition:: Abstract + + This paper presents work currently in progress on Disk Garbage + Collection issues for PJava, an orthogonally persistent version of + Java. In particular, it concentrates on the initial Prototype of + the Disk Garbage Collector of PJava0 which has already been + implemented. This Prototype was designed to be very simple and + modular in order to be easily changed, evolved, improved, and + allow experimentation. Several experiments were performed in order + to test possible optimisations; these experiments concentrated on + the following four areas: a) efficient access to the store; b) + page-replacement algorithms; c) efficient discovery of live + objects during compaction; and d) dealing with forward references. + The paper presents a description of the Prototype's architecture, + the results of these experiments and related discussion, and some + future directions based on the experience gained from this work. + +* .. _PC96: + + Tony Printezis & Quentin Cutts. 1996. "Measuring the Allocation Rate of Napier88". Department of Computing Science at University of Glasgow. TR ?. + +* .. _REINHOLD93: + + M. B. Reinhold. 1993. "`Cache Performance of Garbage Collected Programming Languages `_". Laboratory for Computer Science at MIT. MIT/LCS/TR-581. + + .. admonition:: Abstract + + As processor speeds continue to improve relative to main-memory + access times, cache performance is becoming an increasingly + important component of program performance. Prior work on the + cache performance of garbage-collected programming languages has + either assumed or argued that conventional garbage-collection + methods will yield poor performance, and has therefore + concentrated on new collection algorithms designed specifically to + improve cache-level reference locality. This dissertation argues + to the contrary: Many programs written in garbage-collected + languages are naturally well-suited to the direct-mapped caches + typically found in modern computer systems. + + Using a trace-driven cache simulator and other analysis tools, + five nontrivial, long-running Scheme programs are studied. 
A + control experiment shows that the programs have excellent cache + performance without any garbage collection at all. A second + experiment indicates that the programs will perform well with a + simple and infrequently-run generational compacting collector. + + An analysis of the test programs' memory usage patterns reveals + that the mostly-functional programming style typically used in + Scheme programs, in combination with simple linear storage + allocation, causes most data objects to be dispersed in time and + space so that references to them cause little cache interference. + From this it follows that other Scheme programs, and programs + written in similar styles in different languages, should perform + well with a simple generational compacting collector; + sophisticated collectors intended to improve cache performance are + unlikely to be effective. The analysis also suggests that, as + locality becomes ever more important to program performance, + programs written in garbage-collected languages may turn out to + have a significant performance advantage over programs written in + more conventional languages. + +* .. _ROBSON77: + + J. M. Robson. 1977. "Worst case fragmentation of first fit and best fit storage allocation strategies". ACM. ACM Computer Journal, 20(3):242--244. + +* .. _RR97: + + Gustavo Rodriguez-Rivera & Vince Russo. 1997. "Non-intrusive Cloning Garbage Collection with Stock Operating System Support". Software -- Practice and Experience. 27:8. + + .. admonition:: Abstract + + It is well accepted that automatic garbage collection simplifies + programming, promotes modularity, and reduces development effort. + However, it is commonly believed that these advantages do not + counteract the perceived price: excessive overheads, possible long + pause times while garbage collections occur, and the need to + modify existing code. Even though there are publicly available + garbage collector implementations that can be used in existing + programs, they do not guarantee short pauses, and some + modification of the application using them is still required. In + this paper we describe a snapshot-at-beginning concurrent garbage + collector algorithm and its implementation. This algorithm + guarantees short pauses, and can be easily implemented on stock + UNIX-like operating systems. Our results show that our collector + performs comparably to other garbage collection implementations on + uniprocessor machines and outperforms similar collectors on + multiprocessor machines. We also show our collector to be + competitive in performance with explicit deallocation. Our + collector has the added advantage of being non-intrusive. Using a + dynamic linking technique and effective root set inferencing, we + have been able to successfully run our collector even in + commercial programs where only the binary executable and no source + code is available. In this paper we describe our algorithm, its + implementation, and provide both an algorithmic and a performance + comparison between our collector and other similar garbage + collectors. + +* .. _ROJEMO95: + + Niklas Röjemo. 1995. "Highlights from nhc -- a space-efficient Haskell compiler". Chalmers University of Technology. + + .. admonition:: Abstract + + Self-compiling implementations of Haskell, i.e., those written in + Haskell, have been and, except one, are still space-consuming + monsters. Object code sizes for the compilers themselves are 3-8Mb, + and they need 12-20Mb to recompile themselves.
One reason for the + huge demands for memory is that the main goal for these compilers + is to produce fast code. However, the compiler described in this + paper, called "nhc" for "Nearly a Haskell Compiler", is the one + above mentioned exception. This compiler concentrates on keeping + memory usage down, even at a cost in time. The code produced is + not fast, but nhc is usable, and the resulting programs can be run + on computers with small memory. + + This paper describes some of the implementation choices done, in + the Haskell part of the source code, to reduce memory consumption + in nhc. It is possible to use these also in other Haskell + compilers with no, or very small, changes to their run-time + systems. + + Time is neither the main focus of nhc nor of this paper, but there + is nevertheless a small section about the speed of nhc. The most + notable observation concerning speed is that nhc spends + approximately half the time processing interface files, which is + much more than needed in the type checker. Processing interface + files is also the most space consuming part of nhc in most cases. + It is only when compiling source files with large sets of mutually + recursive functions that more memory is needed to type check than + to process interface files. + +* .. _ROJEMO95A: + + Niklas Röjemo. 1995. "Generational garbage collection for lazy functional languages without temporary space leaks". Chalmers University of Technology. + + .. admonition:: Abstract + + Generational garbage collection is an established method for + creating efficient garbage collectors. Even a simple + implementation where all nodes that survive one garbage collection + are *tenured*, i.e., moved to an old generation, works well in + strict languages. In lazy languages, however, such an + implementation can create severe *temporary space leaks*. The + temporary space leaks appear in programs that traverse large + lazily built data structures, e.g., a lazy list representing a + large file, where only a small part is needed at any time. A + simple generational garbage collector cannot reclaim the memory, + used by the lazily built list, at minor collections. The reason is + that at least one of the nodes in the list belongs to the old + generation, after the first minor collection, and will hold on to + the rest of the nodes in the list until the next major collection. + +* .. _RR96: + + Niklas Röjemo & Colin Runciman. 1996. "Lag, drag, void and use -- heap profiling and space-efficient compilation revisited". ACM, SIGPLAN. ICFP'96, ACM SIGPLAN Notices 31:6, ISBN 0-89791-770-7, pp. 34--41. + + .. admonition:: Abstract + + The context for this paper is functional computation by graph + reduction. Our overall aim is more efficient use of memory. The + specific topic is the detection of dormant cells in the live graph + -- those retained in heap memory though not actually playing a + useful role in computation. We describe a profiler that can + identify heap consumption by such 'useless' cells. Unlike heap + profilers based on traversals of the live heap, this profiler + works by examining cells post-mortem. The new profiler has + revealed a surprisingly large proportion of 'useless' cells, even + in some programs that previously seemed space-efficient such as + the bootstrapping Haskell compiler "nhc". + +* .. _RW99: + + David J. Roth, David S. Wise. 1999. "`One-bit counts between unique and sticky `_". ACM. ISMM'98, pp. 49--56. + + .. 
admonition:: Abstract + + Stoye's one-bit reference tagging scheme can be extended to local + counts of two or more via two strategies. The first, suited to + pure register transactions, is a cache of referents to two shared + references. The analog of Deutsch's and Bobrow's multiple-reference + table, this cache is sufficient to manage small counts across + successive assignment statements. Thus, accurate reference counts + above one can be tracked for short intervals, like that bridging + one function's environment to its successor's. + + The second, motivated by runtime stacks that duplicate references, + avoids counting any references from the stack. It requires a local + pointer-inversion protocol in the mutator, but one still local to + the referent and the stack frame. Thus, an accurate reference + count of one can be maintained regardless of references from the + recursion stack. + +* .. _ROVNER85: + + Paul Rovner. 1985. "`On Adding Garbage Collection and Runtime Types to a Strongly-Typed, Statically-Checked, Concurrent Language `_". Xerox PARC. TR CSL-84-7. + + .. admonition:: Abstract + + Enough is known now about garbage collection, runtime types, + strong-typing, static-checking and concurrency that it is possible + to explore what happens when they are combined in a real + programming system. + + Storage management is one of a few central issues through which + one can get a good view of the design of an entire system. + Tensions between ease of integration and the need for protection; + between generality, simplicity, flexibility, extensibility and + efficiency are all manifest when assumptions and attitudes about + managing storage are studied. And deep understanding follows best + from the analysis of systems that people use to get real work + done. + + This paper is not for those who seek arguments pro or con about + the need for these features in programming systems; such issues + are for other papers. This one assumes these features to be good + and describes how they combine and interact in Cedar, a + programming language and environment designed to help programmers + build moderate-sized experimental systems for moderate numbers of + people to test and use. + +* .. _RUNCIMAN92: + + Colin Runciman & David Wakeling. 1992. "`Heap Profiling of Lazy Functional Programs `_". University of York. + + .. admonition:: Abstract + + We describe the design, implementation, and use of a new kind of + profiling tool that yields valuable information about the memory + use of lazy functional programs. The tool has two parts: a + modified functional language implementation which generates + profiling information during the execution of programs, and a + separate program which converts this information to graphical + form. With the aid of profile graphs, one can make alterations to + a functional program which dramatically reduce its space + consumption. We demonstrate that this is the case for a genuine + example -- the first to which the tool has been applied -- for + which the results are strikingly successful. + +* .. _RR94: + + Colin Runciman & Niklas Röjemo. 1994. "`New dimensions in heap profiling `_". University of York. + + .. admonition:: Abstract + + First-generation heap profilers for lazy functional languages have + proved to be effective tools for locating some kinds of space + faults, but in other cases they cannot provide sufficient + information to solve the problem.
This paper describes the design, + implementation and use of a new profiler that goes beyond the + two-dimensional "who produces what" view of heap cells to provide + information about their more dynamic and structural attributes. + Specifically, the new profiler can distinguish between cells + according to their *eventual lifetime*, or on the basis of the + *closure retainers* by virtue of which they remain part of the + live heap. A bootstrapping Haskell compiler (nhc) hosts the + implementation: among examples of the profiler's use we include + self-application to nhc. Another example is the original + heap-profiling case study "clausify", which now consumes even less + memory and is much faster. + +* .. _RR96A: + + Colin Runciman & Niklas Röjemo. 1996. "Two-pass heap profiling: a matter of life and death". Department of Computer Science, University of York. + + .. admonition:: Abstract + + A heap profile is a chart showing the contents of heap memory + throughout a computation. Contents are depicted abstractly by + showing how much space is occupied by memory cells in each of + several classes. A good heap profiler can use a variety of + attributes of memory cells to define a classification. Effective + profiling usually involves a combination of attributes. The ideal + profiler gives full support for combination in two ways. First, a + section of the heap of interest to the programmer can be specified + by constraining the values of any combination of cell attributes. + Secondly, no matter what attributes are used to specify such a + section, a heap profile can be obtained for that section only, and + any other attribute can be used to define the classification. + + Achieving this ideal is not simple for some combinations of + attributes. A heap profile is derived by interpolation of a series + of censuses of heap contents at different stages. The obvious way + to obtain census data is to traverse the live heap at intervals + throughout the computation. This is fine for static attributes + (e.g. What type of value does this memory cell represent?), and + for dynamic attributes that can be determined for each cell by + examining the heap at any given moment (e.g. From which function + closures can this cell be reached?). But some attributes of cells + can only be determined retrospectively by post-mortem inspection + as a cell is overwritten or garbage-collected (e.g. Is this cell + ever used again?). Now we see the problem: if a profiler supports + both live and post-mortem attributes, how can we implement the + ideal of unrestricted combinations? That is the problem we solve + in this paper. We give techniques for profiling a heap section + specified in terms of both live and post-mortem attributes. We + show how to generate live-attribute profiles of a section of the + heap, specified using post-mortem attributes, and vice versa. + +* .. _SG95: + + Jacob Seligmann & Steffen Grarup. 1995. "`Incremental Mature Garbage Collection Using the Train Algorithm `_". Springer-Verlag. ECOOP'95, Lecture Notes in Computer Science, Vol. 952, pp. 235--252, ISBN 3-540-60160-0. + + .. admonition:: Abstract + + We present an implementation of the Train Algorithm, an + incremental collection scheme for reclamation of mature garbage in + generation-based memory management systems. To the best of our + knowledge, this is the first Train Algorithm implementation ever.
+ Using the algorithm, the traditional mark-sweep garbage collector + employed by the Mjølner run-time system for the + object-oriented BETA programming language was replaced by a + non-disruptive one, with only negligible time and storage + overheads. + +* .. _SB00: + + Manuel Serrano, Hans-J. Boehm. 2000. "`Understanding memory allocation of Scheme programs `_". ACM. Proceedings of International Conference on Functional Programming 2000. + + .. admonition:: Abstract + + Memory is the performance bottleneck of modern architectures. + Keeping memory consumption as low as possible enables fast and + unobtrusive applications. But it is not easy to estimate the + memory use of programs implemented in functional languages, due to + both the complex translations of some high level constructs, and + the use of automatic memory managers. To help understand memory + allocation behavior of Scheme programs, we have designed two + complementary tools. The first one reports on frequency of + allocation, heap configurations and on memory reclamation. The + second tracks down memory leaks. We have applied these tools to + our Scheme compiler, the largest Scheme program we have been + developing. This has allowed us to drastically reduce the amount + of memory consumed during its bootstrap process, without requiring + much development time. Development tools will be neglected unless + they are both conveniently accessible and easy to use. In order to + avoid this pitfall, we have carefully designed the user interface + of these two tools. Their integration into a real programming + environment for Scheme is detailed in the paper. + +* .. _SHAPIRO94: + + Marc Shapiro & Paulo Ferreira. 1994. "`Larchant-RDOSS: a distributed shared persistent memory and its garbage collector `_". INRIA. INRIA Rapport de Recherche no. 2399; Cornell Computer Science TR94-1466. + + .. admonition:: Abstract + + Larchant-RDOSS is a distributed shared memory that persists on + reliable storage across process lifetimes. Memory management is + automatic: including consistent caching of data and of locks, + collecting objects unreachable from the persistent root, writing + reachable objects to disk, and reducing store fragmentation. + Memory management is based on a novel garbage collection + algorithm, that approximates a global trace by a series of local + traces, with no induced I/O or locking traffic, and no + synchronization between the collector and the application + processes. This results in a simple programming model, and + expected minimal added application latency. The algorithm is + designed for the most unfavorable environment (uncontrolled + programming language, reference by pointers, distributed system, + non-coherent shared memory) and should work well also in more + favorable settings. + +* .. _SHAW87: + + Robert A. Shaw. 1987. "Improving Garbage Collector Performance in Virtual Memory". Stanford University. CSL-TR-87-323. + +* .. _SHAW88: + + Robert A. Shaw. 1988. "Empirical Analysis of a LISP System". Stanford University. CSL-TR-88-351. + +* .. _SINGHAL92: + + Vivek Singhal, Sheetal V. Kakkad, Paul R. Wilson. 1992. "`Texas: An Efficient, Portable Persistent Store `_". University of Texas at Austin. + + .. admonition:: Abstract + + Texas is a persistent storage system for C++, providing high + performance while emphasizing simplicity, modularity and + portability. 
A key component of the design is the use of pointer + swizzling at page fault time, which exploits existing virtual + memory features to implement large address spaces efficiently on + stock hardware, with little or no change to existing compilers. + Long pointers are used to implement an enormous address space, but + are transparently converted to the hardware-supported pointer + format when pages are loaded into virtual memory. + + Runtime type descriptors and slightly modified heap allocation + routines support pagewise pointer swizzling by allowing objects + and their pointer fields to be identified within pages. If + compiler support for runtime type identification is not available, + a simple preprocessor can be used to generate type descriptors. + + This address translation is largely independent of issues of data + caching, sharing, and checkpointing; it employs operating systems' + existing virtual memories for caching, and a simple and flexible + log-structured storage manager to improve checkpointing + performance. + + Pagewise virtual memory protections are also used to detect writes + for logging purposes, without requiring any changes to compiled + code. This may degrade checkpointing performance for small + transactions with poor locality of writes, but page diffing and + sub-page logging promise to keep performance competitive with + finer-grained checkpointing schemes. + + Texas presents a simple programming interface; an application + creates persistent objects by simply allocating them on the + persistent heap. In addition, the implementation is relatively + small, and is easy to incorporate into existing applications. The + log-structured storage module easily supports advanced extensions + such as compressed storage, versioning, and adaptive + reorganization. + +* .. _SOBALVARRO88: + + P. G. Sobalvarro. 1988. "`A Lifetime-based Garbage Collector for LISP Systems on General-Purpose Computers `_". MIT. AITR-1417. + + .. admonition:: Abstract + + Garbage collector performance in LISP systems on custom hardware has been substantially improved by the adoption of lifetime-based garbage collection techniques. To date, however, successful lifetime-based garbage collectors have required special-purpose hardware, or at least privileged access to data structures maintained by the virtual memory system. I present here a lifetime-based garbage collector requiring no special-purpose hardware or virtual memory system support, and discuss its performance. + +* .. _STEELE75: + + Guy L. Steele. 1975. "Multiprocessing Compactifying Garbage Collection". CACM. 18:9 pp. 495--508. + + .. admonition:: Abstract + + Algorithms for a multiprocessing compactifying garbage collector + are presented and discussed. The simple case of two processors, + one performing LISP-like list operations and the other performing + garbage collection continuously, is thoroughly examined. The + necessary capabilities of each processor are defined, as well as + interprocessor communication and interlocks. Complete procedures + for garbage collection and for standard list processing primitives + are presented and thoroughly explained. Particular attention is + given to the problems of marking and relocating list cells while + another processor may be operating on them. 
The primary aim + throughout is to allow the list processor to run unimpeded while + the other processor reclaims list storage. The more complex case + involving several list processors and one or more garbage + collection processors is also briefly discussed. + +* .. _STEELE76: + + Guy L. Steele. 1976. "Corrigendum: Multiprocessing Compactifying Garbage Collection". CACM. 19:6 p. 354. + +* .. _STEELE77: + + Guy L. Steele. 1977. "`Data Representation in PDP-10 MACLISP `_". MIT. AI Memo 420. + + .. admonition:: Abstract + + The internal representations of the various MacLISP data types are + presented and discussed. Certain implementation tradeoffs are + considered. The ultimate decisions on these tradeoffs are + discussed in the light of MacLISP's prime objective of being an + efficient high-level language for the implementation of large + systems such as MACSYMA. The basic strategy of garbage collection + is outlined, with reference to the specific representations + involved. Certain "clever tricks" are explained and justified. The + "address space crunch" is explained and some alternative solutions + explored. + +* .. _SLC99: + + James M. Stichnoth, Guei-Yuan Lueh, Michal Cierniak. 1999. "`Support for Garbage Collection at Every Instruction in a Java Compiler `_". SIGPLAN. Proceedings of the 1999 ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI). SIGPLAN Notices 34(5), pp. 118--127. + + .. admonition:: Abstract + + A high-performance implementation of a Java Virtual Machine + requires a compiler to translate Java bytecodes into native + instructions, as well as an advanced garbage collector (e.g., + copying or generational). When the Java heap is exhausted and the + garbage collector executes, the compiler must report to the + garbage collector all live object references contained in physical + registers and stack locations. Typical compilers only allow + certain instructions (e.g., call instructions and backward + branches) to be GC-safe; if GC happens at some other instruction, + the compiler may need to advance execution to the next GC-safe + point. Until now, no one has ever attempted to make every + compiler-generated instruction GC-safe, due to the perception that + recording this information would require too much space. This kind + of support could improve the GC performance in multithreaded + applications. We show how to use simple compression techniques to + reduce the size of the GC map to about 20% of the generated code + size, a result that is competitive with the best previously + published results. In addition, we extend the work of Agesen, + Detlefs, and Moss, regarding the so-called “JSR Problem” (the + single exception to Java’s type safety property), in a way that + eliminates the need for extra runtime overhead in the generated + code. + +* .. _SCN84: + + Will R. Stoye, T. J. W. Clarke, Arthur C. Norman. 1984. "Some Practical Methods for Rapid Combinator Reduction". In LFP 1984, 159--166. + + .. admonition:: Abstract + + The SKIM II processor is a microcoded hardware machine for the + rapid evaluation of functional languages. This paper gives details + of some of the more novel methods employed by SKIM II, and + resulting performance measurements. The authors conclude that + combinator reduction can still form the basis for the efficient + implementation of a functional language. + +* .. _TD95: + + David Tarditi & Amer Diwan. 1995. "`Measuring the Cost of Storage Management `_". Carnegie Mellon University. CMU-CS-94-201. + + ..
admonition:: Abstract + + We study the cost of storage management for garbage-collected + programs compiled with the Standard ML of New Jersey compiler. We + show that the cost of storage management is not the same as the + time spent garbage collecting. For many of the programs, the time + spent garbage collecting is less than the time spent doing other + storage-management tasks. + +* .. _TJ94: + + Stephen Thomas, Richard E. Jones. 1994. "Garbage Collection for Shared Environment Closure Reducers". Computing Laboratory, The University of Kent at Canterbury. Technical Report 31-94. + + .. admonition:: Abstract + + Shared environment closure reducers such as Fairbairn and Wray's + TIM incur a comparatively low cost when creating a suspension, and + so provide an elegant method for implementing lazy functional + evaluation. However, comparatively little attention has been given + to the problems involved in identifying which portions of a shared + environment are needed (and ignoring those which are not) during a + garbage collection. Proper consideration of this issue has subtle + consequences when implementing a storage manager in a TIM-like + system. We describe the problem and illustrate the negative + consequences of ignoring it. + + We go on to describe a solution in which the compiler determines + statically which portions of that code's environment are required + for each piece of code it generates, and emits information to + assist the run-time storage manager to scavenge environments + selectively. We also describe a technique for expressing this + information directly as executable code, and demonstrate that a + garbage collector implemented in this way can perform + significantly better than an equivalent, table-driven interpretive + collector. + +* .. _THOMAS95: + + Stephen Thomas. 1995. "Garbage Collection in Shared-Environment Closure Reducers: Space-Efficient Depth First Copying using a Tailored Approach". *Information Processing Letters.* 56:1, pp. 1--7. + + .. admonition:: Abstract + + Implementations of abstract machines such as the OP-TIM and the + PG-TIM need to use a tailored garbage collector which seems to + require an auxiliary stack, with a potential maximum size that is + directly proportional to the amount of live data in the heap. + However, it turns out that it is possible to build a recursive + copying collector that does not require additional space by + reusing already-scavenged space. This paper is a description of + this technique. + +* .. _TT97: + + Mads Tofte & Jean-Pierre Talpin. 1997. "`Region-Based Memory Management `_". Information and Computation 132(2), pp. 109--176. + + .. admonition:: Abstract + + This paper describes a memory management discipline for programs + that perform dynamic memory allocation and de-allocation. At + runtime, all values are put into regions. The store consists of a + stack of regions. All points of region allocation and + de-allocation are inferred automatically, using a type and effect + based program analysis. The scheme does not assume the presence of + a garbage collector. The scheme was first presented in 1994 (M. + Tofte and J.-P. Talpin, in *Proceedings of the 21st ACM + SIGPLAN-SIGACT Symposium on Principles of Programming Languages,* + pp. 188--201); subsequently, it has been tested in the ML Kit with + Regions, a region-based, garbage-collection free implementation of + the Standard ML Core Language, which includes recursive datatypes, + higher-order functions and updatable references (L. Birkedal, M.
Tofte, and M. Vejlstrup, (1996), in *Proceedings of the 23rd ACM + SIGPLAN-SIGACT Symposium on Principles of Programming Languages,* + pp. 171--183). This paper defines a region-based dynamic semantics + for a skeletal programming language extracted from Standard ML. We + present the inference system which specifies where regions can be + allocated and de-allocated and a detailed proof that the system is + sound with respect to a standard semantics. We conclude by giving + some advice on how to write programs that run well on a stack of + regions, based on practical experience with the ML Kit. + +* .. _UNGAR84: + + Dave Ungar. 1984. "`Generation Scavenging: A Non-disruptive High Performance Storage Reclamation Algorithm `_". ACM, SIGSOFT, SIGPLAN. Practical Programming Environments Conference. + + .. admonition:: Abstract + + Many interactive computing environments provide automatic storage + reclamation and virtual memory to ease the burden of managing + storage. Unfortunately, many storage reclamation algorithms impede + interaction with distracting pauses. *Generation Scavenging* is a + reclamation algorithm that has no noticeable pauses, eliminates + page faults for transient objects, compacts objects without + resorting to indirection, and reclaims circular structures, in one + third the time of traditional approaches. + +* .. _UNGAR88: + + Dave Ungar & Frank Jackson. 1988. "`Tenuring Policies for Generation-Based Storage Reclamation `_". SIGPLAN. OOPSLA '88 Conference Proceedings, ACM SIGPLAN Notices, Vol. 23, No. 11, pp. 1--17. + + .. admonition:: Abstract + + One of the most promising automatic storage reclamation + techniques, generation-based storage reclamation, suffers poor + performance if many objects live for a fairly long time and then + die. We have investigated the severity of the problem by + simulating Generation Scavenging automatic storage reclamation + from traces of actual four-hour sessions. There was a wide + variation in the sample runs, with garbage-collection overhead + ranging from insignificant, during interactive runs, to severe, + during a single non-interactive run. All runs demonstrated that + performance could be improved with two techniques: segregating + large bitmaps and strings, and mediating tenuring with demographic + feedback. These two improvements deserve consideration for any + generation-based storage reclamation strategy. + +* .. _VO96: + + Kiem-Phong Vo. 1996. "Vmalloc: A General and Efficient Memory Allocator". Software -- Practice and Experience. 26(3): 357--374 (1996). + + .. admonition:: Abstract + + On C/Unix systems, the malloc interface is standard for dynamic + memory allocation. Despite its popularity, malloc's shortcomings + frequently cause programmers to code around it. The new library + Vmalloc generalizes malloc to give programmers more control over + memory allocation. Vmalloc introduces the idea of organizing + memory into separate regions, each with a discipline to get raw + memory and a method to manage allocation. Applications can write + their own disciplines to manipulate arbitrary types of memory or + just to better organize memory in a region by creating new regions + out of its memory. The provided set of allocation methods includes + general purpose allocations, fast special cases and aids for + memory debugging or profiling.
A compatible malloc interface + enables current applications to select allocation methods using + environment variables so they can tune for performance or perform + other tasks such as profiling memory usage, generating traces of + allocation calls or debugging memory errors. A performance study + comparing Vmalloc and currently popular malloc implementations + shows that Vmalloc is competitive to the best of these allocators. + Applications can gain further performance improvement by using the + right mixture of regions with different Vmalloc methods. + +* .. _WW76: + + Daniel C. Watson, David S. Wise. 1976. "Tuning Garwick's algorithm for repacking sequential storage". *BIT.* 16, 4 (December 1976): 442--450. + + .. admonition:: Abstract + + Garwick's algorithm, for repacking LIFO lists stored in a + contiguous block of memory, bases the allocation of remaining + space upon both sharing and previous stack growth. A system + whereby the weight applied to each method can be adjusted + according to the current behaviour of the stacks is discussed. + + We also investigate the problem of determining during memory + repacking that the memory is used to saturation and the driving + program should therefore be aborted. The tuning parameters studied + here seem to offer no new grasp on this problem. + +* .. _WLM92: + + Paul R. Wilson, Michael S. Lam, Thomas G. Moher. 1992. "Caching Considerations for Generational Garbage Collection". ACM. L&FP 92. + + .. admonition:: Abstract + + GC systems allocate and reuse memory cyclically; this imposes a + cyclic pattern on memory accesses that has its own distinctive + locality characteristics. The cyclic reuse of memory tends to + defeat caching strategies if the reuse cycle is too large to fit + in fast memory. Generational GCs allow a smaller amount of memory + to be reused more often. This improves VM performance, because the + frequently-reused area stays in main memory. The same principle + can be applied at the level of high-speed cache memories, if the + cache is larger than the youngest generation. Because of the + repeated cycling through a fixed amount of memory, however, + generational GC interacts with cache design in unusual ways, and + modestly set-associative caches can significantly outperform + direct-mapped caches. + + While our measurements do not show very high miss rates for GCed + systems, they indicate that performance problems are likely in + faster next-generation systems, where second-level cache misses + may cost scores of cycles. Software techniques can improve cache + performance of garbage-collected systems, by decreasing the cache + "footprint" of the youngest generation; compiler techniques that + reduce the amount of heap allocation also improve locality. Still, + garbage-collected systems with a high rate of heap allocation + require somewhat more cache capacity and/or main memory bandwidth + than conventional systems. + +* .. _WIL92A: + + Paul R. Wilson, Sheetal V. Kakkad. 1992. "`Pointer Swizzling at Page Fault Time `_". University of Texas at Austin. + + .. admonition:: Abstract + + Pointer swizzling at page fault time is a novel address + translation mechanism that exploits conventional address + translation hardware. It can support huge address spaces + efficiently without long hardware addresses; such large address + spaces are attractive for persistent object stores, distributed + shared memories, and shared address space operating systems. 
This + swizzling scheme can be used to provide data compatibility across + machines with different word sizes, and even to provide binary + code compatibility across machines with different hardware address + sizes. + + Pointers are translated ("swizzled") from a long format to a + shorter hardware-supported format at page fault time. No extra + hardware is required, and no continual software overhead is + incurred by presence checks of indirection of pointers. This + pagewise technique exploits temporal and spatial locality in much + the same way as normal virtual memory; this gives it many + desirable performance characteristics, especially given the trend + toward larger main memories. It is easy to implement using common + compilers and operating systems. + +* .. _WIL94: + + Paul R. Wilson. 1994. "`Uniprocessor Garbage Collection Techniques `_". University of Texas. + + .. admonition:: Abstract + + We survey basic garbage collection algorithms, and variations such + as incremental and generational collection; we then discuss + low-level implementation considerations and the relationships + between storage management systems, languages, and compilers. + Throughout, we attempt to present a unified view based on abstract + traversal strategies, addressing issues of conservatism, + opportunism, and immediacy of reclamation; we also point out a + variety of implementation details that are likely to have a + significant impact on performance. + +* .. _WIL95: + + Paul R. Wilson, Mark S. Johnstone, Michael Neely, David Boles. 1995. "`Dynamic Storage Allocation: A Survey and Critical Review `_". University of Texas at Austin. + + .. admonition:: Abstract + + Dynamic memory allocation has been a fundamental part of most + computer systems since roughly 1960, and memory allocation is + widely considered to be either a solved problem or an insoluble + one. In this survey, we describe a variety of memory allocator + designs and point out issues relevant to their design and + evaluation. We then chronologically survey most of the literature + on allocators between 1961 and 1995. (Scores of papers are + discussed, in varying detail, and over 150 references are given.) + + We argue that allocator designs have been unduly restricted by an + emphasis on mechanism, rather than policy, while the latter is + more important; higher-level strategic issues are still more + important, but have not been given much attention. + + Most theoretical analyses and empirical allocator evaluations to + date have relied on very strong assumptions of randomness and + independence, but real program behavior exhibits important + regularities that must be exploited if allocators are to perform + well in practice. + +* .. _WISE78: + + David S. Wise. 1978. "`The double buddy system `_". Department of Computer Science at Indiana University. Technical Report 79. + + .. admonition:: Abstract + + A new buddy system is described in which the region of storage + being managed is partitioned into two sub-regions, each managed by + a fairly standard "binary" buddy system. Like the weighted buddy + systems of Shen and Peterson, the block sizes are of sizes 2\ + :superscript:`n+1` or 3·2\ :superscript:`n`, but unlike theirs + there is no extra overhead for typing information or for buddy + calculation, and an allocation which requires splitting an extant + available block only rarely creates a block smaller than the one + being allocated. 
Such smaller blocks are carved out only when the + boundary between the two subregions floats; the most interesting + property of this system is that the procedures for allocation and + deallocation are designed to keep blocks immediately adjacent to + the subregion boundary free, so that the boundary may be moved + within a range of unused space without disturbing blocks in use. + This option is attained with a minimum of extra computation beyond + that of a binary buddy system, and provides this scheme with a new + approach to the problem of external fragmentation. + +* .. _WISE79: + + David S. Wise. 1979. "`Morris's garbage compaction algorithm restores reference counts `_". TOPLAS. 1, 1 (July 1979): 115--120. + + .. admonition:: Abstract + + The two-pass compaction algorithm of F.L. Morris, which follows + upon the mark phase in a garbage collector, may be modified to + recover reference counts for a hybrid storage management system. + By counting the executions of two loops in that algorithm where + upward and downward references, respectively, are forwarded to the + relocation address of one node, we can initialize a count of + active references and then update it but once. The reference count + may share space with the mark bit in each node, but it may not + share the additional space required in each pointer by Morris's + algorithm, space which remains unused outside the garbage + collector. + +* .. _WISE85: + + David S. Wise. 1985. "`Design for a multiprocessing heap with on-board reference counting `_". Springer-Verlag. In J.-P. Jouannaud (ed.), Functional Programming Languages and Computer Architecture, Lecture Notes in Computer Science 201: 289--304. + + .. admonition:: Abstract + + A project to design a pair of memory chips with a modicum of + intelligence is described. Together, the two allow simple + fabrication of a small memory bank, a heap of binary (LISP-like) + nodes that offers the following features: 64-bit nodes; two + pointer fields per node up to 29 bits each; reference counts + implicitly maintained on writes; 2 bits per node for marking + (uncounted) circular references; 4 bits per node for + conditional-store testing at the memory; provision for + processor-driven, recounting garbage collection. + +* .. _WISE92: + + .. _WISE93: + + David S. Wise. 1993. "`Stop-and-copy and one-bit reference counting `_". *Information Processing Letters.* 46, 5 (July 1993): 243--249. + + .. admonition:: Abstract + + A stop-and-copy garbage collector updates one-bit reference + counting with essentially no extra space and minimal memory cycles + beyond the conventional collection algorithm. Any object that is + uniquely referenced during a collection becomes a candidate for + cheap recovery before the next one, or faster recopying then if it + remains uniquely referenced. Since most objects stay uniquely + referenced, subsequent collections run faster even if none are + recycled between garbage collections. This algorithm extends to + generation scavenging, it admits uncounted references from roots, + and it corrects conservatively stuck counters, that result from + earlier uncertainty whether references were unique. + +* .. _WW95: + + David S. Wise, Joshua Walgenbach. 1996. "`Static and Dynamic Partitioning of Pointers as Links and Threads `_". SIGPLAN. Proc. 1996 ACM SIGPLAN Intl. Conf. on Functional Programming, SIGPLAN Not. 31, 6 (June 1996), pp. 42--49. + + .. 
admonition:: Abstract + + Identifying some pointers as invisible threads, for the purposes + of storage management, is a generalization from several widely + used programming conventions, like threaded trees. The necessary + invariant is that nodes that are accessible (without threads) emit + threads only to other accessible nodes. Dynamic tagging or static + typing of threads ameliorates storage recycling both in functional + and imperative languages. + + We have seen the distinction between threads and links sharpen + both hardware- and software-supported storage management in + SCHEME, and also in C. Certainly, therefore, implementations of + languages that already have abstract management and concrete + typing, should detect and use this as a new static type. + +* .. _WHHHO94: + + David S. Wise, Brian Heck, Caleb Hess, Willie Hunt, Eric Ost. 1997. "`Uniprocessor Performance of a Reference-Counting Hardware Heap `_". *LISP and Symbolic Computation.* 10, 2 (July 1997), pp. 159--181. + + .. admonition:: Abstract + + A hardware self-managing heap memory (RCM) for languages like + LISP, SMALLTALK, and JAVA has been designed, built, tested and + benchmarked. On every pointer write from the processor, + reference-counting transactions are performed in real time within + this memory, and garbage cells are reused without processor + cycles. A processor allocates new nodes simply by reading from a + distinguished location in its address space. The memory hardware + also incorporates support for off-line, multiprocessing, + mark-sweep garbage collection. + + Performance statistics are presented from a partial implementation + of SCHEME over five different memory models and two garbage + collection strategies, from main memory (no access to RCM) to a + fully operational RCM installed on an external bus. The + performance of the RCM memory is more than competitive with main + memory. + +* .. _WITHINGTON91: + + P. Tucker Withington. 1991. "`How Real is 'Real-Time' Garbage Collection? `_". ACM. OOPSLA/ECOOP '91 Workshop on Garbage Collection in Object-Oriented Systems. + + .. admonition:: Abstract + + A group at Symbolics is developing a Lisp runtime kernel, derived + from its Genera operating system, to support real-time control + applications. The first candidate application has strict + response-time requirements (so strict that it does not permit the + use of paged virtual memory). Traditionally, Lisp's automatic + storage-management mechanism has made it unsuitable to real-time + systems of this nature. A number of garbage collector designs and + implementations exist (including the Genera garbage collector) + that purport to be "real-time", but which actually have only + mitigated the impact of garbage collection sufficiently that it + usually goes unnoticed by humans. Unfortunately, + electro-mechanical systems are not so forgiving. This paper + examines the limitations of existing real-time garbage collectors + and describes the avenues that we are exploring in our work to + develop a CLOS-based garbage collector that can meet the real-time + requirements of real real-time systems. + +* .. _YIP91: + + G. May Yip. 1991. "`Incremental, Generational Mostly-Copying Garbage Collection in Uncooperative Environments `_". Digital Equipment Corporation. + + .. admonition:: Abstract + + The thesis of this project is that incremental collection can be + done feasibly and efficiently in an architecture and compiler + independent manner. 
The design and implementation of an + incremental, generational mostly-copying garbage collector for C++ + is presented. The collector achieves, simultaneously, real-time + performance (from incremental collection), low total garbage + collection delay (from generational collection), and the ability + to function without hardware and compiler support (from + mostly-copying collection). + + The incremental collector runs on commercially-available + uniprocessors, such as the DECStation 3100, without any special + hardware support. It uses UNIX's user controllable page protection + facility (mprotect) to synchronize between the scanner (of the + collector) and the mutator (of the application program). Its + implementation does not require any modification to the C++ + compiler. The maximum garbage collection pause is well within the + 100-millisecond limit imposed by real-time applications executing + on interactive workstations. Compared to its non-incremental + version, the total execution time of the incremental collector is + not adversely affected. + +* .. _YUASA90: + + Taiichi Yuasa. 1990. "Real-Time Garbage Collection on General-Purpose Machines". Journal of Systems and Software. 11:3 pp. 181--198. + + .. admonition:: Abstract + + An algorithm for real-time garbage collection is presented, proved + correct, and evaluated. This algorithm is intended for + list-processing systems on general-purpose machines, i.e., Von + Neumann style serial computers with a single processor. On these + machines, real-time garbage collection inevitably causes some + overhead on the overall execution of the list-processing system, + because some of the primitive list-processing operations must + check the status of garbage collection. By removing such overhead + from frequently used primitives such as pointer references (e.g., + Lisp car and cdr) and stack manipulations, the presented algorithm + reduces the execution overhead to a great extent. Although the + algorithm does not support compaction of the whole data space, it + efficiently supports partial compaction such as array relocation. + +* .. _ZORN88: + + Benjamin Zorn & Paul Hilfinger. 1988. "`A Memory Allocation Profiler for C and Lisp Programs `_". USENIX. Proceedings for the Summer 1988 USENIX Conference, pp. 223--237. + + .. admonition:: Abstract + + This paper describes mprof, a tool used to study the memory + allocation behavior of programs. mprof records the amount of + memory each function allocates, breaks down allocation information + by type and size, and displays a program's dynamic call graph so + that functions indirectly responsible for memory allocation are + easy to identify. mprof is a two-phase tool. The monitor phase is + linked into executing programs and records information each time + memory is allocated. The display phase reduces the data generated + by the monitor and displays the information to the user in several + tables. mprof has been implemented for C and Kyoto Common Lisp. + Measurements of these implementations are presented. + +* .. _ZORN89: + + Benjamin Zorn. 1989. "`Comparative Performance Evaluation of Garbage Collection Algorithms `_". Computer Science Division (EECS) of University of California at Berkeley. Technical Report UCB/CSD 89/544 and PhD thesis. + + .. admonition:: Abstract + + This thesis shows that object-level, trace-driven simulation can + facilitate evaluation of language runtime systems and reaches new + conclusions about the relative performance of important garbage + collection algorithms.
In particular, I reach the unexpected + conclusion that mark-and-sweep garbage collection, when augmented + with generations, shows comparable CPU performance and much better + reference locality than the more widely used copying algorithms. + In the past, evaluation of garbage collection algorithms has been + limited by the high cost of implementing the algorithms. + Substantially different algorithms have rarely been compared in a + systematic way. + + With the availability of high-performance, low-cost workstations, + trace-driven performance evaluation of these algorithms is now + economical. This thesis describes MARS, a runtime system simulator + that is driven by operations on program objects, and not memory + addresses. MARS has been attached to a commercial Common Lisp + system and eight large Lisp applications are used in the thesis as + test programs. To illustrate the advantages of the object-level + tracing technique used by MARS, this thesis compares the relative + performance of stop-and-copy, incremental, and mark-and-sweep + collection algorithms, all organized with multiple generations. + The comparative evaluation is based on several metrics: CPU + overhead, reference locality, and interactive availability. + + Mark-and-sweep collection shows slightly higher CPU overhead than + stop-and-copy collection (5 percent), but requires significantly less + physical memory to achieve the same page fault rate (30-40 + percent). Incremental collection has very good interactive + availability, but implementing the read barrier on stock hardware + incurs a substantial CPU overhead (30-60 percent). In the future, + I will use MARS to investigate other performance aspects of + sophisticated runtime systems. + +* .. _ZORN90B: + + Benjamin Zorn. 1990. "Comparing Mark-and-sweep and Stop-and-copy Garbage Collection". ACM. Conference on Lisp and Functional Programming, pp. 87--98. + + .. admonition:: Abstract + + Stop-and-copy garbage collection has been preferred to + mark-and-sweep collection in the last decade because its + collection time is proportional to the size of reachable data and + not to the memory size. This paper compares the CPU overhead and + the memory requirements of the two collection algorithms extended + with generations, and finds that mark-and-sweep collection + requires at most a small amount of additional CPU overhead (3-6%) + but requires an average of 20% (and up to 40%) less memory to + achieve the same page fault rate. The comparison is based on + results obtained using trace-driven simulation with large Common + Lisp programs. + +* .. _ZORN90: + + Benjamin Zorn. 1990. "`Barrier Methods for Garbage Collection `_". University of Colorado at Boulder. Technical Report CU-CS-494-90. + + .. admonition:: Abstract + + Garbage collection algorithms have been enhanced in recent years + with two methods: generation-based collection and Baker + incremental copying collection. Generation-based collection + requires special actions during certain store operations to + implement the "write barrier". Incremental collection requires + special actions on certain load operations to implement the "read + barrier". This paper evaluates the performance of different + implementations of the read and write barriers and reaches several + important conclusions. First, the inlining of barrier checks + results in surprisingly low overheads, both for the write barrier + (2%-6%) and the read barrier (< 20%).
Contrary to previous + belief, these results suggest that a Baker-style read barrier can + be implemented efficiently without hardware support. Second, the + use of operating system traps to implement garbage collection + methods results in extremely high overheads because the cost of + trap handling is so high. Since this large overhead is completely + unnecessary, operating system memory protection traps should be + reimplemented to be as fast as possible. Finally, the performance + of these approaches on several machine architectures is compared + to show that the results are generally applicable. + +* .. _ZORN91: + + Benjamin Zorn. 1991. "`The Effect of Garbage Collection on Cache Performance `_". University of Colorado at Boulder. Technical Report CU-CS-528-91. + + .. admonition:: Abstract + + Cache performance is an important part of total performance in + modern computer systems. This paper describes the use of + trace-driven simulation to estimate the effect of garbage + collection algorithms on cache performance. Traces from four large + Common Lisp programs have been collected and analyzed with an + all-associativity cache simulator. While previous work has focused + on the effect of garbage collection on page reference locality, + this evaluation unambiguously shows that garbage collection + algorithms can have a profound effect on cache performance as + well. On processors with a direct-mapped cache, a generation + stop-and-copy algorithm exhibits a miss rate up to four times + higher than a comparable generation mark-and-sweep algorithm. + Furthermore, two-way set-associative caches are shown to reduce + the miss rate in stop-and-copy algorithms often by a factor of two + and sometimes by a factor of almost five over direct-mapped + caches. As processor speeds increase, cache performance will play + an increasing role in total performance. These results suggest + that garbage collection algorithms will play an important part in + improving that performance. + +* .. _ZORN92B: + + Benjamin Zorn & Dirk Grunwald. 1992. "`Empirical Measurements of Six Allocation-intensive C Programs `_". ACM, SIGPLAN. SIGPLAN notices, 27(12):71--80. + + .. admonition:: Abstract + + Dynamic memory management is an important part of a large class of + computer programs and high-performance algorithms for dynamic + memory management have been, and will continue to be, of + considerable interest. This paper presents empirical data from a + collection of six allocation-intensive C programs. Extensive + statistics about the allocation behavior of the programs measured, + including the distributions of object sizes, lifetimes, and + interarrival times, are presented. This data is valuable for the + following reasons: first, the data from these programs can be used + to design high-performance algorithms for dynamic memory + management. Second, these programs can be used as a benchmark test + suite for evaluating and comparing the performance of different + dynamic memory management algorithms. Finally, the data presented + gives readers greater insight into the storage allocation patterns + of a broad range of programs. The data presented in this paper is + an abbreviated version of more extensive statistics that are + publicly available on the internet. + +* .. _ZORN92: + + Benjamin Zorn. 1993. "`The Measured Cost of Conservative Garbage Collection `_". Software -- Practice and Experience. 23(7):733--756. + + .. 
admonition:: Abstract + + Because dynamic memory management is an important part of a large + class of computer programs, high-performance algorithms for + dynamic memory management have been, and will continue to be, of + considerable interest. Experience indicates that for many + programs, dynamic storage allocation is so important that + programmers feel compelled to write and use their own + domain-specific allocators to avoid the overhead of system + libraries. Conservative garbage collection has been suggested as + an important algorithm for dynamic storage management in C + programs. In this paper, I evaluate the costs of different dynamic + storage management algorithms, including domain-specific + allocators; widely-used general-purpose allocators; and a publicly + available conservative garbage collection algorithm. Surprisingly, + I find that programmer enhancements often have little effect on + program performance. I also find that the true cost of + conservative garbage collection is not the CPU overhead, but the + memory system overhead of the algorithm. I conclude that + conservative garbage collection is a promising alternative to + explicit storage management and that the performance of + conservative collection is likely to be improved in the future. C + programmers should now seriously consider using conservative + garbage collection instead of malloc/free in programs they write. + +* .. _ZORN92A: + + Benjamin Zorn & Dirk Grunwald. 1994. "`Evaluating Models of Memory Allocation `_". ACM. Transactions on Modeling and Computer Simulation 4(1):107--131. + + .. admonition:: Abstract + + Because dynamic memory management is an important part of a large + class of computer programs, high-performance algorithms for + dynamic memory management have been, and will continue to be, of + considerable interest. We evaluate and compare models of the + memory allocation behavior in actual programs and investigate how + these models can be used to explore the performance of memory + management algorithms. These models, if accurate enough, provide + an attractive alternative to algorithm evaluation based on + trace-driven simulation using actual traces. We explore a range of + models of increasing complexity including models that have been + used by other researchers. Based on our analysis, we draw three + important conclusions. First, a very simple model, which generates + a uniform distribution around the mean of observed values, is + often quite accurate. Second, two new models we propose show + greater accuracy than those previously described in the + literature. Finally, none of the models investigated appear + adequate for generating an operating system workload. + diff --git a/mps/manual/source/conf.py b/mps/manual/source/conf.py index b3c561d615f..733438fc253 100644 --- a/mps/manual/source/conf.py +++ b/mps/manual/source/conf.py @@ -22,6 +22,34 @@ import sys # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('.')) +# -- Project configuration ----------------------------------------------------- + +# The same set of sources builds the Memory Pool System documentation +# and the Memory Management Reference, depending on whether the MMREF +# environment variable is set. 
+ +if os.environ.get('MMREF'): + project = u'Memory Management Reference' + master_doc = 'index' + html_theme = 'mmref' + version = '4' + release = '4.0' +else: + project = u'Memory Pool System' + master_doc = 'index' + html_theme = 'mps' + html_sidebars = { + '**': ['localtoc.html', 'relations.html', 'links.html', 'contact.html'], + } + with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), + '../../code/version.c')) as f: + for line in f: + m = re.match(r'#define MPS_RELEASE "release/((\d+\.\d+)\.\d+)"', line) + if m: + release, version = m.groups() + break + + # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. @@ -40,24 +68,9 @@ source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' - # General information about the project. -project = u'Memory Pool System' copyright = date.today().strftime(u'%Y, Ravenbrook Limited') -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), - '../../code/version.c')) as f: - for line in f: - m = re.match(r'#define MPS_RELEASE "release/((\d+\.\d+)\.\d+)"', line) - if m: - release, version = m.groups() - break - # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None @@ -98,10 +111,6 @@ pygments_style = 'sphinx' # -- Options for HTML output --------------------------------------------------- -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'mps' - # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. @@ -119,7 +128,7 @@ html_theme_path = ['themes'] # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = 'diagrams/logo.png' +html_logo = 'images/logo.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 @@ -129,7 +138,7 @@ html_logo = 'diagrams/logo.png' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = [] +html_static_path = ['images'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. @@ -139,11 +148,6 @@ html_static_path = [] # typographically correct entities. html_use_smartypants = True -# Custom sidebar templates, maps document names to template names. -html_sidebars = { - '**': ['localtoc.html', 'relations.html', 'links.html', 'contact.html'], -} - # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} diff --git a/mps/manual/source/copyright.rst b/mps/manual/source/copyright.rst index 98b2dea1b42..234ae604da6 100644 --- a/mps/manual/source/copyright.rst +++ b/mps/manual/source/copyright.rst @@ -1,7 +1,7 @@ -.. _license: - .. index:: single: copyright single: license +.. _license: + .. 
include:: ../../license.txt diff --git a/mps/manual/source/design/index.rst b/mps/manual/source/design/index.rst index 49d5b1cd85f..8ac8bb9e9c2 100644 --- a/mps/manual/source/design/index.rst +++ b/mps/manual/source/design/index.rst @@ -10,13 +10,16 @@ Design cbs config critical-path + failover freelist guide.hex.trans guide.impl.c.format interface-c keyword-arguments + land nailboard range ring sig + splay type diff --git a/mps/manual/source/design/old.rst b/mps/manual/source/design/old.rst index b002b14ef35..2aefac2a40c 100644 --- a/mps/manual/source/design/old.rst +++ b/mps/manual/source/design/old.rst @@ -53,7 +53,6 @@ Old design scan seg shield - splay sso1al strategy telemetry diff --git a/mps/manual/source/extensions/mps/designs.py b/mps/manual/source/extensions/mps/designs.py index 230594e9392..54e910e673b 100644 --- a/mps/manual/source/extensions/mps/designs.py +++ b/mps/manual/source/extensions/mps/designs.py @@ -20,13 +20,13 @@ TYPES = ''' AccessSet Accumulation Addr Align AllocFrame AllocPattern AP Arg Arena Attr Bool BootBlock BT Buffer BufferMode Byte Chain Chunk - Clock Compare Count Epoch FindDelete Format FrameState Fun Globals - Index Land LD Lock Message MessageType MutatorFaultContext Page - Pointer Pool PThreadext Range Rank RankSet Ref RefSet Res - Reservoir Ring Root RootMode RootVar ScanState Seg SegBuf SegPref - SegPrefKind Serial Shift Sig Size Space SplayNode SplayTree - StackContext Thread Trace TraceId TraceSet TraceStartWhy - TraceState ULongest VM Word ZoneSet + Clock Compare Count Epoch FindDelete Format FrameState Fun GenDesc + Globals Index Land LD Lock Message MessageType MutatorFaultContext + Page Pointer Pool PoolGen PThreadext Range Rank RankSet + ReadonlyAddr Ref RefSet Res Reservoir Ring Root RootMode RootVar + ScanState Seg SegBuf SegPref SegPrefKind Serial Shift Sig Size + Space SplayNode SplayTree StackContext Thread Trace TraceId + TraceSet TraceStartWhy TraceState ULongest VM Word ZoneSet ''' @@ -124,6 +124,15 @@ def convert_file(name, source, dest): with open(dest, 'wb') as out: out.write(s.encode('utf-8')) +def newer(src, target): + """Return True if src is newer (that is, modified more recently) than + target, False otherwise. + + """ + return (not os.path.isfile(target) + or os.path.getmtime(target) < os.path.getmtime(src) + or os.path.getmtime(target) < os.path.getmtime(__file__)) + # Mini-make def convert_updated(app): app.info(bold('converting MPS design documents')) @@ -131,11 +140,11 @@ def convert_updated(app): name = os.path.splitext(os.path.basename(design))[0] if name == 'index': continue converted = 'source/design/%s.rst' % name - if (not os.path.isfile(converted) - or os.path.getmtime(converted) < os.path.getmtime(design) - or os.path.getmtime(converted) < os.path.getmtime(__file__)): + if newer(design, converted): app.info('converting design %s' % name) convert_file(name, design, converted) for diagram in glob.iglob('../design/*.svg'): - shutil.copyfile(diagram, 'source/design/%s' % os.path.basename(diagram)) - + target = os.path.join('source/design/', os.path.basename(diagram)) + if newer(diagram, target): + shutil.copyfile(diagram, target) + diff --git a/mps/manual/source/glossary/a.rst b/mps/manual/source/glossary/a.rst index a7ab2de1bda..ec4da5f86d2 100644 --- a/mps/manual/source/glossary/a.rst +++ b/mps/manual/source/glossary/a.rst @@ -103,6 +103,26 @@ Memory Management Glossary: A .. seealso:: :term:`virtual address space`, :term:`physical address space`. + address space layout randomization + + .. aka:: *ASLR*. 
+ + The random placement in :term:`address space` of the + :term:`stack`, data segment, :term:`heap`, and so on, of a + process. + + The purpose of ASLR is to make it harder for an attacker to + exploit buffer overflow bugs, by making it harder to determine + the addresses of data structures. + + .. mps:specific:: + + ASLR also makes it hard to prepare a repeatable test case + for a program that performs computation based on the + addresses of objects, for example, hashing objects by + their address. See :ref:`guide-debug-aslr` for techniques + to deal with this. + address translation cache .. see:: :term:`translation lookaside buffer`. @@ -182,7 +202,11 @@ Memory Management Glossary: A .. mps:specific:: An alignment is represented by the unsigned integral type - :c:type:`mps_align_t`. It must be a positive power of 2. + :c:type:`mps_align_t`. It must be a power of 2. The + alignment of objects allocated in a :term:`pool` may be + specified by passing the :c:macro:`MPS_KEY_ALIGN` + :term:`keyword argument` when calling + :c:func:`mps_pool_create_k`. alive @@ -389,6 +413,10 @@ Memory Management Glossary: A class of :term:`arenas`. Arena classes include :term:`client arenas` and :term:`virtual memory arenas`. + ASLR + + .. see:: :term:`address space layout randomization`. + assertion A declaration in a program of a condition that is expected @@ -452,6 +480,12 @@ Memory Management Glossary: A .. opposite:: :term:`manual memory management`. + .. mps:specific:: + + The MPS provides automatic memory management through + :term:`pool classes` such as :ref:`pool-amc`, + :ref:`pool-ams`, and :ref:`pool-awl`. + automatic storage duration In :term:`C`, :term:`objects` that are declared with diff --git a/mps/manual/source/glossary/c.rst b/mps/manual/source/glossary/c.rst index b2d8f2b1400..2449ce9f82a 100644 --- a/mps/manual/source/glossary/c.rst +++ b/mps/manual/source/glossary/c.rst @@ -131,7 +131,7 @@ Memory Management Glossary: C A cactus stack is a :term:`stack` with branches. When diagrammed, its shape resembles that of a `saguaro cactus - `_. + `_. In languages that support :term:`continuations`, :term:`activation records` can have :term:`indefinite extent`. @@ -615,6 +615,12 @@ Memory Management Glossary: C .. seealso:: :term:`broken heart`, :term:`forwarding pointer`, :term:`two-space collector`. + .. mps:specific:: + + The :ref:`pool-amc` pool class implements copying garbage + collection (more precisely, :term:`mostly-copying garbage + collection`). + core A historical synonym for :term:`main memory`, deriving from diff --git a/mps/manual/source/glossary/f.rst b/mps/manual/source/glossary/f.rst index 540cc048d46..eedbd00a9da 100644 --- a/mps/manual/source/glossary/f.rst +++ b/mps/manual/source/glossary/f.rst @@ -26,6 +26,11 @@ Memory Management Glossary: F .. similar:: :term:`in-band header`. + .. mps:specific:: + + :term:`Debugging pools` use fenceposts. See + :ref:`topic-debugging`. + fencepost error fence post error diff --git a/mps/manual/source/glossary/g.rst b/mps/manual/source/glossary/g.rst index 25feb894906..4dc74e66221 100644 --- a/mps/manual/source/glossary/g.rst +++ b/mps/manual/source/glossary/g.rst @@ -89,7 +89,7 @@ Memory Management Glossary: G This term is often used when referring to particular implementations or algorithms, for example, "the - Boehm-Demers-Weiser *collector*". + Boehm--Demers--Weiser *collector*". GB @@ -132,16 +132,16 @@ Memory Management Glossary: G .. 
mps:specific:: The :term:`client program` specifies the generational - structure of a :term:`pool` using a :term:`generation - chain`. See :ref:`topic-collection`. + structure of a :term:`pool` (or group of pools) using a + :term:`generation chain`. See :ref:`topic-collection`. generation chain .. mps:specific:: A data structure that specifies the structure of the - :term:`generations` in a :term:`pool`. See - :ref:`topic-collection`. + :term:`generations` in a :term:`pool` (or group of pools). + See :ref:`topic-collection`. generation scavenging @@ -174,6 +174,11 @@ Memory Management Glossary: G .. seealso:: :term:`remembered set`. + .. mps:specific:: + + The :ref:`pool-amc` and :ref:`pool-amcz` pool classes + support generational garbage collection. + generational hypothesis .. aka:: *infant mortality*. diff --git a/mps/manual/source/glossary/i.rst b/mps/manual/source/glossary/i.rst index 9ce2a3f9cea..b174241959c 100644 --- a/mps/manual/source/glossary/i.rst +++ b/mps/manual/source/glossary/i.rst @@ -133,6 +133,12 @@ Memory Management Glossary: I .. bibref:: :ref:`Appel et al. (1988) `, :ref:`Boehm et al. (1991) `. + .. mps:specific:: + + The MPS uses incremental collection, except for + collections started by calling + :c:func:`mps_arena_collect`. + incremental update Incremental-update algorithms for :term:`tracing `, diff --git a/mps/manual/source/glossary/index.rst b/mps/manual/source/glossary/index.rst index bb55975bf97..40656261350 100644 --- a/mps/manual/source/glossary/index.rst +++ b/mps/manual/source/glossary/index.rst @@ -7,6 +7,7 @@ Memory Management Glossary .. toctree:: :maxdepth: 1 + :hidden: a b @@ -31,3 +32,594 @@ Memory Management Glossary v w z + +All +=== + +:term:`absolute address ` +:term:`activation frame ` +:term:`activation record` +:term:`activation stack ` +:term:`active ` +:term:`address` +:term:`address space` +:term:`address space layout randomization` +:term:`address translation cache ` +:term:`address-ordered first fit` +:term:`aging space` +:term:`algebraic data type` +:term:`alignment` +:term:`alive ` +:term:`allocate` +:term:`allocation frame` +:term:`allocation mechanism` +:term:`allocation pattern` +:term:`allocation point` +:term:`allocation point protocol` +:term:`allocation policy` +:term:`allocation strategy` +:term:`allocator` +:term:`ambiguous reference` +:term:`ambiguous root` +:term:`arena` +:term:`arena class` +:term:`ASLR
` +:term:`assertion` +:term:`asynchronous garbage collector` +:term:`ATC ` +:term:`atomic object ` +:term:`automatic memory management` +:term:`automatic storage duration` + +:term:`backing store` +:term:`barrier (1)` +:term:`barrier (2)` +:term:`barrier hit ` +:term:`base pointer` +:term:`best fit` +:term:`BIBOP` +:term:`big bag of pages ` +:term:`binary buddies` +:term:`bit array ` +:term:`bit table ` +:term:`bit vector ` +:term:`bitmap` +:term:`bitmapped fit` +:term:`bitmask` +:term:`bitset ` +:term:`black` +:term:`blacklisting` +:term:`black-listing` +:term:`block` +:term:`bounds error ` +:term:`boxed` +:term:`break-table` +:term:`brk` +:term:`broken heart` +:term:`bucket` +:term:`buddy system` +:term:`buffer` +:term:`bus error` +:term:`byte (1)` +:term:`byte (2)` +:term:`byte (3)` +:term:`byte (4)` + +:term:`C89 ` +:term:`C90` +:term:`C99` +:term:`cache (1)` +:term:`cache (2)` +:term:`cache memory ` +:term:`cache policy` +:term:`caching (3)` +:term:`cactus stack` +:term:`card` +:term:`card marking` +:term:`cell ` +:term:`Cheney collector` +:term:`Cheney scan ` +:term:`clamped state` +:term:`client arena` +:term:`client object` +:term:`client pointer` +:term:`client program ` +:term:`closure` +:term:`coalesce` +:term:`collect` +:term:`collection ` +:term:`collection cycle` +:term:`collector (1) ` +:term:`collector (2)` +:term:`color` +:term:`colour` +:term:`commit limit` +:term:`committed (1) ` +:term:`committed (2)` +:term:`compactifying ` +:term:`compaction` +:term:`composite object` +:term:`comprehensive` +:term:`concurrent garbage collection ` +:term:`condemned set` +:term:`connected` +:term:`cons (1)` +:term:`cons (2) ` +:term:`conservative garbage collection` +:term:`constant root` +:term:`constructor (1)` +:term:`constructor (2)` +:term:`continuation` +:term:`control stack` +:term:`cool` +:term:`copy method` +:term:`copying garbage collection` +:term:`core` +:term:`creation space` +:term:`critical path` +:term:`crossing map` +:term:`cyclic data structure` + +:term:`dangling pointer` +:term:`data stack` +:term:`dead` +:term:`deallocate ` +:term:`debugging pool` +:term:`deferred coalescing` +:term:`deferred reference counting` +:term:`dependent object` +:term:`derived pointer ` +:term:`derived type` +:term:`destructor (1)` +:term:`destructor (2)` +:term:`DGC ` +:term:`direct method` +:term:`dirty bit` +:term:`distributed garbage collection` +:term:`double buddies` +:term:`double free` +:term:`doubleword` +:term:`doubly weak hash table` +:term:`DRAM ` +:term:`dynamic allocation ` +:term:`dynamic extent` +:term:`dynamic memory` +:term:`dynamic RAM ` + +:term:`ecru ` +:term:`edge` +:term:`entry table (1)` +:term:`entry table (2)` +:term:`exact garbage collection` +:term:`exact reference` +:term:`exact root` +:term:`exact segregated fit` +:term:`execution stack ` +:term:`exit table` +:term:`extent ` +:term:`external fragmentation` + +:term:`fencepost` +:term:`fence post` +:term:`fencepost error` +:term:`fence post error` +:term:`Fibonacci buddies` +:term:`FIFO-ordered first fit` +:term:`file mapping ` +:term:`finalization` +:term:`finalized block` +:term:`first fit` +:term:`fix` +:term:`flip` +:term:`floating garbage` +:term:`foreign code` +:term:`format` +:term:`format method` +:term:`formatted object` +:term:`forward method` +:term:`forwarding marker` +:term:`forwarding object` +:term:`forwarding pointer` +:term:`fragmentation` +:term:`frame ` +:term:`free (1)` +:term:`free (2)` +:term:`free (3)` +:term:`free (4) ` +:term:`free block` +:term:`free block chain` +:term:`free list` 
+:term:`free store ` +:term:`freestore ` +:term:`from space` +:term:`fromspace` +:term:`function pointer` +:term:`function record ` + +:term:`garbage` +:term:`garbage collection` +:term:`garbage collector` +:term:`GB ` +:term:`GC ` +:term:`General Protection Fault` +:term:`generation` +:term:`generation chain` +:term:`generation scavenging ` +:term:`generational garbage collection` +:term:`generational hypothesis` +:term:`gigabyte` +:term:`good fit` +:term:`GPF ` +:term:`grain` +:term:`graph` +:term:`gray` +:term:`grey` +:term:`gray list` +:term:`grey list` + +:term:`handle` +:term:`header ` +:term:`heap` +:term:`heap allocation` +:term:`hit` +:term:`hit rate` +:term:`hot` +:term:`huge page` + +:term:`immediate data` +:term:`immune set` +:term:`immutable` +:term:`immutable object ` +:term:`in-band header` +:term:`in parameter` +:term:`in/out parameter` +:term:`incremental garbage collection` +:term:`incremental update` +:term:`indefinite extent` +:term:`indexed fit` +:term:`indirect method` +:term:`infant mortality ` +:term:`inline allocation (1)` +:term:`inline allocation (2)` +:term:`inter-generational pointer` +:term:`interior pointer` +:term:`internal fragmentation` +:term:`invalid page fault` +:term:`inverted page table` +:term:`inverted page-table` +:term:`is-forwarded method` + +:term:`kB ` +:term:`keyword argument` +:term:`kilobyte` + +:term:`large object area` +:term:`large page ` +:term:`leaf object` +:term:`leak ` +:term:`life ` +:term:`lifetime` +:term:`LIFO-ordered first fit` +:term:`limited-field reference count` +:term:`linear addressing` +:term:`live` +:term:`load` +:term:`locality of reference` +:term:`location ` +:term:`location dependency` +:term:`lock free` +:term:`logical address ` +:term:`longword ` + +:term:`machine word ` +:term:`main memory` +:term:`malloc` +:term:`manual memory management` +:term:`mapped` +:term:`mapping` +:term:`mark-compact` +:term:`mark-sweep` +:term:`mark-and-sweep` +:term:`marking` +:term:`MB ` +:term:`megabyte` +:term:`memoization ` +:term:`memory (1)` +:term:`memory (2)` +:term:`memory (3)
` +:term:`memory (4)` +:term:`memory bandwidth` +:term:`memory cache ` +:term:`memory hierarchy ` +:term:`memory leak` +:term:`memory location` +:term:`memory management` +:term:`Memory Management Unit ` +:term:`memory manager` +:term:`memory mapping` +:term:`memory protection ` +:term:`message` +:term:`message queue` +:term:`message type` +:term:`misaligned ` +:term:`miss` +:term:`miss rate` +:term:`mmap` +:term:`MMU` +:term:`mostly-copying garbage collection` +:term:`mostly-exact garbage collection ` +:term:`mostly-precise garbage collection ` +:term:`moving garbage collector` +:term:`moving memory manager` +:term:`mutable` +:term:`mutator` + +:term:`nailing ` +:term:`natural alignment` +:term:`nepotism` +:term:`next fit` +:term:`new space` +:term:`newspace ` +:term:`node` +:term:`non-moving garbage collector` +:term:`non-moving memory manager` +:term:`nursery generation ` +:term:`nursery space` + +:term:`object` +:term:`object format` +:term:`object pointer` +:term:`off-white` +:term:`old space ` +:term:`oldspace ` +:term:`one-bit reference count` +:term:`opaque type` +:term:`out parameter` +:term:`out-of-band header` +:term:`overcommit` +:term:`overwriting error` + +:term:`padding` +:term:`padding method` +:term:`padding object` +:term:`page` +:term:`page fault` +:term:`page marking` +:term:`page protection ` +:term:`page table` +:term:`paged in` +:term:`paged out` +:term:`paging` +:term:`palimpsest` +:term:`parallel garbage collection` +:term:`parked state` +:term:`perfect fit` +:term:`phantom reachable` +:term:`phantomly reachable` +:term:`phantom reference` +:term:`physical address` +:term:`physical address space` +:term:`physical memory (1)` +:term:`physical memory (2)` +:term:`physical storage ` +:term:`pig in the python` +:term:`pig in the snake ` +:term:`pinning` +:term:`placement policy ` +:term:`platform` +:term:`plinth` +:term:`pointer` +:term:`pool` +:term:`pool class` +:term:`precise garbage collection ` +:term:`precise reference ` +:term:`precise root ` +:term:`premature free` +:term:`premature promotion ` +:term:`premature tenuring` +:term:`primary storage
` +:term:`promotion` +:term:`protectable root` +:term:`protection` +:term:`protection exception ` +:term:`protection fault` +:term:`protection violation ` + +:term:`quadword` + +:term:`RAM` +:term:`random access memory ` +:term:`ramp allocation` +:term:`rank` +:term:`rash` +:term:`raw ` +:term:`reachable` +:term:`read barrier` +:term:`read fault` +:term:`read-only memory ` +:term:`real memory (1)` +:term:`real memory (2) ` +:term:`reclaim` +:term:`recycle` +:term:`reference` +:term:`reference counting` +:term:`reference object` +:term:`region inference` +:term:`register` +:term:`register set partitioning` +:term:`relocation` +:term:`remembered set` +:term:`remote reference` +:term:`replicating garbage collector` +:term:`reserved` +:term:`resident` +:term:`resident set` +:term:`result code` +:term:`resurrection` +:term:`ROM` +:term:`root` +:term:`root description` +:term:`root mode` +:term:`root set` + +:term:`sbrk` +:term:`scalar data type` +:term:`scan` +:term:`scan method` +:term:`scan state` +:term:`scavenging garbage collection ` +:term:`SDRAM` +:term:`segmentation violation` +:term:`segmented addressing` +:term:`segregated allocation cache` +:term:`segregated fit` +:term:`segregated free list` +:term:`segregated free-list` +:term:`semi-conservative garbage collection` +:term:`semi-space` +:term:`semi-space collector ` +:term:`sequential fit` +:term:`sequential store buffer` +:term:`shared memory` +:term:`simple object` +:term:`simple segregated storage` +:term:`size` +:term:`size class` +:term:`skip method` +:term:`smart pointer` +:term:`snap-out` +:term:`snapshot at the beginning` +:term:`soft reference` +:term:`softly reachable` +:term:`space leak ` +:term:`spare commit limit` +:term:`spare committed memory` +:term:`spaghetti stack ` +:term:`splat` +:term:`split` +:term:`SRAM ` +:term:`SSB ` +:term:`stack` +:term:`stack allocation` +:term:`stack frame` +:term:`stack record ` +:term:`static allocation` +:term:`static memory (1)` +:term:`static memory (2)` +:term:`static object` +:term:`static RAM ` +:term:`static storage duration` +:term:`stepper function` +:term:`sticky reference count ` +:term:`stop-and-copy collection` +:term:`storage ` +:term:`storage hierarchy` +:term:`storage level` +:term:`storage management ` +:term:`store (1)` +:term:`store (2) ` +:term:`stretchy vector` +:term:`strict segregated fit` +:term:`strong reference` +:term:`strong root` +:term:`strong tri-color invariant` +:term:`strong tri-colour invariant` +:term:`strong tricolor invariant` +:term:`strong tricolour invariant` +:term:`strongly reachable` +:term:`suballocator` +:term:`subgraph` +:term:`superpage ` +:term:`sure reference ` +:term:`swap space` +:term:`swapped in` +:term:`swapped out` +:term:`swapping` +:term:`sweeping` +:term:`synchronous garbage collector` + +:term:`tabling ` +:term:`tag` +:term:`tagged architecture` +:term:`tagged reference` +:term:`TB (1) ` +:term:`TB (2) ` +:term:`telemetry filter` +:term:`telemetry label` +:term:`telemetry stream` +:term:`tenuring ` +:term:`terabyte` +:term:`termination ` +:term:`thrash` +:term:`thread` +:term:`threatened set ` +:term:`TLB ` +:term:`to space` +:term:`tospace` +:term:`trace` +:term:`tracing garbage collection` +:term:`translation buffer` +:term:`translation lookaside buffer` +:term:`transparent alias` +:term:`transparent type` +:term:`transport` +:term:`transport snap-out ` +:term:`treadmill` +:term:`tri-color invariant` +:term:`tri-colour invariant` +:term:`tricolor invariant` +:term:`tricolour invariant` +:term:`tri-color marking` 
+:term:`tri-colour marking` +:term:`tricolor marking` +:term:`tricolour marking` +:term:`two-space collector` +:term:`two space collector` +:term:`type-accurate garbage collection ` +:term:`type punning` + +:term:`unaligned` +:term:`unboxed` +:term:`unclamped state` +:term:`undead` +:term:`unmapped` +:term:`unreachable` +:term:`unsure reference ` +:term:`unwrapped` +:term:`use after free ` + +:term:`value object` +:term:`variety` +:term:`vector data type` +:term:`virtual address` +:term:`virtual address space` +:term:`virtual memory` +:term:`virtual memory arena` +:term:`visitor function ` +:term:`VM (1) ` +:term:`VM (2)` + +:term:`weak-key hash table` +:term:`weak-value hash table` +:term:`weak hash table` +:term:`weak reference (1)` +:term:`weak reference (2)` +:term:`weak root` +:term:`weak tri-color invariant` +:term:`weak tri-colour invariant` +:term:`weak tricolor invariant` +:term:`weak tricolour invariant` +:term:`weakly reachable` +:term:`weighted buddies` +:term:`weighted reference counting` +:term:`white` +:term:`word` +:term:`working set` +:term:`worst fit` +:term:`wrapped` +:term:`wrapper` +:term:`write barrier` +:term:`write fault` + +:term:`ZCT ` +:term:`zero count table` diff --git a/mps/manual/source/glossary/l.rst b/mps/manual/source/glossary/l.rst index 5a143e55f69..3e4f0db4270 100644 --- a/mps/manual/source/glossary/l.rst +++ b/mps/manual/source/glossary/l.rst @@ -81,9 +81,14 @@ Memory Management Glossary: L If leaf objects can be identified, a :term:`garbage collector` can make certain optimizations: leaf objects do not have to be :term:`scanned ` for references nor - are :term:`barrier (1)` needed to detect + are :term:`barriers (1)` needed to detect and maintain references in the object. + .. mps:specific:: + + The :ref:`pool-amcz` and :ref:`pool-lo` pool classes are + designed for the storage of leaf objects. + leak .. see:: :term:`memory leak`. diff --git a/mps/manual/source/glossary/m.rst b/mps/manual/source/glossary/m.rst index 4562d30d7b6..db4d8b88a59 100644 --- a/mps/manual/source/glossary/m.rst +++ b/mps/manual/source/glossary/m.rst @@ -535,6 +535,11 @@ Memory Management Glossary: M .. bibref:: :ref:`Bartlett (1989) `, :ref:`Yip (1991) `. + .. mps:specific:: + + The :ref:`pool-amc` pool class implements mostly-copying + garbage collection. + mostly-exact garbage collection .. see:: :term:`semi-conservative garbage collection`. diff --git a/mps/manual/source/glossary/n.rst b/mps/manual/source/glossary/n.rst index 24ddcf66cea..67fbd65a371 100644 --- a/mps/manual/source/glossary/n.rst +++ b/mps/manual/source/glossary/n.rst @@ -117,4 +117,10 @@ Memory Management Glossary: N The size of the nursery space must be chosen carefully. Often it is related to the size of :term:`physical memory (1)`. + .. mps:specific:: + By default, a garbage-collected :term:`pool` allocates + into the first :term:`generation` in its :term:`generation + chain`, but this can be altered by setting the + :c:macro:`MPS_KEY_GEN` :term:`keyword argument` when + calling :c:func:`mps_pool_create_k`. diff --git a/mps/manual/source/glossary/p.rst b/mps/manual/source/glossary/p.rst index fc08d9c2abb..5fdba129d77 100644 --- a/mps/manual/source/glossary/p.rst +++ b/mps/manual/source/glossary/p.rst @@ -167,7 +167,7 @@ Memory Management Glossary: P mutator changing :term:`objects` while collection occurs. The problem is similar to that of :term:`incremental GC `, but harder. The solution - typically involves :term:`barrier (1)`. + typically involves :term:`barriers (1)`. .. 
similar:: :term:`incremental `. @@ -222,7 +222,7 @@ Memory Management Glossary: P .. link:: - `Class java.lang.ref.PhantomReference `_, `Reference Objects and Garbage Collection `_. + `Class java.lang.ref.PhantomReference `_, `Reference Objects and Garbage Collection `_. phantom reference @@ -239,7 +239,7 @@ Memory Management Glossary: P .. link:: - `Class java.lang.ref.PhantomReference `_, `Reference Objects and Garbage Collection `_. + `Class java.lang.ref.PhantomReference `_, `Reference Objects and Garbage Collection `_. physical address @@ -321,6 +321,14 @@ Memory Management Glossary: P .. seealso:: :term:`generational garbage collection`. + .. mps:specific:: + + A :term:`pool` can be configured to allocate into a + specific :term:`generation` in its :term:`generation + chain` by setting the :c:macro:`MPS_KEY_GEN` + :term:`keyword argument` when calling + :c:func:`mps_pool_create_k`. + pig in the snake .. see:: :term:`pig in the python`. diff --git a/mps/manual/source/glossary/r.rst b/mps/manual/source/glossary/r.rst index a521c1d7bd4..2c8a9ef70ad 100644 --- a/mps/manual/source/glossary/r.rst +++ b/mps/manual/source/glossary/r.rst @@ -93,7 +93,7 @@ Memory Management Glossary: R .. link:: - `Package java.lang.ref `_, `Reference Objects and Garbage Collection `_. + `Package java.lang.ref `_, `Reference Objects and Garbage Collection `_. read barrier @@ -317,7 +317,7 @@ Memory Management Glossary: R .. link:: - `Package java.lang.ref `_, `Reference Objects and Garbage Collection `_. + `Package java.lang.ref `_, `Reference Objects and Garbage Collection `_. .. bibref:: :ref:`Dybvig et al. (1993) `. @@ -471,6 +471,11 @@ Memory Management Glossary: R .. seealso:: :term:`mapping`, :term:`mmap`. + .. mps:specific:: + + The function :c:func:`mps_arena_reserved` returns the + total address space reserved by an arena. + resident In a :term:`cache (2)` system, that part of the cached storage diff --git a/mps/manual/source/glossary/s.rst b/mps/manual/source/glossary/s.rst index 41389cb2103..dc9355f2a0f 100644 --- a/mps/manual/source/glossary/s.rst +++ b/mps/manual/source/glossary/s.rst @@ -333,7 +333,7 @@ Memory Management Glossary: S By overloading certain operators it is possible for the class to present the illusion of being a pointer, so that - ``operator\*``, ``operator-\>``, etc. can be used as normal. + ``operator*``, ``operator->``, etc. can be used as normal. Reference counting allows the objects that are referred to using the smart pointer class to have their :term:`memory (1)` automatically :term:`reclaimed` when they are no longer @@ -429,7 +429,7 @@ Memory Management Glossary: S .. link:: - `Class java.lang.ref.SoftReference `_, `Reference Objects and Garbage Collection `_. + `Class java.lang.ref.SoftReference `_, `Reference Objects and Garbage Collection `_. softly reachable @@ -453,7 +453,7 @@ Memory Management Glossary: S .. link:: - `Class java.lang.ref.SoftReference `_, `Reference Objects and Garbage Collection `_. + `Class java.lang.ref.SoftReference `_, `Reference Objects and Garbage Collection `_. space leak @@ -785,6 +785,26 @@ Memory Management Glossary: S .. see:: :term:`memory (1)`. + stretchy vector + + A :term:`vector ` that may grow or shrink to + accommodate adding or removing elements. Named after the + ```` abstract class in Dylan. + + .. relevance:: + + In the presence of an :term:`asynchronous garbage + collector`, the vector and its size may need to be updated + atomically. + + .. link:: + + `Dylan Reference Manual: Collections `_. + + .. 
mps:specific:: + + See :ref:`guide-stretchy-vector`. + strict segregated fit A :term:`segregated fit` :term:`allocation mechanism` which @@ -806,7 +826,7 @@ Memory Management Glossary: S collection>`, a strong reference is a :term:`reference` that keeps the :term:`object` it refers to :term:`alive `. - A strong reference is the usual sort of reference; The term is + A strong reference is the usual sort of reference: the term is usually used to draw a contrast with :term:`weak reference (1)`. @@ -819,7 +839,7 @@ Memory Management Glossary: S A strong root is a :term:`root` such that all :term:`references` in it are :term:`strong references`. - A strong root is the usual sort of root. The term is usually + A strong root is the usual sort of root: the term is usually used to draw a contrast with :term:`weak root`. .. opposite:: :term:`weak root`. diff --git a/mps/manual/source/glossary/w.rst b/mps/manual/source/glossary/w.rst index 7d061294ce6..46d8d9edd06 100644 --- a/mps/manual/source/glossary/w.rst +++ b/mps/manual/source/glossary/w.rst @@ -70,7 +70,7 @@ Memory Management Glossary: W .. link:: - `Class java.lang.ref.WeakReference `_, `Reference Objects and Garbage Collection `_. + `Class java.lang.ref.WeakReference `_, `Reference Objects and Garbage Collection `_. weak root @@ -134,7 +134,7 @@ Memory Management Glossary: W .. link:: - `Class java.lang.ref.WeakReference `_, `Reference Objects and Garbage Collection `_. + `Class java.lang.ref.WeakReference `_, `Reference Objects and Garbage Collection `_. weighted buddies diff --git a/mps/manual/source/guide/debug.rst b/mps/manual/source/guide/debug.rst index 678cc398593..060928c2ba6 100644 --- a/mps/manual/source/guide/debug.rst +++ b/mps/manual/source/guide/debug.rst @@ -42,21 +42,20 @@ General debugging advice in production), and can generate profiling output in the form of the :term:`telemetry stream`. -#. .. index:: - single: ASLR - single: address space layout randomization +#. If your program triggers an assertion failure in the MPS, consult + :ref:`topic-error-cause` for suggestions as to the possible cause. - Prepare a reproducible test case if possible. The MPS may be +#. Prepare a reproducible test case if possible. The MPS may be :term:`asynchronous `, but it is deterministic, so in single-threaded applications you should be - able to get consistent results. (But you need to beware of `address - space layout randomization`_: if you perform computation based on - the addresses of objects, for example, hashing objects by their - address, then ASLR will cause your hash tables to be laid out - differently on each run, which may affect the order of memory - management operations.) + able to get consistent results. - .. _address space layout randomization: http://en.wikipedia.org/wiki/Address_space_layout_randomization + However, you need to beware of :term:`address space layout + randomization`: if you perform computation based on the addresses + of objects, for example, hashing objects by their address, then + ASLR will cause your hash tables to be laid out differently on each + run, which may affect the order of memory management operations. + See :ref:`guide-debug-aslr` below. 
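+   For instance, a hash function along the following lines (a
+   hypothetical sketch, not taken from the MPS sources) bakes object
+   addresses into the program's behaviour, so both ASLR and a moving
+   collector can change the layout of any table keyed on it from one
+   run to the next:
+
+   .. code-block:: c
+
+      #include <stdint.h>
+
+      /* Hypothetical example: hashing an object by its address.
+         The result depends on where the object happens to be in
+         memory, so it varies under ASLR (and when a moving
+         collector relocates the object). */
+      static uintptr_t hash_by_address(void *obj)
+      {
+          return (uintptr_t)obj >> 3;  /* discard alignment bits */
+      }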
A fact that assists with reproducibility is that the more frequently the collector runs, the sooner and more reliably errors @@ -66,7 +65,10 @@ General debugging advice result, by having a mode for testing in which you run frequent collections (by calling :c:func:`mps_arena_collect` followed by :c:func:`mps_arena_release`), perhaps as frequently as every - allocation. + allocation. (This will of course make the system run very slowly, + but it ensures that if there are roots or references that are not + being scanned then the failure will occur close in time to the cause, + making it easier to diagnose.) #. .. index:: single: debugger @@ -86,10 +88,109 @@ General debugging advice handle SIGSEGV pass nostop noprint - On OS X barrier hits do not use signals and so do not enter the debugger. + On these operating systems, you can add this command to your + ``.gdbinit`` if you always want it to be run. - (On these operating systems, you can add these commands to your - ``.gdbinit`` if you always want them to be run.) + On OS X, barrier hits do not use signals and so do not enter the + debugger. + + +.. index:: + single: ASLR + single: address space layout randomization + +.. _guide-debug-aslr: + +Address space layout randomization +---------------------------------- + +:term:`Address space layout randomization` (ASLR) makes it hard to +prepare a repeatable test case for a program that performs computation +based on the addresses of objects, for example, hashing objects by +their address. If this is affecting you, you'll find it useful to +disable ASLR when testing. + +Here's a small program that you can use to check if ASLR is enabled on +your system. It outputs addresses from four key memory areas in a +program (data segment, text segment, stack and heap): + +.. code-block:: c + + #include <stdio.h> + #include <stdlib.h> + + int data; + + int main() { + void *heap = malloc(4); + int stack = 0; + printf("data: %p text: %p stack: %p heap: %p\n", + &data, (void *)main, &stack, heap); + return 0; + } + +When ASLR is turned on, running this program outputs different +addresses on each run. For example, here are four runs on OS X +10.9.3:: + + data: 0x10a532020 text: 0x10a531ed0 stack: 0x7fff556ceb1c heap: 0x7f9f80c03980 + data: 0x10d781020 text: 0x10d780ed0 stack: 0x7fff5247fb1c heap: 0x7fe498c03980 + data: 0x10164b020 text: 0x10164aed0 stack: 0x7fff5e5b5b1c heap: 0x7fb783c03980 + data: 0x10c7f8020 text: 0x10c7f7ed0 stack: 0x7fff53408b1c heap: 0x7f9740403980 + +By contrast, here are four runs on FreeBSD 8.3:: + + data: 0x8049728 text: 0x8048470 stack: 0xbfbfebfc heap: 0x28201088 + data: 0x8049728 text: 0x8048470 stack: 0xbfbfebfc heap: 0x28201088 + data: 0x8049728 text: 0x8048470 stack: 0xbfbfebfc heap: 0x28201088 + data: 0x8049728 text: 0x8048470 stack: 0xbfbfebfc heap: 0x28201088 + +Here's the situation on each of the operating systems supported by the MPS: + +* **FreeBSD** (as of version 10.0) does not support ASLR, so there's + nothing to do. + +* On **Windows** (Vista or later), ASLR is a property of the + executable, and it can be turned off at link time using the + |DYNAMICBASE|_. + + .. |DYNAMICBASE| replace:: ``/DYNAMICBASE:NO`` linker option + .. _DYNAMICBASE: http://msdn.microsoft.com/en-us/library/bb384887.aspx + +* On **Linux** (kernel version 2.6.12 or later), ASLR can be turned + off for a single process by running |setarch|_ with the ``-R`` + option:: + + -R, --addr-no-randomize + Disables randomization of the virtual address space + + .. |setarch| replace:: ``setarch`` + ..
_setarch: http://man7.org/linux/man-pages/man8/setarch.8.html + + For example:: + + $ setarch $(uname -m) -R ./myprogram + +* On **OS X** (10.7 or later), ASLR can be disabled for a single + process by starting the process using :c:func:`posix_spawn`, passing + the undocumented attribute ``0x100``, like this: + + .. code-block:: c + + #include + + pid_t pid; + posix_spawnattr_t attr; + + posix_spawnattr_init(&attr); + posix_spawnattr_setflags(&attr, 0x100); + posix_spawn(&pid, argv[0], NULL, &attr, argv, environ); + + The MPS provides the source code for a command-line tool + implementing this (``tool/noaslr.c``). We've confirmed that this + works on OS X 10.9.3, but since the technique is undocumented, it + may well break in future releases. (If you know of a documented way + to achieve this, please :ref:`contact us `.) .. index:: diff --git a/mps/manual/source/guide/index.rst b/mps/manual/source/guide/index.rst index abd9ef242ca..a7218dd2ba5 100644 --- a/mps/manual/source/guide/index.rst +++ b/mps/manual/source/guide/index.rst @@ -9,6 +9,7 @@ Guide overview build lang + vector debug perf advanced diff --git a/mps/manual/source/guide/lang.rst b/mps/manual/source/guide/lang.rst index 916782424d5..68473613ba2 100644 --- a/mps/manual/source/guide/lang.rst +++ b/mps/manual/source/guide/lang.rst @@ -748,7 +748,7 @@ And third, the global symbol table:: static size_t symtab_size; You tell the MPS how to scan these by writing root scanning functions -of type :c:type:`mps_reg_scan_t`. These functions are similar to the +of type :c:type:`mps_root_scan_t`. These functions are similar to the :ref:`scan method ` in an :term:`object format`, described above. @@ -823,7 +823,7 @@ after the rehash has completed, de-registering the old root by calling :c:func:`mps_root_destroy`. It would be possible to write a root scanning function of type -:c:type:`mps_reg_scan_t`, as described above, to fix the references in +:c:type:`mps_root_scan_t`, as described above, to fix the references in the global symbol table, but the case of a table of references is sufficiently common that the MPS provides a convenient (and optimized) function, :c:func:`mps_root_create_table`, for registering it:: @@ -1142,28 +1142,28 @@ tracking down the causes, appear in the chapter :ref:`guide-debug`. Tidying up ---------- -When your program is done with the MPS, it's good practice to -:term:`park ` the arena (by calling -:c:func:`mps_arena_park`) and then tear down all the MPS data -structures. This causes the MPS to check the consistency of its data -structures and report any problems it detects. It also causes the MPS -to flush its :term:`telemetry stream`. +When your program is done with the MPS, you should :term:`park ` the arena (by calling :c:func:`mps_arena_park`) to ensure that +no incremental garbage collection is in progress, and then tear down +all the MPS data structures. This causes the MPS to check the +consistency of its data structures and report any problems it detects. +It also causes the MPS to flush its :term:`telemetry stream`. MPS data structures must be destroyed or deregistered in the reverse order to that in which they were registered or created. So you must -destroy all :term:`allocation points` created in a -:term:`pool` before destroying the pool; destroy all :term:`roots` and pools, and deregister all :term:`threads`, that -were created in an :term:`arena` before destroying the arena, and so -on. 
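(Editor's illustration, not part of the patch.) The :c:func:`mps_root_create_table` call recommended above for the global symbol table might be used roughly as in the sketch below; it assumes ``symtab`` is declared as an array of ``symtab_size`` references of type ``obj_t``, and that ``error`` is the guide's error-reporting helper.

.. code-block:: c

   /* Register the symbol table as a table of exact references; the MPS
    * will then scan symtab[0 .. symtab_size-1] and fix any references
    * that move. */
   mps_root_t symtab_root;
   mps_res_t res = mps_root_create_table(&symtab_root, arena,
                                         mps_rank_exact(), (mps_rm_t)0,
                                         (mps_addr_t *)symtab, symtab_size);
   if (res != MPS_RES_OK)
       error("Couldn't register symbol table root");

As the hunk above notes, if the table is reallocated when it is rehashed, the old root must be destroyed with :c:func:`mps_root_destroy` and the new table registered in its place.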
+destroy all :term:`allocation points` created in a :term:`pool` before +destroying the pool; destroy all :term:`roots` and pools, and +deregister all :term:`threads`, that were created in an :term:`arena` +before destroying the arena, and so on. For example:: mps_arena_park(arena); /* ensure no collection is running */ mps_ap_destroy(obj_ap); /* destroy ap before pool */ mps_pool_destroy(obj_pool); /* destroy pool before fmt */ - mps_fmt_destroy(obj_fmt); /* destroy fmt before arena */ - mps_root_destroy(reg_root); /* destroy root before arena */ + mps_root_destroy(reg_root); /* destroy root before thread */ mps_thread_dereg(thread); /* deregister thread before arena */ + mps_fmt_destroy(obj_fmt); /* destroy fmt before arena */ mps_arena_destroy(arena); /* last of all */ diff --git a/mps/manual/source/guide/vector.rst b/mps/manual/source/guide/vector.rst new file mode 100644 index 00000000000..8b9452ac1f6 --- /dev/null +++ b/mps/manual/source/guide/vector.rst @@ -0,0 +1,152 @@ +.. index:: + single: stretchy vectors + single: atomic updates + +.. _guide-stretchy-vector: + +The stretchy vector problem +============================ + +The :ref:`previous chapter ` pointed out that: + + Because the MPS is :term:`asynchronous `, it might be scanning, moving, or collecting, at any + point in time. + +The consequences of this can take a while to sink in, so this chapter +discusses a particular instance that catches people out: the *stretchy +vector* problem (named after the |stretchy-vector|_ abstract class in +Dylan). + +.. |stretchy-vector| replace:: ```` +.. _stretchy-vector: http://opendylan.org/books/drm/Collection_Classes#stretchy-vector + +A *stretchy vector* is a vector that can change length dynamically. +Such a vector is often implemented using two objects: an array, and a +header object that stores the length and a pointer to an array. +Stretching (or shrinking) such a vector involves five steps: + +1. allocate a new array; +2. copy elements from the old array to the new array; +3. clear unused elements in the new array (if stretching); +4. update the pointer to the array in the header; +5. update the length in the header. + +For example: + +.. code-block:: c + + typedef struct vector_s { + type_t type; /* TYPE_VECTOR */ + size_t length; /* number of elements */ + obj_t *array; /* array of elements */ + } vector_s, *vector_t; + + void resize_vector(vector_t vector, size_t new_length) { + obj_t *new_array = realloc(vector->array, new_length * sizeof(obj_t)); + if (new_array == NULL) + error("out of memory in resize_vector"); + if (vector->length < new_length) { + memset(&vector->array[vector->length], 0, + (new_length - vector->length) * sizeof(obj_t)); + } + vector->array = new_array; + vector->length = new_length; + } + +When adapting this code to the MPS, the following problems must be +solved: + +1. During step 2, the new array must be :term:`reachable` from the + roots, and :term:`scannable `. (If it's not reachable, then + it may be collected; if it's not scannable, then references it + contains will not be updated when they are moved by the collector.) + + This can be solved by storing the new array in a :term:`root` until + the header has been updated. If the thread's stack has been + registered as a root by calling :c:func:`mps_root_create_reg` then + any local variable will do. + +2. References in the new array must not be scanned until they have been + copied or cleared. (Otherwise they will be invalid.)
+ + This can be solved by clearing the new array before calling + :c:func:`mps_commit`. + +3. The old array must be scanned at the old length (otherwise the scan + may run off the end of the old array when the vector grows), and + the new array must be scanned at the new length (otherwise the scan + may run off the end of the new array when the vector shrinks). + +4. The array object must be scannable without referring to the header + object. (Because the header object may have been protected by the + MPS: see :ref:`topic-format-cautions`.) + +Problems 3 and 4 can be solved by storing the length in the array. The +revised data structures and resizing code might look like this: + +.. code-block:: c + + typedef struct vector_s { + type_t type; /* TYPE_VECTOR */ + obj_t array; /* TYPE_ARRAY object */ + } vector_s, *vector_t; + + typedef struct array_s { + type_t type; /* TYPE_ARRAY */ + size_t length; /* number of elements */ + obj_t array[0]; /* array of elements */ + } array_s, *array_t; + + void resize_vector(vector_t vector, size_t new_length) { + size_t size = ALIGN_OBJ(offsetof(array_s, array) + new_length * sizeof(obj_t)); + mps_addr_t addr; + array_t array; + + do { + mps_res_t res = mps_reserve(&addr, ap, size); + if (res != MPS_RES_OK) error("out of memory in resize_vector"); + array = addr; + array->type = TYPE_ARRAY; + array->length = new_length; + memset(array->array, 0, new_length * sizeof(obj_t)); + /* Now the new array is scannable, and it is reachable via the + * local variable 'array', so it is safe to commit it. */ + } while(!mps_commit(ap, addr, size)); + + /* Copy elements after committing, so that the collector will + * update them if they move. */ + memcpy(array->array, vector->array->array, + min(vector->array->length, new_length) * sizeof(obj_t)); + vector->array = array; + } + +Similar difficulties can arise even when adapting code written for +other garbage collectors. For example, here's the function +|setarrayvector|_ from Lua_: + +.. |setarrayvector| replace:: ``setarrayvector()`` +.. _setarrayvector: http://www.lua.org/source/5.2/ltable.c.html#setarrayvector +.. _Lua: http://www.lua.org + +.. code-block:: c + + static void setarrayvector (lua_State *L, Table *t, int size) { + int i; + luaM_reallocvector(L, t->array, t->sizearray, size, TValue); + for (i=t->sizearray; i<size; i++) setnilvalue(&t->array[i]); + t->sizearray = size; + } + +Lua's garbage collector is :term:`synchronous `, so it can be assumed that there cannot be a garbage +collection between the assignment to ``t->array`` (resulting from the +expansion of the |luaM_reallocvector|_ macro) and the assignment to +``t->sizearray``, and so the collector will always consistently see +either the old array or the new array, with the correct size. This +assumption will no longer be correct if this code is adapted to the +MPS. + +.. |luaM_reallocvector| replace:: ``luaM_reallocvector()`` +.. _luaM_reallocvector: http://www.lua.org/source/5.2/lmem.h.html#luaM_reallocvector diff --git a/mps/manual/source/diagrams/logo.png b/mps/manual/source/images/logo.png similarity index 100% rename from mps/manual/source/diagrams/logo.png rename to mps/manual/source/images/logo.png diff --git a/mps/manual/source/index.rst b/mps/manual/source/index.rst index 161a13ad3b3..4edc69bbf60 100644 --- a/mps/manual/source/index.rst +++ b/mps/manual/source/index.rst @@ -1,7 +1,3 @@ -.. Memory Pool System documentation master file, created by - sphinx-quickstart on Tue Oct 9 11:21:17 2012.
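(Editor's illustration, not part of the patch.) Problem 4 in the new chapter above says the array must be scannable without referring to its header; with the length stored in ``array_s``, a scan method can work from the array object alone. The sketch below assumes the chapter's ``array_s``, ``obj_t`` and ``ALIGN_OBJ`` definitions and, for brevity, pretends the region being scanned contains only ``TYPE_ARRAY`` objects.

.. code-block:: c

   #include <stddef.h>
   #include "mps.h"

   /* Scan a run of array_s objects. The element count comes from the
    * array object itself, so the (possibly protected) vector_s header
    * is never touched while scanning. */
   static mps_res_t array_scan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit)
   {
       MPS_SCAN_BEGIN(ss) {
           while (base < limit) {
               array_t a = base;
               size_t i;
               for (i = 0; i < a->length; ++i) {
                   if (a->array[i] != NULL) {  /* cleared slots need no fixing */
                       mps_addr_t ref = a->array[i];
                       mps_res_t res = MPS_FIX12(ss, &ref);
                       if (res != MPS_RES_OK)
                           return res;
                       a->array[i] = ref;
                   }
               }
               base = (char *)base + ALIGN_OBJ(offsetof(array_s, array)
                                               + a->length * sizeof(obj_t));
           }
       } MPS_SCAN_END(ss);
       return MPS_RES_OK;
   }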
- - Memory Pool System ################## @@ -15,26 +11,26 @@ Memory Pool System design/old -Memory Management Reference -########################### - -.. toctree:: - :maxdepth: 2 - - mmref/index - mmref/bib - mmref/credit - - Appendices ########## .. toctree:: :maxdepth: 1 + bib glossary/index copyright contact release * :ref:`genindex` + + +.. toctree:: + :hidden: + + mmref/index + mmref-index + mmref/faq + mmref-copyright + mmref/credit diff --git a/mps/manual/source/mmref-copyright.rst b/mps/manual/source/mmref-copyright.rst new file mode 100644 index 00000000000..b2e95ecd3a7 --- /dev/null +++ b/mps/manual/source/mmref-copyright.rst @@ -0,0 +1,26 @@ +Copyright +********* + + +Use subject to copyright restrictions +===================================== + +The copyright in The Memory Management Reference is owned by +`Ravenbrook Limited`_. + +.. _Ravenbrook Limited: http://www.ravenbrook.com/ + +Permission to copy part or all of The Memory Management Reference for +personal or classroom use is granted without fee, provided that copies +are not made or distributed for profit or commercial advantage; that +the copyright notice, the title of the publication, and its date +appear; and that notice is given that copying is by permission of +Ravenbrook Limited. To copy otherwise, to republish, to post on +servers, or to redistribute to lists requires prior specific +permission. + + +Warranty disclaimer +=================== + +The Memory Management Reference is provided "as is" without warranty of any kind, express or implied, including, but not limited to, the implied warranties of merchantability, fitness for a particular purpose, and non-infringement. diff --git a/mps/manual/source/mmref-index.rst b/mps/manual/source/mmref-index.rst new file mode 100644 index 00000000000..c645899bb49 --- /dev/null +++ b/mps/manual/source/mmref-index.rst @@ -0,0 +1,56 @@ +Home +**** + +Welcome to the **Memory Management Reference**! This is a resource for programmers and computer scientists interested in :term:`memory management` and :term:`garbage collection`. + + +.. admonition:: :ref:`glossary` + + A glossary of more than 500 memory management terms, from + :term:`absolute address` to :term:`zero count table`. + + .. image:: diagrams/treadmill.svg + :target: glossary_ + + .. _glossary: glossary/index.html#glossary + + +.. admonition:: :ref:`mmref-intro` + + Articles giving a beginner's overview of memory management. + + .. image:: diagrams/address.svg + :target: intro_ + + .. _intro: mmref/index.html#mmref-intro + + +.. admonition:: :ref:`bibliography` + + Books and research papers related to memory management. + + .. image:: diagrams/copying.svg + :target: bib_ + + .. _bib: bib.html#bibliography + + +.. admonition:: :ref:`mmref-faq` + + Frequently asked questions about memory management. + + .. image:: diagrams/snap-out.svg + :target: faq_ + + .. _faq: mmref/faq.html#mmref-faq + +The Memory Management Reference is maintained by `Ravenbrook +Limited`_. We also maintain the `Memory Pool System`_ (an open-source, +thread-safe, :term:`incremental ` +garbage collector), and we are happy to provide advanced memory +management solutions to language and application developers through +our `consulting service`_. + +.. _Ravenbrook Limited: http://www.ravenbrook.com/ +.. _consulting service: http://www.ravenbrook.com/services/mm/ +.. 
_Memory Pool System: http://www.ravenbrook.com/project/mps/ diff --git a/mps/manual/source/mmref/bib.rst b/mps/manual/source/mmref/bib.rst deleted file mode 100644 index aba18413d6e..00000000000 --- a/mps/manual/source/mmref/bib.rst +++ /dev/null @@ -1,965 +0,0 @@ -.. _bibliography: - -Bibliography -************ - -* .. _AD97: - - Ole Agesen, David L. Detlefs. 1997. "`Finding References in Java Stacks `_". Sun Labs. OOPSLA97 Workshop on Garbage Collection and Memory Management. - - .. abstract: ad97.html - -* .. _ADM98: - - Ole Agesen, David L. Detlefs, J. Eliot B. Moss. 1998. "`Garbage Collection and Local Variable Type-precision and Liveness in Java Virtual Machines `_". ACM. Proceedings of the ACM SIGPLAN '98 conference on Programming language design and implementation, pp. 269--279. - - .. abstract: adm98.html - -* .. _AEL88: - - Andrew Appel, John R. Ellis, Kai Li. 1988. "`Real-time Concurrent Collection on Stock Multiprocessors `_". ACM, SIGPLAN. ACM PLDI 88, SIGPLAN Notices 23, 7 (July 88), pp. 11--20. - - .. abstract: ael88.html - -* .. _APPLE94: - - Apple Computer, Inc. 1994. *Inside Macintosh: Memory*. Addison-Wesley. ISBN 0-201-63240-3. - - .. abstract: apple94.html - -* .. _ATTARDI94: - - Giuseppe Attardi & Tito Flagella. 1994. "`A Customisable Memory Management Framework `_". TR-94-010. - - .. abstract: attardi94.html - -* .. _AFI98: - - Giuseppe Attardi, Tito Flagella, Pietro Iglio. 1998. "`A customisable memory management framework for C++ `_". Software -- Practice and Experience. 28(11), 1143--1183. - - .. abstract: afi98.html - -* .. _AKPY98: - - Alain Azagury, Elliot K. Kolodner, Erez Petrank, Zvi Yehudai. 1998. "`Combining Card Marking with Remembered Sets: How to Save Scanning Time `_". ACM. ISMM'98 pp. 10--19. - - .. abstract: akpy98.html - -* .. _BAKER77: - - Henry G. Baker, Carl Hewitt. 1977. "`The Incremental Garbage Collection of Processes `_". ACM. SIGPLAN Notices 12, 8 (August 1977), pp. 55--59. - - .. abstract: baker77.html - -* .. _BAKER78: - - Henry G. Baker. 1978. "`List Processing in Real Time on a Serial Computer `_". ACM. Communications of the ACM 21, 4 (April 1978), pp. 280--294. - - .. abstract: baker78.html - -* .. _BAKER79: - - Henry G. Baker. 1979. "`Optimizing Allocation and Garbage Collection of Spaces `_". In Winston and Brown, eds. *Artificial Intelligence: An MIT Perspective.* MIT Press. - - .. abstract: baker79.html - -* .. _BAKER91: - - Henry G. Baker. 1991. "`Cache-Conscious Copying Collectors `_". OOPSLA'91/GC'91 Workshop on Garbage Collection. - - .. abstract: baker91.html - -* .. _BAKER92A: - - Henry G. Baker. 1992. "`Lively Linear Lisp -- 'Look Ma, No Garbage!' `_". ACM. SIGPLAN Notices 27, 8 (August 1992), pp. 89--98. - - .. abstract: baker92a.html - -* .. _BAKER92C: - - Henry G. Baker. 1992. "`The Treadmill: Real-Time Garbage Collection Without Motion Sickness `_". ACM. SIGPLAN Notices 27, 3 (March 1992), pp. 66--70. - - .. abstract: baker92c.html - -* .. _BAKER92: - - Henry G. Baker. 1992. "`CONS Should not CONS its Arguments, or, a Lazy Alloc is a Smart Alloc `_". ACM. SIGPLAN Notices 27, 3 (March 1992), 24--34. - - .. abstract: baker92.html - -* .. _BAKER92B: - - Henry G. Baker. 1992. "`NREVERSAL of Fortune -- The Thermodynamics of Garbage Collection `_". Springer-Verlag. LNCS Vol. 637. - - .. abstract: baker92b.html - -* .. _BAKER93: - - Henry G. Baker. 1993. "`'Infant Mortality' and Generational Garbage Collection `_". ACM. SIGPLAN Notices 28, 4 (April 1993), pp. 55--57. - - .. abstract: baker93.html - -* .. _BAKER93A: - - Henry G. 
Baker. 1993. "`Equal Rights for Functional Objects or, The More Things Change, The More They Are the Same `_". ACM. OOPS Messenger 4, 4 (October 1993), pp. 2--27. - - .. abstract: baker93a.html - -* .. _BAKER94: - - Henry G. Baker. 1994. "`Minimizing Reference Count Updating with Deferred and Anchored Pointers for Functional Data Structures `_". ACM. SIGPLAN Notices 29, 9 (September 1994), pp. 38--43. - - .. abstract: baker94.html - -* .. _BAKER94A: - - Henry G. Baker. 1994. "`Thermodynamics and Garbage Collection `_". ACM. SIGPLAN Notices 29, 4 (April 1994), pp. 58--63. - - .. abstract: baker94a.html - -* .. _BAKER95A: - - Henry G. Baker. 1995. "`'Use-Once' Variables and Linear Objects -- Storage Management, Reflection and Multi-Threading `_". ACM. SIGPLAN Notices 30, 1 (January 1995), pp. 45--52. - - .. abstract: baker95a.html - -* .. _BAKER95: - - Henry G. Baker. 1995. *Memory Management: International Workshop IWMM'95*. Springer-Verlag. ISBN 3-540-60368-9. - - .. abstract: baker95.html - -* .. _BBW97: - - Nick Barnes, Richard Brooksby, David Jones, Gavin Matthews, Pekka P. Pirinen, Nick Dalton, P. Tucker Withington. 1997. "`A Proposal for a Standard Memory Management Interface `_". OOPSLA97 Workshop on Garbage Collection and Memory Management. - -* .. _ZORN93B: - - David A. Barrett, Benjamin Zorn. 1993. "`Using Lifetime Predictors to Improve Memory Allocation Performance `_". ACM. SIGPLAN'93 Conference on Programming Language Design and Implementation, pp. 187--196. - - .. abstract: zorn93b.html - -* .. _BARRETT93: - - David A. Barrett, Benjamin Zorn. 1995. "`Garbage Collection using a Dynamic Threatening Boundary `_". ACM. SIGPLAN'95 Conference on Programming Language Design and Implementation, pp. 301--314. - - .. abstract: barrett93.html - -* .. _BARTLETT88: - - Joel F. Bartlett. 1988. "`Compacting Garbage Collection with Ambiguous Roots `_". Digital Equipment Corporation. - - .. abstract: bartlett88.html - -* .. _BARTLETT89: - - Joel F. Bartlett. 1989. "`Mostly-Copying Garbage Collection Picks Up Generations and C++ `_". Digital Equipment Corporation. - - .. abstract: bartlett89.html - -* .. _BC92: - - Yves Bekkers & Jacques Cohen. 1992. "`Memory Management, International Workshop IWMM 92 `_". Springer-Verlag. LNCS Vol. 637, ISBN 3-540-55940-X. - -* .. _BB99: - - Emery D. Berger, Robert D. Blumofe. 1999. "`Hoard: A Fast, Scalable, and Memory-Efficient Allocator for Shared-Memory Multiprocessors `_". University of Texas at Austin. UTCS TR99-22. - - .. abstract: bb99.html - -* .. _BERGER01: - - Emery D. Berger, Benjamin G. Zorn, Kathryn S. McKinley. 2001. "`Composing high-performance memory allocators `_" ACM SIGPLAN Conference on Programming Language Design and Implementation 2001, pp. 114--124. - -* .. _BW88: - - Hans-J. Boehm, Mark Weiser. 1988. "`Garbage collection in an uncooperative environment `_". Software -- Practice and Experience. 18(9):807--820. - - .. abstract: bw88.html - -* .. _BDS91: - - Hans-J. Boehm, Alan J. Demers, Scott Shenker. 1991. "`Mostly Parallel Garbage Collection `_". Xerox PARC. ACM PLDI 91, SIGPLAN Notices 26, 6 (June 1991), pp. 157--164. - - .. abstract: bds91.html - -* .. _BC92A: - - Hans-J. Boehm, David Chase. 1992. "A Proposal for Garbage-Collector-Safe C Compilation". *Journal of C Language Translation.* vol. 4, 2 (December 1992), pp. 126--141. - -* .. _BOEHM93: - - Hans-J. Boehm. 1993. "`Space Efficient Conservative Garbage Collection `_". ACM, SIGPLAN. 
Proceedings of the ACM SIGPLAN '91 Conference on Programming Language Design and Implementation, SIGPLAN Notices 28, 6, pp 197--206. - - .. abstract: boehm93.html - -* .. _BOEHM00: - - Hans-J. Boehm. 2000. "`Reducing Garbage Collector Cache Misses `_". ACM. ISMM'00 pp. 59--64. - - .. abstract: boehm00.html - -* .. _BOEHM02: - - Hans-J. Boehm. 2002. "`Destructors, Finalizers, and Synchronization `_". HP Labs technical report HPL-2002-335. - -* .. _BM77: - - Robert S. Boyer and J. Strother Moore. 1977. "`A Fast String Searching Algorithm `_". *Communications of the ACM* 20(10):762--772. - -* .. _BL72: - - P. Branquart, J. Lewi. 1972. "A scheme of storage allocation and garbage collection for ALGOL 68". Elsevier/North-Holland. ALGOL 68 Implementation -- Proceedings of the IFIP Working Conference on ALGOL 68 Implementation, July 1970. - -* .. _BROOKSBY02: - - Richard Brooksby. 2002. "`The Memory Pool System: Thirty person-years of memory management development goes Open Source `_". ISMM'02. - -* .. _C1990: - - International Standard ISO/IEC 9899:1990. "Programming languages — C". - -* .. _C1999: - - International Standard ISO/IEC 9899:1999. "`Programming languages — C `_". - -* .. _CGZ94: - - Brad Calder, Dirk Grunwald, Benjamin Zorn. 1994. "`Quantifying Behavioral Differences Between C and C++ Programs `_". *Journal of Programming Languages.* 2(4):313--351. - - .. abstract: cgz94.html - -* .. _CPC00: - - Dante J. Cannarozzi, Michael P. Plezbert, Ron K. Cytron. 2000. "`Contaminated garbage collection `_". ACM. Proceedings of the ACM SIGPLAN '00 conference on on Programming language design and implementation, pp. 264--273. - -* .. _CW86: - - Patrick J. Caudill, Allen Wirfs-Brock. 1986. "A Third-Generation Smalltalk-80 Implementation". ACM. SIGPLAN Notices. 21(11), OOPSLA'86 ACM Conference on Object-Oriented Systems, Languages and Applications. - -* .. _CHENEY70: - - C. J. Cheney. 1970. "A non-recursive list compacting algorithm". CACM. 13-11 pp. 677--678. - -* .. _CHL98: - - Perry Cheng, Robert Harper, Peter Lee. 1998. "`Generational stack collection and profile-driven pretenuring `_". ACM. Proceedings of SIGPLAN'98 Conference on Programming Language Design and Implementation, pp. 162--173. - -* .. _CL98: - - Trishul M. Chilimbi, James R. Larus. 1998. "`Using Generational Garbage Collection To Implement Cache-Conscious Data Placement `_". ACM. ISMM'98 pp. 37--48. - - .. abstract: cl98.html - -* .. _CH97: - - William D Clinger & Lars T Hansen. 1997. "`Generational Garbage Collection and the Radioactive Decay Model `_". ACM. Proceedings of PLDI 1997. - - .. abstract: ch97.html - -* .. _COHEN81: - - Jacques Cohen. 1981. "Garbage collection of linked data structures". Computing Surveys. Vol. 13, no. 3. - - .. abstract: cohen81.html - -* .. _CCZ98: - - Dominique Colnet, Philippe Coucaud, Olivier Zendra. 1998. "`Compiler Support to Customize the Mark and Sweep Algorithm `_". ACM. ISMM'98 pp. 154--165. - - .. abstract: ccz98.html - -* .. _CWZ93: - - Jonathan E. Cook, Alexander L. Wolf, Benjamin Zorn. 1994. "`Partition Selection Policies in Object Database Garbage Collection `_". ACM. SIGMOD. International Conference on the Management of Data (SIGMOD'94), pp. 371--382. - - .. abstract: cwz93.html - -* .. _CKWZ96: - - Jonathan E. Cook, Artur Klauser, Alexander L. Wolf, Benjamin Zorn. 1996. "`Semi-automatic, Self-adaptive Control of Garbage Collection Rates in Object Databases `_". ACM, SIGMOD. International Conference on the Management of Data (SIGMOD'96), pp. 377--388. - -* .. 
_CNS92: - - Eric Cooper, Scott Nettles, Indira Subramanian. 1992. "Improving the Performance of SML Garbage Collection using Application-Specific Virtual Memory Management". ACM Conference on LISP and Functional Programming, pp. 43--52. - - .. abstract: cns92.html - -* .. _DACONTA93: - - Michael C. Daconta. 1993. *C Pointers and Dynamic Memory Management.* Wiley. ISBN 0-471-56152-5. - -* .. _DACONTA95: - - Michael C. Daconta. 1995. *C++ Pointers and Dynamic Memory Management.* Wiley. ISBN 0-471-04998-0. - - .. abstract: daconta95.html - -* .. _DAHL63: - - O.-J. Dahl. 1963. "The SIMULA Storage Allocation Scheme". Norsk Regnesentral. NCC Document no. 162. - -* .. _DENNING68: - - P. J. Denning. 1968. "Thrashing: Its Causes and Prevention". Proceedings AFIPS,1968 Fall Joint Computer Conference, vol. 33, pp. 915--922. - -* .. _DENNING70: - - P. J. Denning. 1970. "`Virtual Memory `_". ACM. ACM Computing Surveys, vol. 2, no. 3, pp. 153--190, Sept. 1970. - -* .. _DS72: - - P. J. Denning, S. C. Schwartz. 1972. "`Properties of the Working-set Model `_". CACM. vol. 15, no. 3, pp. 191--198. - -* .. _DETLEFS92: - - David L. Detlefs. 1992. "`Garbage collection and runtime typing as a C++ library `_". USENIX C++ Conference. - -* .. _ZORN93: - - David L. Detlefs, Al Dosser, Benjamin Zorn. 1994. "`Memory Allocation Costs in Large C and C++ Programs `_". Software -- Practice and Experience. 24(6):527--542. - - .. abstract: zorn93.html - -* .. _DB76: - - L. Peter Deutsch, Daniel G. Bobrow. 1976. "`An Efficient, Incremental, Automatic Garbage Collector `_". CACM. vol. 19, no. 9, pp. 522--526. - -* .. _DLMSS76: - - E. W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, E. F. M. Steffens. 1976. "`On-the-fly Garbage Collection: An Exercise in Cooperation `_". Springer-Verlag. Lecture Notes in Computer Science, Vol. 46. - -* .. _DMH92: - - Amer Diwan, Richard L. Hudson, J. Eliot B. Moss. 1992. "`Compiler Support for Garbage Collection in a Statically Typed Language `_". ACM. Proceedings of the 5th ACM SIGPLAN conference on Programming language design and implementation, pp. 273--282. - - .. abstract: dmh92.html - -* .. _DTM93: - - Amer Diwan, David Tarditi, J. Eliot B. Moss. 1993. "`Memory Subsystem Performance of Programs with Intensive Heap Allocation `_". Carnegie Mellon University. CMU-CS-93-227. - - .. abstract: dtm93.html - -* .. _DTM93A: - - Amer Diwan, David Tarditi, J. Eliot B. Moss. 1994. "`Memory Subsystem Performance of Programs Using Copying Garbage Collection `_". ACM. CMU-CS-93-210, also in POPL '94. - - .. abstract: dtm93a.html - -* .. _DOLIGEZ93: - - Damien Doligez & Xavier Leroy. 1993. "`A concurrent, generational garbage collector for a multithreaded implementation of ML `_". ACM. POPL '93, 113--123. - - .. abstract: doligez93.html - -* .. _DOLIGEZ94: - - Damien Doligez & Georges Gonthier. 1994. "`Portable, unobtrusive garbage collection for multiprocessor systems `_". ACM. POPL '94, 70--83. - - .. abstract: doligez94.html - -* .. _DBE93: - - R. Kent Dybvig, Carl Bruggeman, David Eby. 1993. "`Guardians in a Generation-Based Garbage Collector `_". SIGPLAN. Proceedings of the ACM SIGPLAN '93 Conference on Programming Language Design and Implementation, June 1993. - - .. abstract: dbe93.html - -* .. _EDELSON92A: - - Daniel R. Edelson. 1992. "`Smart pointers: They're smart, but they're not pointers `_". USENIX C++ Conference. - -* .. _EDELSON92: - - Daniel R. Edelson. 1992. "Comparing Two Garbage Collectors for C++". University of California at Santa Cruz. Technical Report UCSC-CRL-93-20. 
- -* .. _EDWARDS: - - Daniel J. Edwards. n.d. "`Lisp II Garbage Collector `_". MIT. AI Memo 19 (AIM-19). - - .. abstract: edwards.html - -* .. _ELLIS93: - - John R. Ellis, David L. Detlefs. 1993. "`Safe, Efficient Garbage Collection for C++ `_". Xerox PARC. - - .. abstract: ellis93.html - -* .. _FERREIRA96: - - Paulo Ferreira. 1996. "`Larchant: garbage collection in a cached distributed shared store with persistence by reachability `_". Université Paris VI. Thése de doctorat. - - .. abstract: ferreira96.html - -* .. _FS98: - - Paulo Ferreira & Marc Shapiro. 1998. "`Modelling a Distributed Cached Store for Garbage Collection `_". Springer-Verlag. Proceedings of 12th European Conference on Object-Oriented Programming, ECOOP98, LNCS 1445. - -* .. _FW76: - - Daniel P Friedman, David S. Wise. 1976. "`Garbage collecting a heap which includes a scatter table `_". *Information Processing Letters.* 5, 6 (December 1976): 161--164. - -* .. _FW77: - - Daniel P Friedman, David S. Wise. 1977. "`The One Bit Reference Count `_". *BIT.* (17)3: 351--359. - - .. abstract: fw77.html - -* .. _FW79: - - Daniel P Friedman, David S. Wise. 1979. "`Reference counting can manage the circular environments of mutual recursion `_". *Information Processing Letters.* 8, 1 (January 1979): 41--45. - -* .. _GZH93: - - Dirk Grunwald, Benjamin Zorn, R. Henderson. 1993. "`Improving the Cache Locality of Memory Allocation `_". SIGPLAN. SIGPLAN '93, Conference on PLDI, June 1993, Albuquerque, New Mexico. - - .. abstract: gzh93.html - -* .. _GRUN92: - - Dirk Grunwald & Benjamin Zorn. 1993. "`CustoMalloc: Efficient Synthesized Memory Allocators `_". Software -- Practice and Experience. 23(8):851--869. - - .. abstract: grun92.html - -* .. _GUDEMAN93: - - David Gudeman. 1993. "`Representing Type Information in Dynamically Typed Languages `_". University of Arizona at Tucson. Technical Report TR 93-27. - - .. abstract: gudeman93.html - -* .. _HARRIS99: - - Timothy Harris. 1999. "`Early storage reclamation in a tracing garbage collector `_". ACM. ACM SIG-PLAN Notices 34:4, pp. 46--53. - - .. abstract: harris99.html - -* .. _HENRIK94: - - Roger Henriksson. 1994. "Scheduling Real Time Garbage Collection". Department of Computer Science at Lund University. LU-CS-TR:94-129. - - .. abstract: henrik94.html - -* .. _HENRIK96: - - Roger Henriksson. 1996. "`Adaptive Scheduling of Incremental Copying Garbage Collection for Interactive Applications `_". NWPER96. - - .. abstract: henrik96.html - -* .. _HENRIKSSON98: - - Roger Henriksson. 1998. "`Scheduling Garbage Collection in Embedded Systems `_". Department of Computer Science at Lund University. Ph.D. thesis. - - .. abstract: henriksson98.html - -* .. _HOSKING91: - - Antony L. Hosking. 1991. "`Main memory management for persistence `_". ACM. Proceedings of the ACM OOPSLA'91 Workshop on Garbage Collection. - -* .. _HMS92: - - Antony L. Hosking, J. Eliot B. Moss, Darko Stefanovic. 1992. "`A comparative performance evaluation of write barrier implementations `_". ACM. OOPSLA'92 Conference Proceedings, ACM SIGPLAN Notices 27(10), pp 92--109. - -* .. _HH93: - - Antony L. Hosking, Richard L. Hudson. 1993. "`Remembered sets can also play cards `_". ACM. Proceedings of the ACM OOPSLA'93 Workshop on Memory Management and Garbage Collection. - -* .. _HM93: - - Antony L. Hosking, J. Eliot B. Moss. 1993. "`Protection traps and alternatives for memory management of an object-oriented language `_". ACM. 
Proceedings of the Fourteenth ACM Symposium on Operating Systems Principles, ACM Operating Systems Review 27(5), pp 106--119. - -* .. _HMDW91: - - Richard L. Hudson, J. Eliot B. Moss, Amer Diwan, Christopher F. Weight. 1991. "`A Language-Independent Garbage Collector Toolkit `_". University of Massachusetts at Amherst. COINS Technical Report 91--47. - - .. abstract: hmdw91.html - -* .. _HM92: - - Richard L. Hudson, J. Eliot B. Moss. 1992. "`Incremental Collection of Mature Objects `_". Springer-Verlag. LNCS #637 International Workshop on Memory Management, St. Malo, France, Sept. 1992, pp. 388--403. - - .. abstract: hm92.html - -* .. _HMMM97: - - Richard L. Hudson, Ron Morrison, J. Eliot B. Moss, David S. Munro. 1997. "`Garbage Collecting the World: One Car at a Time `_". ACM. Proc. OOPSLA 97, pp. 162--175. - - .. abstract: hmmm97.html - -* .. _ISO90: - - "International Standard ISO/IEC 9899:1990 Programming languages — C". - -* .. _JOHNSTONE97: - - Mark S. Johnstone. 1997. "`Non-Compacting Memory Allocation and Real-Time Garbage Collection `_". University of Texas at Austin. - - .. abstract: johnstone97.html - -* .. _JW98: - - Mark S. Johnstone, Paul R. Wilson. 1998. "`The Memory Fragmentation Problem: Solved? `_". ACM. ISMM'98 pp. 26--36. - - .. abstract: jw98.html - -* .. _JONES92: - - Richard E. Jones. 1992. "`Tail recursion without space leaks `_". *Journal of Functional Programming.* 2(1):73--79. - -* .. _JL92: - - Richard E. Jones, Rafael Lins. 1992. "`Cyclic weighted reference counting without delay `_". Computing Laboratory, The University of Kent at Canterbury. Technical Report 28-92. - - .. abstract: jl92.html - -* .. _JONES96: - - Richard E. Jones, Rafael Lins. 1996. "`Garbage Collection: Algorithms for Automatic Dynamic Memory Management `_". Wiley. ISBN 0-471-94148-4. - - .. abstract: jones96.html - -* .. _ACM98: - - Richard E. Jones. 1998. "`ISMM'98 International Symposium on Memory Management `_". ACM. ISBN 1-58113-114-3. - - .. abstract: acm98.html - -* .. _JONES12: - - Richard E. Jones, Antony Hosking, and Eliot Moss. 2012. "`The Garbage Collection Handbook `_". Chapman & Hall. - -* .. _JOYNER96: - - Ian Joyner. 1996. "`C++??: A Critique of C++ `_.". - -* .. _KANEFSKY89: - - Bob Kanefsky. 1989. "`Recursive Memory Allocation `_". Bob Kanefsky. Songworm 3, p.?. - -* .. _KQH98: - - Jin-Soo Kim, Xiaohan Qin, Yarsun Hsu. 1998. "`Memory Characterization of a Parallel Data Mining Workload `_". IEEE. Proc. Workload Characterization: Methodology and Case Studies, pp. . - - .. abstract: kqh98.html - -* .. _KH00: - - Jin-Soo Kim & Yarsun Hsu. 2000. "Memory system behavior of Java programs: methodology and analysis". ACM. Proc. International conference on measurements and modeling of computer systems, pp. 264--274. - -* .. _KOLODNER92: - - Elliot K. Kolodner. 1992. "Atomic Incremental Garbage Collection and Recovery for a Large Stable Heap". Laboratory for Computer Science at MIT. MIT-LCS-TR-534. - - .. abstract: kolodner92.html - -* .. _LK98: - - Per-Åke Larson & Murali Krishnan. 1998. "`Memory Allocation for Long-Running Server Applications `_". ACM. ISMM'98 pp. 176--185. - - .. abstract: lk98.html - -* .. _LH83: - - Henry Lieberman & Carl Hewitt. 1983. "`A real-time garbage collector based on the lifetimes of objects `_". ACM. 26(6):419--429. - -* .. _MM59: - - J. McCarthy, M. L. Minsky. 1959. "Artificial Intelligence, Quarterly Progress Report no. 53". Research Laboratory of Electronics at MIT. - -* .. _MCCARTHY60: - - J. McCarthy. 1960. 
"`Recursive Functions of Symbolic Expressions and Their Computation by Machine `_". CACM. - - .. abstract: mccarthy60.html - -* .. _MCCARTHY79: - - John McCarthy. 1979. "`History of Lisp `_". In *History of programming languages I*, pp. 173–185. ACM. - -* .. _PTM98: - - Veljko Milutinovic, Jelica Protic, Milo Tomasevic. 1997. "`Distributed shared memory: concepts and systems `_". IEEE Computer Society Press. ISBN 0-8186-7737-6. - - .. abstract: ptm98.html - -* .. _MINSKY63: - - M. L. Minsky. 1963. "A LISP Garbage Collector Algorithm Using Serial Secondary Storage". MIT. Memorandum MAC-M-129, Artificial Intelligence Project, Memo 58 (revised). - -* .. _MOON84: - - David Moon. 1984. "`Garbage Collection in a Large Lisp System `_". ACM. Symposium on Lisp and Functional Programming, August 1984. - -* .. _MOON85: - - David Moon. 1985. "Architecture of the Symbolics 3600". IEEE. 12th International Symposium on Computer Architecture, pp. 76--83. - -* .. _MOON87: - - David Moon. 1990. "Symbolics Architecture". Wiley. Chapter 3 of *Computers for Artificial Intelligence Processing*, ISBN 0-471-84811-5. - -* .. _MOON91: - - David Moon. 1991. "Genera Retrospective". IEEE. 1991 International Workshop on Object Orientation in Operating Systems, order #2265. - -* .. _MORDEC84: - - Ben-Ari Mordechai. 1984. "Algorithms for On-the-fly Garbage Collection". *TOPLAS* 6(3): 333--344 (1984). - -* .. _MOREAU98: - - Luc Moreau. 1998. "`Hierarchical Distributed Reference Counting `_". ACM. ISMM'98 pp. 57--67. - -* .. _MFH95: - - Greg Morrisett, Matthias Felleisen, Robert Harper. 1995. "`Abstract Models of Memory Management `_". Carnegie Mellon University. CMU-CS-FOX-95-01. - - .. abstract: mfh95.html - -* .. _MBMM99: - - David S. Munro, Alfred Brown, Ron Morrison, J. Eliot B. Moss. 1999. "`Incremental Garbage Collection of a Persistent Object Store using PMOS `_". Morgan Kaufmann. in Advances in Persistent Object Systems, pp. 78--91. - - .. abstract: mbmm99.html - -* .. _NOPH92: - - Scott Nettles, James O'Toole, David Pierce, Nickolas Haines. 1992. "`Replication-Based Incremental Copying Collection `_". IWMM'92. - - .. abstract: noph92.html - -* .. _NETTLES92: - - Scott Nettles. 1992. "`A Larch Specification of Copying Garbage Collection `_". Carnegie Mellon University. CMU-CS-92-219. - - .. abstract: nettles92.html - -* .. _NO93A: - - Scott Nettles & James O'Toole. 1993. "Implementing Orthogonal Persistence: A Simple Optimization Using Replicating Collection". USENIX. IWOOOS'93. - - .. abstract: no93a.html - -* .. _NO93: - - Scott Nettles & James O'Toole. 1993. "`Real-Time Replication Garbage Collection `_". ACM. PLDI'93. - - .. abstract: no93.html - -* .. _NIELSEN77: - - Norman R. Nielsen. 1977. "Dynamic Memory Allocation in Computer Simulation". ACM. CACM 20:11. - - .. abstract: nielsen77.html - -* .. _OTOOLE90: - - James O'Toole. 1990. "Garbage Collecting Locally". - - .. abstract: otoole90.html - -* .. _ON94: - - James O'Toole & Scott Nettles. 1994. "`Concurrent Replicating Garbage Collection `_". ACM. LFP'94. - - .. abstract: on94.html - -* .. _JRR99: - - Simon Peyton Jones, Norman Ramsey, Fermin Reig. 1999. "`C--: a portable assembly language that supports garbage collection `_". Springer-Verlag. International Conference on Principles and Practice of Declarative Programming 1999, LNCS 1702, pp. 1--28. - - .. abstract: jrr99.html - -* .. _PIEPER93: - - John S. Pieper. 1993. "Compiler Techniques for Managing Data Motion". Carnegie Mellon University. Technical report number CMU-CS-93-217. - - .. 
abstract: pieper93.html - -* .. _PIRINEN98: - - Pekka P. Pirinen. 1998. "Barrier techniques for incremental tracing". ACM. ISMM'98 pp. 20--25. - - .. abstract: pirinen98.html - -* .. _PRINTEZIS96: - - Tony Printezis. 1996. "Disk Garbage Collection Strategies for Persistent Java". Proceedings of the First International Workshop on Persistence and Java. - - .. abstract: printezis96.html - -* .. _PC96: - - Tony Printezis & Quentin Cutts. 1996. "Measuring the Allocation Rate of Napier88". Department of Computing Science at University of Glasgow. TR ?. - -* .. _REINHOLD93: - - M. B. Reinhold. 1993. "`Cache Performance of Garbage Collected Programming Languages `_". Laboratory for Computer Science at MIT. MIT/LCS/TR-581. - - .. abstract: reinhold93.html - -* .. _ROBSON77: - - J. M. Robson. 1977. "Worst case fragmentation of first fit and best fit storage allocation strategies". ACM. ACM Computer Journal, 20(3):242--244. - -* .. _RR97: - - Gustavo Rodriguez-Rivera & Vince Russo. 1997. "Non-intrusive Cloning Garbage Collection with Stock Operating System Support". Software -- Practice and Experience. 27:8. - - .. abstract: rr97.html - -* .. _ROJEMO95: - - Niklas Röjemo. 1995. "Highlights from nhc -- a space-efficient Haskell compiler". Chalmers University of Technology. - - .. abstract: rojemo95.html - -* .. _ROJEMO95A: - - Niklas Röjemo. 1995. "Generational garbage collection for lazy functional languages without temporary space leaks". Chalmers University of Technology. - -* .. _RR96: - - Niklas Röjemo & Colin Runciman. 1996. "Lag, drag, void and use -- heap profiling and space-efficient compilation revisited". ACM, SIGPLAN. ICFP'96, ACM SIGPLAN Notices 31:6, ISBN 0-89791-770-7, pp. 34--41. - - .. abstract: rr96.html - -* .. _RW99: - - David J. Roth, David S. Wise. 1999. "`One-bit counts between unique and sticky `_". ACM. ISMM'98, pp. 49--56. - - .. abstract: rw99.html - -* .. _ROVNER85: - - Paul Rovner. 1985. "`On Adding Garbage Collection and Runtime Types to a Strongly-Typed, Statically-Checked, Concurrent Language `_". Xerox PARC. TR CSL-84-7. - -* .. _RUNCIMAN92: - - Colin Runciman & David Wakeling. 1992. "`Heap Profiling of Lazy Functional Programs `_". University of York. - - .. abstract: runciman92.html - -* .. _RR94: - - Colin Runciman & Niklas Röjemo. 1994. "`New dimensions in heap profiling `_". University of York. - - .. abstract: rr94.html - -* .. _RR96A: - - Colin Runciman & Niklas Röjemo. 1996. "Two-pass heap profiling: a matter of life and death". Department of Computer Science, University of York. - -* .. _SG95: - - Jacob Seligmann & Steffen Grarup. 1995. "`Incremental Mature Garbage Collection Using the Train Algorithm `_". Springer-Verlag. ECOOP'95, Lecture Notes in Computer Science, Vol. 952, pp. 235--252, ISBN 3-540-60160-0. - - .. abstract: sg95.html - -* .. _SB00: - - Manuel Serrano, Hans-J. Boehm. 2000. "`Understanding memory allocation of Scheme programs `_". ACM. Proceedings of International Conference on Functional Programming 2000. - -* .. _SHAPIRO94: - - Marc Shapiro & Paulo Ferreira. 1994. "`Larchant-RDOSS: a distributed shared persistent memory and its garbage collector `_". INRIA. INRIA Rapport de Recherche no. 2399; Cornell Computer Science TR94-1466. - - .. abstract: shapiro94.html - -* .. _SHAW87: - - Robert A. Shaw. 1987. "Improving Garbage Collector Performance in Virtual Memory". Stanford University. CSL-TR-87-323. - -* .. _SHAW88: - - Robert A. Shaw. 1988. "Empirical Analysis of a LISP System". Stanford University. CSL-TR-88-351. - -* .. 
_SINGHAL92: - - Vivek Singhal, Sheetal V. Kakkad, Paul R. Wilson. 1992. "`Texas: An Efficient, Portable Persistent Store `_". University of Texas at Austin. - - .. abstract: singhal92.html - -* .. _SOBALVARRO88: - - P. G. Sobalvarro. 1988. "`A Lifetime-based Garbage Collector for LISP Systems on General-Purpose Computers `_". MIT. AITR-1417. - - .. abstract: sobalvarro88.html - -* .. _STEELE75: - - Guy L. Steele. 1975. "`Multiprocessing Compactifying Garbage Collection `_". CACM. 18:9 pp. 495--508. - -* .. _STEELE76: - - Guy L. Steele. 1976. "Corrigendum: Multiprocessing Compactifying Garbage Collection". CACM. 19:6 p.354. - -* .. _STEELE77: - - Guy L. Steele. 1977. "Data Representation in PDP-10 MACLISP". MIT. AI Memo 421. - -* .. _SLC99: - - James M. Stichnoth, Guei-Yuan Lueh, Michal Cierniak. 1999. "`Support for Garbage Collection at Every Instruction in a Java Compiler `_". SIGPLAN. Proceedings of the 1999 ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI). SIGPLAN Notices 34(5). pp. 118--127. - -* .. _SCN84: - - Will R Stoye, T J W Clarke, Arthur C Norman. 1984. "Some Practical Methods for Rapid Combinator Reduction". In LFP 1984, 159--166. - -* .. _TD95: - - David Tarditi & Amer Diwan. 1995. "`Measuring the Cost of Storage Management `_". Carnegie Mellon University. CMU-CS-94-201. - - .. abstract: td95.html - -* .. _TJ94: - - Stephen Thomas, Richard E. Jones. 1994. "Garbage Collection for Shared Environment Closure Reducers". Computing Laboratory, The University of Kent at Canterbury. Technical Report 31-94. - - .. abstract: tj94.html - -* .. _THOMAS95: - - Stephen Thomas. 1995. "Garbage Collection in Shared-Environment Closure Reducers: Space-Efficient Depth First Copying using a Tailored Approach". *Information Processing Letters.* 56:1, pp. 1--7. - -* .. _TT97: - - Mads Tofte & Jean-Pierre Talpin. 1997. "`Region-Based Memory Management `_". Information and Computation 132(2), pp. 109--176. - - .. abstract: tt97.html - -* .. _UNGAR84: - - Dave Ungar. 1984. "`Generation Scavenging: A Non-disruptive High Performance Storage Reclamation Algorithm `_". ACM, SIGSOFT, SIGPLAN. Practical Programming Environments Conference. - -* .. _UNGAR88: - - Dave Ungar & Frank Jackson. 1988. "`Tenuring Policies for Generation-Based Storage Reclamation `_". SIGPLAN. OOPSLA '88 Conference Proceedings, ACM SIGPLAN Notices, Vol. 23, No. 11, pp. 1--17. - - .. abstract: ungar88.html - -* .. _VO96: - - Kiem-Phong Vo. 1996. "Vmalloc: A General and Efficient Memory Allocator". Software -- Practice and Experience. 26(3): 357--374 (1996). - - .. abstract: vo96.html - -* .. _WW76: - - Daniel C. Watson, David S. Wise. 1976. "Tuning Garwick's algorithm for repacking sequential storage". *BIT.* 16, 4 (December 1976): 442--450. - -* .. _WLM92: - - Paul R. Wilson, Michael S. Lam, Thomas G. Moher. 1992. "Caching Considerations for Generational Garbage Collection". ACM. L&FP 92. - - .. abstract: wlm92.html - -* .. _WIL92A: - - Paul R. Wilson, Sheetal V. Kakkad. 1992. "`Pointer Swizzling at Page Fault Time `_". University of Texas at Austin. - - .. abstract: wil92a.html - -* .. _WIL94: - - Paul R. Wilson. 1994. "`Uniprocessor Garbage Collection Techniques `_". University of Texas. - - .. abstract: wil94.html - -* .. _WIL95: - - Paul R. Wilson, Mark S. Johnstone, Michael Neely, David Boles. 1995. "`Dynamic Storage Allocation: A Survey and Critical Review `_". University of Texas at Austin. - - .. abstract: wil95.html - -* .. _WISE78: - - David S. Wise. 1978. "`The double buddy system `_". 
Department of Computer Science at Indiana University. Technical Report 79. - -* .. _WISE79: - - David S. Wise. 1979. "`Morris's garbage compaction algorithm restores reference counts `_". TOPLAS. 1, 1 (July l979): 115--120. - -* .. _WISE85: - - David S. Wise. 1985. "`Design for a multiprocessing heap with on-board reference counting `_". Springer-Verlag. In J.-P. Jouannaud (ed.), Functional Programming Languages and Computer Architecture, Lecture Notes in Computer Science 201: 289--304. - -* .. _WISE92: - - .. _WISE93: - - David S. Wise. 1993. "`Stop-and-copy and one-bit reference counting `_". *Information Processing Letters.* 46, 5 (July 1993): 243--249. - - .. abstract: wise92.html - -* .. _WW95: - - David S. Wise, Joshua Walgenbach. 1996. "`Static and Dynamic Partitioning of Pointers as Links and Threads `_". SIGPLAN. Proc. 1996 ACM SIGPLAN Intl. Conf. on Functional Programming, SIGPLAN Not. 31, 6 (June 1996), pp. 42--49. - -* .. _WHHHO94: - - David S. Wise, Brian Heck, Caleb Hess, Willie Hunt, Eric Ost. 1997. "`Uniprocessor Performance of a Reference-Counting Hardware Heap `_". *LISP and Symbolic Computation.* 10, 2 (July 1997), pp. 159--181. - -* .. _WITHINGTON91: - - P. Tucker Withington. 1991. "`How Real is 'Real-Time' Garbage Collection? `_". ACM. OOPSLA/ECOOP '91 Workshop on Garbage Collection in Object-Oriented Systems. - - .. abstract: withington91.html - -* .. _YIP91: - - G. May Yip. 1991. "`Incremental, Generational Mostly-Copying Garbage Collection in Uncooperative Environments `_". Digital Equipment Corporation. - - .. abstract: yip91.html - -* .. _YUASA90: - - Taiichi Yuasa. 1990. "Real-Time Garbage Collection on General-Purpose Machines". Journal of Software and Systems. 11:3 pp. 181--198. - -* .. _ZORN88: - - Benjamin Zorn & Paul Hilfinger. 1988. "`A Memory Allocation Profiler for C and Lisp Programs `_". USENIX. Proceedings for the Summer 1988 USENIX Conference, pp. 223--237. - - .. abstract: zorn88.html - -* .. _ZORN89: - - Benjamin Zorn. 1989. "`Comparative Performance Evaluation of Garbage Collection Algorithms `_". Computer Science Division (EECS) of University of California at Berkeley. Technical Report UCB/CSD 89/544 and PhD thesis. - - .. abstract: zorn89.html - -* .. _ZORN90B: - - Benjamin Zorn. 1990. "Comparing Mark-and-sweep and Stop-and-copy Garbage Collection". ACM. Conference on Lisp and Functional Programming, pp. 87--98. - - .. abstract: zorn90b.html - -* .. _ZORN90: - - Benjamin Zorn. 1990. "`Barrier Methods for Garbage Collection `_". University of Colorado at Boulder. Technical Report CU-CS-494-90. - - .. abstract: zorn90.html - -* .. _ZORN91: - - Benjamin Zorn. 1991. "`The Effect of Garbage Collection on Cache Performance `_". University of Colorado at Boulder. Technical Report CU-CS-528-91. - - .. abstract: zorn91.html - -* .. _ZORN92B: - - Benjamin Zorn & Dirk Grunwald. 1992. "`Empirical Measurements of Six Allocation-intensive C Programs `_". ACM, SIGPLAN. SIGPLAN notices, 27(12):71--80. - - .. abstract: zorn92b.html - -* .. _ZORN92: - - Benjamin Zorn. 1993. "`The Measured Cost of Conservative Garbage Collection `_". Software -- Practice and Experience. 23(7):733--756. - - .. abstract: zorn92.html - -* .. _ZORN92A: - - Benjamin Zorn & Dirk Grunwald. 1994. "`Evaluating Models of Memory Allocation `_". ACM. Transactions on Modeling and Computer Simulation 4(1):107--131. - - .. 
abstract: zorn92a.html - diff --git a/mps/manual/source/mmref/credit.rst b/mps/manual/source/mmref/credit.rst index 816cfb0c870..0ce0bd0c8b3 100644 --- a/mps/manual/source/mmref/credit.rst +++ b/mps/manual/source/mmref/credit.rst @@ -27,7 +27,7 @@ Reference. The Adaptive Memory Management Group no longer exists, and Harlequin has become a part of `Global Graphics `_. However, most of the group's work -has been aquired by `Ravenbrook Limited`, whose directors are Richard +has been aquired by `Ravenbrook Limited`_, whose directors are Richard Brooksby, the group's chief architect and manager, and Nick Barnes, a senior group member. diff --git a/mps/manual/source/mmref/faq.rst b/mps/manual/source/mmref/faq.rst index 190ecaecdb4..594f1656b97 100644 --- a/mps/manual/source/mmref/faq.rst +++ b/mps/manual/source/mmref/faq.rst @@ -22,11 +22,12 @@ garbage collection>` for :term:`C` exist as add-on libraries. .. link:: - `Boehm–Weiser collector `_. + `Memory Pool System `_, + `Boehm–Demers–Weiser collector `_. -Why do I need to test the return value from ``malloc``? Surely it always succeeds? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Why do I need to test the return value from malloc? Surely it always succeeds? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For small programs, and during light testing, it is true that :term:`malloc` usually succeeds. Unfortunately, there are all sorts of @@ -73,8 +74,8 @@ when out of memory, wrap :term:`malloc` in something like this:: Undefined behavior is worth eliminating even in small programs. -What's the point of having a garbage collector? Why not use ``malloc`` and ``free``? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +What's the point of having a garbage collector? Why not use malloc and free? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :term:`Manual memory management`, such as :term:`malloc` and :term:`free (2)`, forces the programmer to keep track of which memory @@ -90,12 +91,12 @@ problem, rather than the tedious details of the implementation. .. seealso:: :term:`garbage collection` -What's wrong with ANSI ``malloc`` in the C library? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +What's wrong with ANSI malloc in the C library? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:term:`Malloc` provides a very basic :term:`manual memory management` -service. However, it does not provide the following things, which may -be desirable in your memory manager: +The :term:`malloc` function provides a very basic :term:`manual memory +management` service. However, it does not provide the following +things, which may be desirable in your memory manager: * high performance for specified block sizes; * :term:`tagged references`; @@ -130,11 +131,12 @@ semi-conservative garbage collectors for C++. .. link:: - `Boehm–Weiser collector `_. + `Memory Pool System `_, + `Boehm–Demers–Weiser collector `_. -Why is ``delete`` so slow? -^^^^^^^^^^^^^^^^^^^^^^^^^^ +Why is delete so slow? +^^^^^^^^^^^^^^^^^^^^^^ Often ``delete`` must perform a more complex task than simply freeing the memory associated with an object; this is known as @@ -163,12 +165,12 @@ In :term:`C++`, it may be that class libraries expect you to call Failing this, if there is a genuine :term:`memory leak` in a class library for which you don't have the source, then the only thing you -can try is to add a :term:`garbage collector`. 
The Boehm–Weiser -collector will work with C++. +can try is to add a :term:`garbage collector`. .. link:: - `Boehm–Weiser collector `_. + `Memory Pool System `_, + `Boehm–Demers–Weiser collector `_. Can't I get all the benefits of garbage collection using C++ constructors and destructors? @@ -400,7 +402,7 @@ Where can I find out more about garbage collection? Many modern languages have :term:`garbage collection` built in, and the language documentation should give details. For some other languages, garbage collection can be added, for example via the -Boehm–Weiser collector. +Memory Pool System, or the Boehm–Demers–Weiser collector. .. seealso:: :term:`garbage collection` @@ -408,22 +410,25 @@ Boehm–Weiser collector. .. link:: - `Boehm–Weiser collector `_, + `Memory Pool System `_, + `Boehm–Demers–Weiser collector `_, `GC-LIST FAQ `_. Where can I get a garbage collector? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The Boehm–Weiser collector is suitable for C or C++. The best way to -get a garbage collector, however, is to program in a language that -provides garbage collection. +The Memory Pool System and the Boehm–Demers–Weiser collector are +suitable for C or C++. The best way to get a garbage collector, +however, is to program in a language that provides garbage collection +natively. .. seealso:: :term:`garbage collection` .. link:: - `Boehm–Weiser collector `_. + `Memory Pool System `_, + `Boehm–Demers–Weiser collector `_. Why does my program use so much memory? diff --git a/mps/manual/source/mmref/index.rst b/mps/manual/source/mmref/index.rst index eb6ef48257e..d87db74339d 100644 --- a/mps/manual/source/mmref/index.rst +++ b/mps/manual/source/mmref/index.rst @@ -1,3 +1,5 @@ +.. _mmref-intro: + Introduction to memory management ################################# @@ -9,5 +11,3 @@ Introduction to memory management alloc recycle lang - faq - diff --git a/mps/manual/source/mmref/lang.rst b/mps/manual/source/mmref/lang.rst index 0d081b82a30..247eb7c6c25 100644 --- a/mps/manual/source/mmref/lang.rst +++ b/mps/manual/source/mmref/lang.rst @@ -53,8 +53,9 @@ Memory management in various languages library functions for :term:`memory (2)` management in C, :term:`malloc` and :term:`free (2)`, have become almost synonymous with :term:`manual memory management`), although - with the Boehm-Weiser :term:`collector (1)`, it is now - possible to use :term:`garbage collection`. + with the Memory Pool System, or the Boehm–Demers–Weiser + collector, it is now possible to use :term:`garbage + collection`. The language is notorious for fostering memory management bugs, including: @@ -86,7 +87,8 @@ Memory management in various languages .. link:: - `Boehm-Weiser collector `_, + `Memory Pool System `_, + `Boehm–Demers–Weiser collector `_, `C standardization `_, `comp.lang.c Frequently Asked Questions `_. @@ -148,11 +150,11 @@ Memory management in various languages The :term:`garbage collector` in the .NET Framework is configurable to run in soft real time, or in batch mode. - The Mono runtime comes with two collectors: the Boehm–Weiser - :term:`conservative collector `, and a :term:`generational ` :term:`copying collector `. + The Mono runtime comes with two collectors: the + Boehm–Demers–Weiser :term:`conservative collector + `, and a :term:`generational + ` :term:`copying collector + `. .. link:: @@ -173,9 +175,9 @@ Memory management in various languages abstraction level of C++ makes the bookkeeping required for :term:`manual memory management` even harder. 
Although the standard library provides only manual memory management, with - the Boehm-Weiser :term:`collector (1)`, it is now possible to - use :term:`garbage collection`. :term:`Smart pointers` are - another popular solution. + the Memory Pool System, or the Boehm–Demers–Weiser collector, + it is now possible to use :term:`garbage collection`. + :term:`Smart pointers` are another popular solution. The language is notorious for fostering memory management bugs, including: @@ -222,6 +224,8 @@ Memory management in various languages .. link:: + `Memory Pool System `_, + `Boehm–Demers–Weiser collector `_, `comp.lang.c++ FAQ `_, `C++ standardization `_. diff --git a/mps/manual/source/pool/amc.rst b/mps/manual/source/pool/amc.rst index 4f326b9f78f..e09cef8cb14 100644 --- a/mps/manual/source/pool/amc.rst +++ b/mps/manual/source/pool/amc.rst @@ -24,8 +24,8 @@ except for blocks that are :term:`pinned ` by It uses :term:`generational garbage collection`. That is, it exploits assumptions about object lifetimes and inter-connection variously -referred to as "the generational hypothesis". In particular, the -following tendencies will be efficiently exploited by an AMC pool: +referred to as "the :term:`generational hypothesis`". In particular, +the following tendencies will be efficiently exploited by an AMC pool: - most objects die young; @@ -72,8 +72,10 @@ AMC properties * Blocks are :term:`scanned `. -* Blocks may only be referenced by :term:`base pointers` (unless they - have :term:`in-band headers`). +* Blocks may be referenced by :term:`interior pointers` (unless + :c:macro:`MPS_KEY_INTERIOR` is set to ``FALSE``, in which case only + :term:`base pointers`, or :term:`client pointers` if the blocks + have :term:`in-band headers`, are supported). * Blocks may be protected by :term:`barriers (1)`. diff --git a/mps/manual/source/pool/ams.rst b/mps/manual/source/pool/ams.rst index ae1d43f8bed..59d5ccfbd57 100644 --- a/mps/manual/source/pool/ams.rst +++ b/mps/manual/source/pool/ams.rst @@ -180,19 +180,21 @@ AMS interface class. When creating a debugging AMS pool, :c:func:`mps_pool_create_k` - takes three keyword arguments: :c:macro:`MPS_KEY_FORMAT` and - :c:macro:`MPS_KEY_CHAIN` are as described above, and - :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` specifies the debugging - options. See :c:type:`mps_debug_option_s`. + accepts the following keyword arguments: + :c:macro:`MPS_KEY_FORMAT`, :c:macro:`MPS_KEY_CHAIN`, + :c:macro:`MPS_KEY_GEN`, and + :c:macro:`MPS_KEY_AMS_SUPPORT_AMBIGUOUS` are as described above, + and :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` specifies the debugging + options. See :c:type:`mps_pool_debug_option_s`. .. deprecated:: starting with version 1.112. - When using :c:func:`mps_pool_create`, pass the format, - chain, and debugging options like this:: + When using :c:func:`mps_pool_create`, pass the arguments like + this:: mps_res_t mps_pool_create(mps_pool_t *pool_o, mps_arena_t arena, mps_class_t mps_class_ams_debug(), - mps_debug_option_s debug_option, + mps_pool_debug_option_s debug_option, mps_fmt_t fmt, mps_chain_t chain, mps_bool_t support_ambiguous) diff --git a/mps/manual/source/pool/intro.rst b/mps/manual/source/pool/intro.rst index ed8f83b8d4a..17b97cb06a4 100644 --- a/mps/manual/source/pool/intro.rst +++ b/mps/manual/source/pool/intro.rst @@ -100,35 +100,35 @@ it makes no sense to ask whether they may contain :term:`weak references (1)`. 
-============================================= ===== ===== ===== ===== ===== ===== ===== ===== ===== ===== -Property AMC AMCZ AMS AWL LO MFS MV MVFF MVT SNC -============================================= ===== ===== ===== ===== ===== ===== ===== ===== ===== ===== -Supports :c:func:`mps_alloc`? no no no no no yes yes yes no no -Supports :c:func:`mps_free`? no no no no no yes yes yes yes no -Supports allocation points? yes yes yes yes yes no yes yes yes yes -Supports allocation frames? yes yes yes yes yes no no yes yes yes -Supports segregated allocation caches? no no no no no yes yes yes no no -Timing of collections? [2]_ auto auto auto auto auto --- --- --- --- --- -May contain references? [3]_ yes no yes yes no no no no no yes -May contain exact references? [4]_ yes --- yes yes --- --- --- --- --- yes -May contain ambiguous references? [4]_ no --- no no --- --- --- --- --- no -May contain weak references? [4]_ no --- no yes --- --- --- --- --- no -Allocations fixed or variable in size? var var var var var fixed var var var var -Alignment? [5]_ conf conf conf conf conf [6]_ [6]_ [7]_ [7]_ conf -Dependent objects? [8]_ no --- no yes --- --- --- --- --- no -May use remote references? [9]_ no --- no no --- --- --- --- --- no -Blocks are automatically managed? [10]_ yes yes yes yes yes no no no no no -Blocks are promoted between generations yes yes no no no --- --- --- --- --- -Blocks are manually managed? [10]_ no no no no no yes yes yes yes yes -Blocks are scanned? [11]_ yes no yes yes no no no no no yes -Blocks support base pointers only? [12]_ no no yes yes yes --- --- --- --- yes -Blocks support internal pointers? [12]_ yes yes no no no --- --- --- --- no -Blocks may be protected by barriers? yes no yes yes yes no no no no yes -Blocks may move? yes yes no no no no no no no no -Blocks may be finalized? yes yes yes yes yes no no no no no -Blocks must be formatted? [11]_ yes yes yes yes yes no no no no yes -Blocks may use :term:`in-band headers`? yes yes yes yes yes --- --- --- --- no -============================================= ===== ===== ===== ===== ===== ===== ===== ===== ===== ===== +.. csv-table:: + :header: "Property", ":ref:`AMC `", ":ref:`AMCZ `", ":ref:`AMS `", ":ref:`AWL `", ":ref:`LO `", ":ref:`MFS `", ":ref:`MV `", ":ref:`MVFF `", ":ref:`MVT `", ":ref:`SNC `" + :widths: 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + + Supports :c:func:`mps_alloc`?, no, no, no, no, no, yes, yes, yes, no, no + Supports :c:func:`mps_free`?, no, no, no, no, no, yes, yes, yes, yes, no + Supports allocation points?, yes, yes, yes, yes, yes, no, yes, yes, yes, yes + Supports allocation frames?, yes, yes, yes, yes, yes, no, no, yes, yes, yes + Supports segregated allocation caches?, no, no, no, no, no, yes, yes, yes, no, no + Timing of collections? [2]_, auto, auto, auto, auto, auto, ---, ---, ---, ---, --- + May contain references? [3]_, yes, no, yes, yes, no, no, no, no, no, yes + May contain exact references? [4]_, yes, ---, yes, yes, ---, ---, ---, ---, ---, yes + May contain ambiguous references? [4]_, no, ---, no, no, ---, ---, ---, ---, ---, no + May contain weak references? [4]_, no, ---, no, yes, ---, ---, ---, ---, ---, no + Allocations fixed or variable in size?, var, var, var, var, var, fixed, var, var, var, var + Alignment? [5]_, conf, conf, conf, conf, conf, [6]_, conf, [7]_, [7]_, conf + Dependent objects? [8]_, no, ---, no, yes, ---, ---, ---, ---, ---, no + May use remote references? [9]_, no, ---, no, no, ---, ---, ---, ---, ---, no + Blocks are automatically managed? 
[10]_, yes, yes, yes, yes, yes, no, no, no, no, no + Blocks are promoted between generations, yes, yes, no, no, no, ---, ---, ---, ---, --- + Blocks are manually managed? [10]_, no, no, no, no, no, yes, yes, yes, yes, yes + Blocks are scanned? [11]_, yes, no, yes, yes, no, no, no, no, no, yes + Blocks support base pointers only? [12]_, no, no, yes, yes, yes, ---, ---, ---, ---, yes + Blocks support internal pointers? [12]_, yes, yes, no, no, no, ---, ---, ---, ---, no + Blocks may be protected by barriers?, yes, no, yes, yes, yes, no, no, no, no, yes + Blocks may move?, yes, yes, no, no, no, no, no, no, no, no + Blocks may be finalized?, yes, yes, yes, yes, yes, no, no, no, no, no + Blocks must be formatted? [11]_, yes, yes, yes, yes, yes, no, no, no, no, yes + Blocks may use :term:`in-band headers`?, yes, yes, yes, yes, yes, ---, ---, ---, ---, no .. note:: @@ -151,13 +151,13 @@ Blocks may use :term:`in-band headers`? yes yes yes yes yes .. [5] "Alignment" is "conf" if the client program may specify :term:`alignment` for each pool. - .. [6] The alignment of blocks allocated from :ref:`pool-mv` pools - is platform-dependent. + .. [6] The alignment of blocks allocated from :ref:`pool-mfs` + pools is the platform's :term:`natural alignment`, + :c:macro:`MPS_PF_ALIGN`. .. [7] :ref:`pool-mvt` and :ref:`pool-mvff` pools have - configurable alignment, but it may not be smaller than the - :term:`natural alignment` for the :term:`platform` (see - :c:macro:`MPS_PF_ALIGN`). + configurable alignment, but it may not be smaller than + ``sizeof(void *)``. .. [8] In pools with this property, each object may specify an :term:`dependent object` which the client program diff --git a/mps/manual/source/pool/mv.rst b/mps/manual/source/pool/mv.rst index 361cd41c0b5..f38df2e37b6 100644 --- a/mps/manual/source/pool/mv.rst +++ b/mps/manual/source/pool/mv.rst @@ -38,9 +38,7 @@ MV properties * Allocations may be variable in size. -* The :term:`alignment` of blocks is not configurable: it is the - :term:`natural alignment` of the platform (see - :c:macro:`MPS_PF_ALIGN`). +* The :term:`alignment` of blocks is configurable. * Blocks do not have :term:`dependent objects`. @@ -73,7 +71,13 @@ MV interface :term:`pool`. When creating an MV pool, :c:func:`mps_pool_create_k` may take - three :term:`keyword arguments`: + the following :term:`keyword arguments`: + + * :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is + :c:macro:`MPS_PF_ALIGN`) is the + :term:`alignment` of addresses for allocation (and freeing) in + the pool. If an unaligned size is passed to :c:func:`mps_alloc` or + :c:func:`mps_free`, it will be rounded up to the pool's alignment. * :c:macro:`MPS_KEY_EXTEND_BY` (type :c:type:`size_t`, default 65536) is the :term:`size` of segment that the pool will @@ -119,19 +123,20 @@ MV interface class. When creating a debugging MV pool, :c:func:`mps_pool_create_k` - takes four keyword arguments: :c:macro:`MPS_KEY_EXTEND_SIZE`, - :c:macro:`MPS_KEY_MEAN_SIZE`, :c:macro:`MPS_KEY_MAX_SIZE` are as - described above, and :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` - specifies the debugging options. See :c:type:`mps_debug_option_s`. + takes the following keyword arguments: :c:macro:`MPS_KEY_ALIGN`, + :c:macro:`MPS_KEY_EXTEND_SIZE`, :c:macro:`MPS_KEY_MEAN_SIZE`, + :c:macro:`MPS_KEY_MAX_SIZE` are as described above, and + :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` specifies the debugging + options. See :c:type:`mps_debug_option_s`. .. deprecated:: starting with version 1.112. 
- When using :c:func:`mps_pool_create`, pass the debugging - options, segment size, mean size, and maximum size like this:: + When using :c:func:`mps_pool_create`, pass the arguments like + this:: mps_res_t mps_pool_create(mps_pool_t *pool_o, mps_arena_t arena, mps_class_t mps_class_mv_debug(), - mps_debug_option_s debug_option, + mps_pool_debug_option_s debug_option, mps_size_t extend_size, mps_size_t average_size, mps_size_t maximum_size) diff --git a/mps/manual/source/pool/mvff.rst b/mps/manual/source/pool/mvff.rst index e688a00d09d..208bd5f2686 100644 --- a/mps/manual/source/pool/mvff.rst +++ b/mps/manual/source/pool/mvff.rst @@ -80,7 +80,7 @@ MVFF properties * Allocations may be variable in size. * The :term:`alignment` of blocks is configurable, but may not be - smaller than the :term:`natural alignment` of the platform. + smaller than ``sizeof(void *)``. * Blocks do not have :term:`dependent objects`. @@ -127,10 +127,10 @@ MVFF interface * :c:macro:`MPS_KEY_ALIGN` (type :c:type:`mps_align_t`, default is :c:macro:`MPS_PF_ALIGN`) is the :term:`alignment` of addresses for allocation (and freeing) in - the pool. If an unaligned size is passed to :c:func:`mps_alloc` or - :c:func:`mps_free`, it will be rounded up to the pool's alignment. - The minimum alignment supported by pools of this class is - ``sizeof(void *)``. + the pool. If an unaligned size is passed to :c:func:`mps_alloc` + or :c:func:`mps_free`, it will be rounded up to the pool's + alignment. The minimum alignment supported by pools of this + class is ``sizeof(void *)``. * :c:macro:`MPS_KEY_MVFF_ARENA_HIGH` (type :c:type:`mps_bool_t`, default false) determines whether new segments are acquired at high @@ -201,16 +201,16 @@ MVFF interface :c:macro:`MPS_KEY_MVFF_SLOT_HIGH`, and :c:macro:`MPS_KEY_MVFF_FIRST_FIT` are as described above, and :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` specifies the debugging - options. See :c:type:`mps_debug_option_s`. + options. See :c:type:`mps_pool_debug_option_s`. .. deprecated:: starting with version 1.112. - When using :c:func:`mps_pool_create`, pass the debugging - options, and other arguments like this:: + When using :c:func:`mps_pool_create`, pass the arguments like + this:: mps_res_t mps_pool_create(mps_pool_t *pool_o, mps_arena_t arena, mps_class_t mps_class_mvff_debug(), - mps_debug_option_s debug_option, + mps_pool_debug_option_s debug_option, size_t extend_size, size_t average_size, mps_align_t alignment, diff --git a/mps/manual/source/pool/mvt.rst b/mps/manual/source/pool/mvt.rst index f3bbba705a0..4f89b8d5178 100644 --- a/mps/manual/source/pool/mvt.rst +++ b/mps/manual/source/pool/mvt.rst @@ -78,6 +78,9 @@ MVT properties * Allocations may be variable in size. +* The :term:`alignment` of blocks is configurable, but may not be + smaller than ``sizeof(void *)``. + * Blocks do not have :term:`dependent objects`. * Blocks are not automatically :term:`reclaimed`. @@ -117,7 +120,7 @@ MVT interface the pool. If an unaligned size is passed to :c:func:`mps_alloc` or :c:func:`mps_free`, it will be rounded up to the pool's alignment. The minimum alignment supported by pools of this class is - ``sizeof(void *)``. + ``sizeof(void *)``. 
* :c:macro:`MPS_KEY_MIN_SIZE` (type :c:type:`size_t`, default is :c:macro:`MPS_PF_ALIGN`) is the diff --git a/mps/manual/source/release.rst b/mps/manual/source/release.rst index 0ac53f9020a..fba274d4a55 100644 --- a/mps/manual/source/release.rst +++ b/mps/manual/source/release.rst @@ -40,12 +40,11 @@ New features the lowest generation whose new size was within its capacity.) - Interface changes ................. -#. There is now a default value (currently 1 \ :term:`megabyte`) for - the :c:macro:`MPS_KEY_ARENA_SIZE` keyword argument to +#. There is now a default value (currently 256 \ :term:`megabytes`) + for the :c:macro:`MPS_KEY_ARENA_SIZE` keyword argument to :c:func:`mps_arena_create_k` when creating a virtual memory arena. See :c:func:`mps_arena_class_vm`. @@ -54,6 +53,20 @@ Interface changes the value ``FALSE`` is appropriate only when you know that all references are exact. See :ref:`pool-ams`. +#. It is now possible to configure the alignment of objects allocated + in a :ref:`pool-mv` pool, by passing the :c:macro:`MPS_KEY_ALIGN` + keyword argument to :c:func:`mps_pool_create_k`. + +#. The alignment requirements for :ref:`pool-mvff` and :ref:`pool-mvt` + pools have been relaxed on the platforms ``w3i3mv`` and ``w3i6mv``. + On all platforms it is now possible to specify alignments down to + ``sizeof(void *)`` as the alignment for pools of these classes. + +#. The sizes of the templates in a :c:type:`mps_pool_debug_option_s` + structure no longer have to be related to the alignment of the + pools that they are used with. This makes it easier to reuse these + structures. + Other changes ............. @@ -77,19 +90,38 @@ Other changes .. _job003745: https://www.ravenbrook.com/project/mps/issue/job003745/ +#. The debugging version of the :ref:`pool-mvff` pool class, + :c:func:`mps_class_mvff_debug`, no longer triggers an assertion + failure if you allocate a large object. See job003751_. + + .. _job003751: https://www.ravenbrook.com/project/mps/issue/job003751/ + #. :program:`mpseventtxt` now successfully processes a telemetry log containing multiple labels associated with the same address. See job003756_. .. _job003756: https://www.ravenbrook.com/project/mps/issue/job003756/ -#. :ref:`pool-ams` pools get reliably collected, even in the case - where an AMS pool is the only pool on its generation chain and is - allocating into some generation other than the nursery. See - job003771_. +#. :ref:`pool-ams`, :ref:`pool-awl` and :ref:`pool-lo` pools get + reliably collected, even in the case where the pool is the only + pool on its generation chain and is allocating into some generation + other than the nursery. See job003771_. .. _job003771: https://www.ravenbrook.com/project/mps/issue/job003771/ +#. Allocation into :ref:`pool-awl` pools again reliably provokes + garbage collections of the generation that the pool belongs to. (In + release 1.113.0, the generation would only be collected if a pool + of some other class allocated into it.) See job003772_. + + .. _job003772: https://www.ravenbrook.com/project/mps/issue/job003772/ + +#. All unreachable objects in :ref:`pool-lo` pools are finalized. + (Previously, objects on a segment attached to an allocation point + were not finalized until the allocation point was full.) See + job003773_. + + .. _job003773: https://www.ravenbrook.com/project/mps/issue/job003773/ .. _release-notes-1.113: @@ -97,6 +129,44 @@ Other changes Release 1.113.0 --------------- +New features +............ + +#. 
In previous releases there was an implicit connection between + blocks allocated by :ref:`pool-awl` and :ref:`pool-lo` pools, and + blocks allocated by other automatically managed pool classes. + + In particular, blocks allocated by AWL and LO pools were garbage + collected together with blocks allocated by :ref:`pool-ams` pools, + and blocks allocated by :ref:`pool-amc` pools in generation 1 of + their chains. + + This is no longer the case: to arrange for blocks to be collected + together you need to ensure that they are allocated in the *same* + generation chain, using the :c:macro:`MPS_KEY_CHAIN` and + :c:macro:`MPS_KEY_GEN` keyword arguments to + :c:func:`mps_pool_create_k`. + + So if you have code like this:: + + res = mps_pool_create(&my_amc, arena, mps_class_amc(), my_chain); + res = mps_pool_create(&my_awl, arena, mps_class_awl()); + + and you want to retain the connection between these pools, then you + must ensure that they use the same generation chain:: + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, my_chain); + res = mps_pool_create_k(&my_amc, arena, mps_class_amc(), args); + } MPS_ARGS_END(args); + + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_CHAIN, my_chain); + MPS_ARGS_ADD(args, MPS_KEY_GEN, 1); + res = mps_pool_create_k(&my_awl, arena, mps_class_awl(), args); + } MPS_ARGS_END(args); + + Interface changes ................. @@ -169,17 +239,23 @@ Interface changes #. Functions that take a variable number of arguments (:c:func:`mps_arena_create`, :c:func:`mps_pool_create`, - :c:func:`mps_ap_create`, :c:func:`mps_fmt_create_A`) and their - ``va_list`` alternatives (:c:func:`mps_arena_create_v` etc.) are - now deprecated in favour of functions that use a :term:`keyword - argument` interface (:c:func:`mps_arena_create_k`, - :c:func:`mps_pool_create_k`, :c:func:`mps_ap_create_k`, - :c:func:`mps_fmt_create_k`). The new interface provides better - reporting of errors, provides default values for arguments, and - provides forward compatibility. See :ref:`topic-keyword`. + :c:func:`mps_ap_create`) and their ``va_list`` alternatives + (:c:func:`mps_arena_create_v` etc.) are now deprecated in favour of + functions that use a :term:`keyword argument` interface + (:c:func:`mps_arena_create_k`, :c:func:`mps_pool_create_k`, + :c:func:`mps_ap_create_k`). - The old interface continues to be supported, but new features will - become available through the keyword interface only. + Similarly, the object format variant structures + (:c:type:`mps_fmt_A_s` etc.) and the functions that take them as + arguments (:c:func:`mps_fmt_create_A` etc.) are now deprecated in + favour of :c:func:`mps_fmt_create_k`. + + The new interfaces provide better reporting of errors, default + values for arguments, and forward compatibility. See + :ref:`topic-keyword`. + + The old interfaces continue to be supported for now, but new + features will become available through the keyword interface only. #. :ref:`pool-mfs` pools no longer refuse to manage blocks that are smaller than the platform alignment. They now round up smaller @@ -213,8 +289,9 @@ Other changes .. _job003435: https://www.ravenbrook.com/project/mps/issue/job003435/ -#. :ref:`pool-mvt` no longer triggers an assertion failure when it - runs out of space on its reserved block queue. See job003486_. +#. An :ref:`pool-mvt` pool no longer triggers an assertion failure + when it runs out of space on its reserved block queue. See + job003486_. .. 
_job003486: https://www.ravenbrook.com/project/mps/issue/job003486/ @@ -251,13 +328,13 @@ New features ` loads an event stream into a SQLite database for further analysis. See :ref:`topic-telemetry`. -#. The new pool class MFS provide manually managed allocation of - fixed-size objects. See :ref:`pool-mfs`. +#. The new pool class :ref:`pool-mfs` provides manually managed + allocation of fixed-size objects. -#. The new pool class MVT provide manually managed allocation of - variable-size objects using a *temporal fit* allocation policy - (that is, objects that are allocated togther are expected to be - freed together). See :ref:`pool-mvt`. +#. The new pool class :ref:`pool-mvt` provides manually managed + allocation of variable-size objects using a *temporal fit* + allocation policy (that is, objects that are allocated together are + expected to be freed together). Interface changes diff --git a/mps/manual/source/themes/mmref/layout.html new file mode 100644 index 00000000000..961baeda428 --- /dev/null +++ b/mps/manual/source/themes/mmref/layout.html @@ -0,0 +1,41 @@ +{# + scrolls/layout.html + ~~~~~~~~~~~~~~~~~~~ + + Sphinx layout template for the scrolls theme, originally written + by Armin Ronacher. + + :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +#} +{%- extends "basic/layout.html" %} +{% set script_files = script_files + ['_static/theme_extras.js'] %} +{% set css_files = css_files + ['_static/print.css'] %} +{# do not display relbars #} +{% block relbar1 %}{% endblock %} +{% block relbar2 %}{% endblock %} +{% block content %} +
+ +
+ {%- if prev and '/' not in prev.link and 'mmref-' not in prev.link %} + « {{ prev.title }} | + {%- endif %} + {{ title }} + {%- if next and '/' not in next.link and 'mmref-' not in next.link %} + | {{ next.title }} » + {%- endif %} +
+
+ {%- if display_toc %} +
+

{{ _('Table Of Contents') }}

+ {{ toc }} +
+ {%- endif %} + {% block body %}{% endblock %} +
+
+{% endblock %} diff --git a/mps/manual/source/themes/mmref/static/metal.png b/mps/manual/source/themes/mmref/static/metal.png new file mode 100644 index 00000000000..2f9f1ad0847 Binary files /dev/null and b/mps/manual/source/themes/mmref/static/metal.png differ diff --git a/mps/manual/source/themes/mmref/static/mmref.css_t b/mps/manual/source/themes/mmref/static/mmref.css_t new file mode 100644 index 00000000000..f16758a7391 --- /dev/null +++ b/mps/manual/source/themes/mmref/static/mmref.css_t @@ -0,0 +1,161 @@ +/* -*- css -*- */ + +@import url('scrolls.css'); + +sup { + vertical-align: top; + font-size: 80%; +} + +dl.glossary dt { + font-family: {{ theme_headfont }}; +} + +div.header { + background-image: none; + background-color: {{ theme_headerbg }}; + border-top: none; +} + +h1.heading { + height: auto; + text-align: center; + padding-top: 10px; + padding-bottom: 10px; +} + +h1.heading:hover { + background: {{ theme_headerhover }}; +} + +h1.heading a { + background-image: none; + display: block; + width: 100%; + height: auto; + font-size: 150%; +} + +h1.heading span { + display: block; + color: {{ theme_textcolor }}; +} + +a, a:visited, a.reference.internal { + text-decoration: none; +} + +a.reference em { + font-style: normal; +} + +a.reference.internal:hover { + text-decoration: none; + border-bottom: 1px solid {{ theme_underlinecolor }}; +} + +.xref.std-term { + font-style: normal; + color: {{ theme_textcolor }}; + border-bottom: 1px dotted {{ theme_underlinecolor }}; +} + +div.seealso, div.admonition { + background: url(metal.png); + border: none; +} + +p.admonition_title:after { + content: ":"; +} + +div.admonition p.admonition-title + p + p { + margin-top: 1em; +} + +div.figure { + margin-top: 1em; + margin-bottom: 1em; +} + +div.figure img { + max-width: 100%; +} + +.align-center { + text-align: center; +} + +img.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +dl.glossary dt { + font-size: 120%; + margin-top: 1em; +} + +p.glossary-alphabet { + font-weight: bold; + text-align: center; +} + + +div.admonition-ref-glossary, div.admonition-ref-bibliography, div.admonition-ref-mmref-intro, div.admonition-ref-mmref-faq { + width: 45%; + display: inline-block; + vertical-align: top; +} + +div.admonition-ref-glossary, div.admonition-ref-mmref-intro { + height:400px; +} + +div.admonition-ref-bibliography, div.admonition-ref-mmref-faq { + height:230px; +} + +div.admonition-ref-glossary, div.admonition-ref-bibliography { + margin-right: 1%; +} + +div.admonition-ref-mmref-intro, div.admonition-ref-mmref-faq { + margin-left: 1%; +} + +div.admonition a.image-reference img { + width: 90%; + margin-left: 5%; + margin-top: 5px; +} + +div#home h1 { + display: none; +} + +div#home h1 + p { + margin-top: 0; + padding-top: 15px; +} + +/* Format the glossary index in two columns. 
*/ + +div#memory-management-glossary div#all { + -webkit-columns: 2; + -moz-columns: 2; + -o-columns: 2; + -ms-columns: 2; + columns: 2; + padding-top: 1em; +} + +div#memory-management-glossary div#all h2 { + display: none; +} + +div#memory-management-glossary div#all a.reference.internal:after { + content: "\A"; + white-space: pre; +} diff --git a/mps/manual/source/themes/mmref/static/watermark.png b/mps/manual/source/themes/mmref/static/watermark.png new file mode 100644 index 00000000000..5bca5d2008b Binary files /dev/null and b/mps/manual/source/themes/mmref/static/watermark.png differ diff --git a/mps/manual/source/themes/mmref/static/watermark.svg b/mps/manual/source/themes/mmref/static/watermark.svg new file mode 100644 index 00000000000..ca9159234e1 --- /dev/null +++ b/mps/manual/source/themes/mmref/static/watermark.svg @@ -0,0 +1,237 @@ + + + + + + + + + + image/svg+xml + + + + + + + + 2e2f 6d70 7369 2e63 0073 697a 6520 3e203000 6d70 735f 6172 656e 615f 6f20 213d204e 554c 4c00 6d70 735f 706f 6f6c 5f6f2021 3d20 4e55 4c4c 006d 7073 5f66 6d745f6f 2021 3d20 4e55 4c4c 006d 7073 5f666d74 5f41 2021 3d20 4e55 4c4c 006d 70735f66 6d74 5f42 2021 3d20 4e55 4c4c 006d7073 5f66 6d74 2021 3d20 4e55 4c4c 006d7073 5f66 6d74 5f66 6978 6564 2021 3d204e55 4c4c 0054 4553 5454 2846 6f72 6d61742c 2066 6f72 6d61 7429 0054 4553 54542850 6f6f 6c2c 2070 6f6f 6c29 0070 5f6f2021 3d20 4e55 4c4c 006d 7073 5f61 705f6f20 213d 204e 554c 4c00 6d70 735f 61702021 3d20 4e55 4c4c 0054 4553 5454 28427566 6665 722c 2062 7566 2900 5445 53545428 4275 6666 6572 2c20 4275 6666 65724f66 4150 286d 7073 5f61 7029 2900 6d70735f 6170 2d3e 696e 6974 203d 3d20 6d70735f 6170 2d3e 616c 6c6f 6300 7020 213d204e 554c 4c00 7020 3d3d 206d 7073 5f61702d 3e69 6e69 7400 2876 6f69 6420 2a292828 6368 6172 202a 296d 7073 5f61 702d3e69 6e69 7420 2b20 7369 7a65 2920 3d3d206d 7073 5f61 702d 3e61 6c6c 6f63 00667261 6d65 5f6f 2021 3d20 4e55 4c4c 0053697a 6549 7341 6c69 676e 6564 2873 697a652c 2042 7566 6665 7250 6f6f 6c28 62756629 2d3e 616c 6967 6e6d 656e 7429 006d7073 5f73 6163 5f6f 2021 3d20 4e55 4c4c0054 4553 5454 2853 4143 2c20 7361 63290054 4553 5454 2853 4143 2c20 5341 434f6645 7874 6572 6e61 6c53 4143 286d 7073 + + diff --git a/mps/manual/source/themes/mmref/theme.conf b/mps/manual/source/themes/mmref/theme.conf new file mode 100644 index 00000000000..e0ef30ece1b --- /dev/null +++ b/mps/manual/source/themes/mmref/theme.conf @@ -0,0 +1,18 @@ +# Colour scheme: + +[theme] +inherit = scrolls +stylesheet = mmref.css + +[options] +headerbg = transparent +headerhover = #81A8B8 +subheadlinecolor = #000000 +linkcolor = #5D7985 +visitedlinkcolor = #5D7985 +admonitioncolor = #A4BCC2 +textcolor = #000000 +underlinecolor = #A4BCC2 + +bodyfont = 'Optima', sans-serif +headfont = 'Verdana', sans-serif diff --git a/mps/manual/source/themes/mps/static/mps.css_t b/mps/manual/source/themes/mps/static/mps.css_t index b21939b7af6..5904c165580 100644 --- a/mps/manual/source/themes/mps/static/mps.css_t +++ b/mps/manual/source/themes/mps/static/mps.css_t @@ -139,6 +139,9 @@ dl.glossary dt, dl.type dt, dl.function dt, dl.macro dt { margin-top: 2em; margin-bottom: 1em; font-size: 120%; +} + +dl.type dt, dl.function dt, dl.macro dt { /* Use a hanging indent so that long wrapped prototypes are easier to read. 
*/ padding-left: 4em; text-indent: -4em; @@ -185,7 +188,7 @@ p.glossary-alphabet { } sup { - vertical-align: 20%; + vertical-align: top; font-size: 80%; } @@ -217,3 +220,22 @@ li.toctree-l1, li.toctree-l2, li.toctree-l3 { padding-top: 0 !important; } +/* Format the glossary index in two columns. */ + +div#memory-management-glossary div#all { + -webkit-columns: 2; + -moz-columns: 2; + -o-columns: 2; + -ms-columns: 2; + columns: 2; + padding-top: 1em; +} + +div#memory-management-glossary div#all h2 { + display: none; +} + +div#memory-management-glossary div#all a.reference.internal:after { + content: "\A"; + white-space: pre; +} diff --git a/mps/manual/source/topic/arena.rst b/mps/manual/source/topic/arena.rst index 86e0a39550f..8fd61fed6bf 100644 --- a/mps/manual/source/topic/arena.rst +++ b/mps/manual/source/topic/arena.rst @@ -236,7 +236,7 @@ Virtual memory arenas accepts one :term:`keyword argument` on all platforms: * :c:macro:`MPS_KEY_ARENA_SIZE` (type :c:type:`size_t`, default - 2\ :superscript:`20`) is the initial amount of virtual address + 256\ :term:`megabytes`) is the initial amount of virtual address space, in :term:`bytes (1)`, that the arena will reserve (this space is initially reserved so that the arena can subsequently use it without interference from other parts of the program, but diff --git a/mps/manual/source/topic/cache.rst b/mps/manual/source/topic/cache.rst index fc523161a21..589e5e86db0 100644 --- a/mps/manual/source/topic/cache.rst +++ b/mps/manual/source/topic/cache.rst @@ -170,9 +170,9 @@ Cache interface The size classes are described by an array of element type :c:type:`mps_sac_class_s`. This array is used to initialize the - segregated allocation cache, and is not needed - after:c:func:`mps_sac_create` returns. The following constraints - apply to the array: + segregated allocation cache, and is not needed after + :c:func:`mps_sac_create` returns. The following constraints apply + to the array: * You must specify at least one size class. diff --git a/mps/manual/source/topic/collection.rst b/mps/manual/source/topic/collection.rst index adf7c058e9e..f061d946ad8 100644 --- a/mps/manual/source/topic/collection.rst +++ b/mps/manual/source/topic/collection.rst @@ -134,8 +134,12 @@ For example:: ``chain`` is the generation chain. - It is an error to destroy a generation chain if there exists a - :term:`pool` using the chain. The pool must be destroyed first. + It is an error to destroy a generation chain if there is a garbage + collection in progress on the chain, or if there are any + :term:`pools` using the chain. Before calling this function, the + arena should be parked (by calling :c:func:`mps_arena_park`) to + ensure that there are no collections in progress, and pools using + the chain must be destroyed. .. index:: diff --git a/mps/manual/source/topic/debugging.rst b/mps/manual/source/topic/debugging.rst index 70494aa20cf..61025e31367 100644 --- a/mps/manual/source/topic/debugging.rst +++ b/mps/manual/source/topic/debugging.rst @@ -50,9 +50,9 @@ debugging: for the pattern at any time by calling :c:func:`mps_pool_check_free_space`. -The :term:`client program` specifies templates for both of these -features via the :c:type:`mps_pool_debug_option_s` structure. This -allows it to specify patterns: +The :term:`client program` may optionally specify templates for both +of these features via the :c:type:`mps_pool_debug_option_s` structure. 
+This allows it to specify patterns: * that mimic illegal data values; @@ -66,8 +66,8 @@ allows it to specify patterns: For example:: mps_pool_debug_option_s debug_options = { - (const void *)"postpost", 8, - (const void *)"freefree", 8, + "fencepost", 9, + "free", 4, }; mps_pool_t pool; mps_res_t res; @@ -81,7 +81,7 @@ For example:: .. c:type:: mps_pool_debug_option_s - The type of the structure passed as the + The type of the structure passed as the value for the optional :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` keyword argument to :c:func:`mps_pool_create_k` when creating a debugging :term:`pool class`. :: @@ -104,10 +104,6 @@ For example:: ``free_size`` is the :term:`size` of ``free_template`` in bytes, or zero if the debugging pool should not splat free space. - Both ``fence_size`` and ``free_size`` must be a multiple of the - :term:`alignment` of the :term:`pool`, and also a multiple of the - alignment of the pool's :term:`object format` if it has one. - The debugging pool will copy the ``fence_size`` bytes pointed to by ``fence_template`` in a repeating pattern onto each fencepost during allocation, and it will copy the bytes pointed to by @@ -118,6 +114,13 @@ For example:: pieces smaller than the given size, for example to pad out part of a block that was left unused because of alignment requirements. + If the client omits to pass the + :c:macro:`MPS_KEY_POOL_DEBUG_OPTIONS` keyword argument to + :c:func:`mps_pool_create_k`, then the fencepost template consists + of the four bytes ``50 4F 53 54`` (``POST`` in ASCII), and the + free space template consists of the four bytes ``46 52 45 45`` + (``FREE`` in ASCII). + .. c:function:: void mps_pool_check_fenceposts(mps_pool_t pool) diff --git a/mps/manual/source/topic/error.rst b/mps/manual/source/topic/error.rst index 29fefc6e1b6..e65f6eafebd 100644 --- a/mps/manual/source/topic/error.rst +++ b/mps/manual/source/topic/error.rst @@ -233,6 +233,13 @@ cause), please :ref:`let us know ` so that we can improve this documentation. +``buffer.c: BufferIsReady(buffer)`` + + The client program called :c:func:`mps_reserve` twice on the same + :term:`allocation point` without calling :c:func:`mps_commit`. See + :ref:`topic-allocation-point-protocol`. + + ``dbgpool.c: fencepost check on free`` The client program wrote to a location after the end, or before @@ -262,6 +269,15 @@ this documentation. :term:`format methods` and :term:`stepper functions`. +``locus.c: chain->activeTraces == TraceSetEMPTY)`` + + The client program called :c:func:`mps_chain_destroy`, but there + was a garbage collection in progress on that chain. + + Park the arena before destroying the chain by calling + :c:func:`mps_arena_park`. + + ``mpsi.c: SizeIsAligned(size, BufferPool(buf)->alignment)`` The client program reserved a block by calling @@ -269,7 +285,7 @@ this documentation. alignment required by the pool's :term:`object format`. -``pool.c: (pool->class->attr & AttrALLOC) != 0`` +``pool.c: PoolHasAttr(pool, AttrALLOC)`` The client program called :c:func:`mps_alloc` on a pool that does not support this form of allocation. Use an :term:`allocation diff --git a/mps/manual/source/topic/finalization.rst b/mps/manual/source/topic/finalization.rst index 745feefed16..5b65d9945dd 100644 --- a/mps/manual/source/topic/finalization.rst +++ b/mps/manual/source/topic/finalization.rst @@ -51,7 +51,7 @@ the block was allocated. to do the finalization. 
In such an implementation, the client program's finalization code may end up running concurrently with other code that accesses the underlying resource, and so access to - the resource need to be guarded with a lock, but then an unlucky + the resource needs to be guarded with a lock, but then an unlucky scheduling of finalization can result in deadlock. See :ref:`Boehm (2002) ` for a detailed discussion of this issue. @@ -170,8 +170,9 @@ Cautions #. The MPS does not finalize objects in the context of :c:func:`mps_arena_destroy` or :c:func:`mps_pool_destroy`. - :c:func:`mps_pool_destroy` should therefore not be invoked on pools - containing objects registered for finalization. + Moreover, if you have pools containing objects registered for + finalization, you must destroy these pools by following the “safe + tear-down” procedure described under :c:func:`mps_pool_destroy`. .. note:: @@ -189,11 +190,6 @@ Cautions .. note:: - You can safely destroy pools containing objects registered for - finalization if you follow the "safe tear-down" procedure - described under :c:func:`mps_pool_destroy`, but the objects do - not get finalized. - The only reliable way to ensure that all finalizable objects are finalized is to maintain a table of :term:`weak references (1)` to all such objects. The weak references don't diff --git a/mps/manual/source/topic/interface.rst b/mps/manual/source/topic/interface.rst index a939ddf302b..54637557f60 100644 --- a/mps/manual/source/topic/interface.rst +++ b/mps/manual/source/topic/interface.rst @@ -194,7 +194,7 @@ out parameter, like this:: res = mps_alloc((mps_addr_t *)&fp, pool, sizeof(struct foo)); This is known as :term:`type punning`, and its behaviour is not -defined in ANSI/ISO Standard C. See :ref:`ISO/IEC 9899:1990 ` +defined in ANSI/ISO Standard C. See :ref:`ISO/IEC 9899:1990 ` §6.3.2.3, which defines the conversion of a pointer from one type to another: the behaviour of this cast is not covered by any of the cases in the standard. @@ -209,7 +209,7 @@ Instead, we recommend this approach:: This has defined behaviour because conversion from ``void *`` to any other :term:`object pointer` type is defined by :ref:`ISO/IEC -9899:1990 ` §6.3.2.3.1. +9899:1990 ` §6.3.2.3.1. .. index:: @@ -219,7 +219,7 @@ Macros ------ #. For function-like macros, the MPS follows the same convention as - the Standard C library. To quote :ref:`ISO/IEC 9899:1990 ` + the Standard C library. To quote :ref:`ISO/IEC 9899:1990 ` §7.1.7: Any function declared in a header may additionally be diff --git a/mps/manual/source/topic/keyword.rst b/mps/manual/source/topic/keyword.rst index 3b3fbcb01ad..a97201b4fd9 100644 --- a/mps/manual/source/topic/keyword.rst +++ b/mps/manual/source/topic/keyword.rst @@ -86,7 +86,7 @@ now :c:macro:`MPS_KEY_ARGS_END`. 
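As a sketch of the :c:macro:`MPS_KEY_ALIGN` entry in the keyword table that follows (which now also applies to :c:func:`mps_class_mv`), the fragment below creates an MV pool whose blocks are aligned to 8 bytes, leaving all other keyword arguments at their defaults. The name ``arena`` is assumed to refer to an existing arena, and the helper function is hypothetical::

    #include "mps.h"
    #include "mpscmv.h"

    /* Sketch: create an MV pool with 8-byte alignment (the default
       would be MPS_PF_ALIGN). */
    static mps_res_t make_aligned_mv_pool(mps_pool_t *pool_o, mps_arena_t arena)
    {
        mps_res_t res;
        MPS_ARGS_BEGIN(args) {
            MPS_ARGS_ADD(args, MPS_KEY_ALIGN, 8);
            res = mps_pool_create_k(pool_o, arena, mps_class_mv(), args);
        } MPS_ARGS_END(args);
        return res;
    }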
Keyword Type & field in ``arg.val`` See ======================================== ====================================================== ========================================================== :c:macro:`MPS_KEY_ARGS_END` *none* *see above* - :c:macro:`MPS_KEY_ALIGN` :c:type:`mps_align_t` ``align`` :c:func:`mps_class_mvff`, :c:func:`mps_class_mvt` + :c:macro:`MPS_KEY_ALIGN` :c:type:`mps_align_t` ``align`` :c:func:`mps_class_mv`, :c:func:`mps_class_mvff`, :c:func:`mps_class_mvt` :c:macro:`MPS_KEY_AMS_SUPPORT_AMBIGUOUS` :c:type:`mps_bool_t` ``b`` :c:func:`mps_class_ams` :c:macro:`MPS_KEY_ARENA_CL_BASE` :c:type:`mps_addr_t` ``addr`` :c:func:`mps_arena_class_cl` :c:macro:`MPS_KEY_ARENA_SIZE` :c:type:`size_t` ``size`` :c:func:`mps_arena_class_vm`, :c:func:`mps_arena_class_cl` diff --git a/mps/manual/source/topic/plinth.rst b/mps/manual/source/topic/plinth.rst index a7e94d21a0b..daba2ad62de 100644 --- a/mps/manual/source/topic/plinth.rst +++ b/mps/manual/source/topic/plinth.rst @@ -296,7 +296,7 @@ Library module This function is intended to have the same semantics as the :c:func:`fputc` function of the ANSI C Standard (:ref:`ISO/IEC - 9899:1990 ` §7.11.7.3). + 9899:1990 ` §7.11.7.3). .. note:: @@ -314,7 +314,7 @@ Library module This function is intended to have the same semantics as the :c:func:`fputs` function of the ANSI C Standard (:ref:`ISO/IEC - 9899:1990 ` §7.11.7.4). + 9899:1990 ` §7.11.7.4). Return a non-negative integer if successful, or :c:func:`mps_lib_get_EOF` if not. @@ -383,7 +383,7 @@ Library module This function is intended to have the same semantics as the :c:func:`memcmp` function of the ANSI C Standard (:ref:`ISO/IEC - 9899:1990 ` §7.11.4.1). + 9899:1990 ` §7.11.4.1). .. note:: @@ -406,7 +406,7 @@ Library module This function is intended to have the same semantics as the :c:func:`memcpy` function of the ANSI C Standard (:ref:`ISO/IEC - 9899:1990 ` §7.11.2.1). + 9899:1990 ` §7.11.2.1). The MPS never passes overlapping blocks to :c:func:`mps_lib_memcpy`. @@ -432,7 +432,7 @@ Library module This function is intended to have the same semantics as the :c:func:`memset` function of the ANSI C Standard (:ref:`ISO/IEC - 9899:1990 ` §7.11.6.1). + 9899:1990 ` §7.11.6.1). .. note:: diff --git a/mps/manual/source/topic/scanning.rst b/mps/manual/source/topic/scanning.rst index 474ec637dd8..a36da60bf0c 100644 --- a/mps/manual/source/topic/scanning.rst +++ b/mps/manual/source/topic/scanning.rst @@ -60,8 +60,8 @@ region to be scanned. They must carry out the following steps: function as soon as practicable. #. If :c:func:`MPS_FIX2` returns :c:macro:`MPS_RES_OK`, it may have - updated the reference. If necessary, make sure that the updated - reference is stored back to the region being scanned. + updated the reference. Make sure that the updated reference is + stored back into the region being scanned. #. Call the macro :c:func:`MPS_SCAN_END` on the scan state. @@ -463,15 +463,18 @@ Fixing interface :term:`Fix` a :term:`reference`. - ``ss`` is the :term:`scan state` that was passed to the scan method. + ``ss`` is the :term:`scan state` that was passed to the + :term:`scan method`. ``ref_io`` points to the reference. - Returns :c:macro:`MPS_RES_OK` if successful: in this case the - reference may have been updated, and the scan method must continue - to scan the :term:`block`. If it returns any other result, the - :term:`scan method` must return that result as soon as possible, - without fixing any further references. + Returns :c:macro:`MPS_RES_OK` if successful. 
In this case the + reference may have been updated, and so the scan method must store + the updated reference back to the region being scanned. The scan + method must continue to scan the :term:`block`. + + If it returns any other result, the scan method must return that + result as soon as possible, without fixing any further references. This macro must only be used within a :term:`scan method`, between :c:func:`MPS_SCAN_BEGIN` and :c:func:`MPS_SCAN_END`. diff --git a/mps/procedure/release-build.rst b/mps/procedure/release-build.rst index ee65b5f645d..6624e0b8341 100644 --- a/mps/procedure/release-build.rst +++ b/mps/procedure/release-build.rst @@ -102,6 +102,10 @@ All relative paths are relative to On other platforms they are as shown above. +#. Check that there are no performance regressions by comparing the + benchmarks (``djbench`` and ``gcbench``) for the last release and + this one. + 5. Making the release (automated procedure) ------------------------------------------- diff --git a/mps/test/function/10.c b/mps/test/function/10.c index 912901881a5..47cb48f5e0d 100644 --- a/mps/test/function/10.c +++ b/mps/test/function/10.c @@ -10,6 +10,7 @@ END_HEADER #include "testlib.h" #include "mpscamc.h" +#define OBJSIZE (1u << 20) #define genCOUNT (3) static mps_gen_param_s testChain[genCOUNT] = { @@ -26,7 +27,7 @@ static mps_res_t myscan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) static mps_addr_t myskip(mps_addr_t object) { - return (mps_addr_t) ((char *) object + 1); + return (mps_addr_t) ((char *) object + OBJSIZE); } static void mycopy(mps_addr_t object, mps_addr_t to) @@ -99,12 +100,13 @@ static void test(void) for(i=0; i<1000; i++) { do - { die(mps_reserve(&p, ap, 1024*1024), "Reserve: "); + { die(mps_reserve(&p, ap, OBJSIZE), "Reserve: "); } - while (!mps_commit(ap, p, 1024*1024)); + while (!mps_commit(ap, p, OBJSIZE)); comment("%i megabytes allocated", i); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_fmt_destroy(format); diff --git a/mps/test/function/103.c b/mps/test/function/103.c index c8c27225160..fc4a7fa160a 100644 --- a/mps/test/function/103.c +++ b/mps/test/function/103.c @@ -136,6 +136,7 @@ static void test(void) mps_arena_collect(arena); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/104.c b/mps/test/function/104.c index b267b9b07fe..6faba1afab9 100644 --- a/mps/test/function/104.c +++ b/mps/test/function/104.c @@ -213,6 +213,7 @@ static void test(void) comment("ok"); } + mps_arena_park(arena); mps_ap_destroy(apamc); mps_ap_destroy(aplo); mps_ap_destroy(apawl); diff --git a/mps/test/function/105.c b/mps/test/function/105.c index 888a0f74fa3..c3bf5c69a16 100644 --- a/mps/test/function/105.c +++ b/mps/test/function/105.c @@ -67,6 +67,7 @@ static void test(void) b = allocone(apamc, 1, mps_rank_exact()); a = allocone(apweak, 1, mps_rank_weak()); + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_ap_destroy(apweak); diff --git a/mps/test/function/106.c b/mps/test/function/106.c index 8f751cfaeb2..2ba628970c0 100644 --- a/mps/test/function/106.c +++ b/mps/test/function/106.c @@ -102,6 +102,7 @@ static void test(void) c = conc(string_ch("Hello there"), string_ch(" folks!")); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/107.c b/mps/test/function/107.c index 0a83d04a15d..91b5f02fc21 100644 --- a/mps/test/function/107.c +++ b/mps/test/function/107.c @@ -103,6 
+103,7 @@ static void test(void) z = alloclo(ap, 0x4000); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/108.c b/mps/test/function/108.c index 10631e5eb5c..6dab9f5839d 100644 --- a/mps/test/function/108.c +++ b/mps/test/function/108.c @@ -82,6 +82,7 @@ static void test(void) b = allocdumb(apamc, 0x400*64, 0); } + mps_arena_park(arena); mps_ap_destroy(aplo); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/109.c b/mps/test/function/109.c index 0fccb523989..bccb5cfec7f 100644 --- a/mps/test/function/109.c +++ b/mps/test/function/109.c @@ -267,6 +267,7 @@ static void test(void) report("count2", "%d", final_count); + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_ap_destroy(aplo); diff --git a/mps/test/function/11.c b/mps/test/function/11.c index 286b06a385f..72009080e7f 100644 --- a/mps/test/function/11.c +++ b/mps/test/function/11.c @@ -77,6 +77,7 @@ static void test(void) comment("%d: %x", j, (int) a); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/110.c b/mps/test/function/110.c index e61536040de..bbf5aa0dc6d 100644 --- a/mps/test/function/110.c +++ b/mps/test/function/110.c @@ -262,6 +262,7 @@ static void test(void) finalpoll(&z, FINAL_DISCARD); } + mps_arena_park(arena); mps_root_destroy(root0); mps_root_destroy(root1); comment("Destroyed roots."); @@ -280,6 +281,7 @@ static void test(void) report("count2", "%d", final_count); + mps_arena_park(arena); mps_pool_destroy(poolamc); mps_pool_destroy(poolawl); mps_pool_destroy(poollo); diff --git a/mps/test/function/111.c b/mps/test/function/111.c index f7544f82917..6117a82d29c 100644 --- a/mps/test/function/111.c +++ b/mps/test/function/111.c @@ -193,6 +193,7 @@ static void test(void) /* now to test leaving messages open for a long time! 
*/ + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_ap_destroy(aplo); diff --git a/mps/test/function/112.c b/mps/test/function/112.c index 6626add3d9e..e22a67e4dfc 100644 --- a/mps/test/function/112.c +++ b/mps/test/function/112.c @@ -67,6 +67,7 @@ static void test(void) { /* (total allocated is 1000 M) */ + mps_arena_park(arena); mps_root_destroy(root0); mps_root_destroy(root1); comment("Destroyed roots."); diff --git a/mps/test/function/113.c b/mps/test/function/113.c index 83a42fb1709..a897c7fb60a 100644 --- a/mps/test/function/113.c +++ b/mps/test/function/113.c @@ -73,9 +73,9 @@ static void test(void) b = allocone(apamc, 1, mps_rank_exact()); - for (j=1; j<100; j++) + for (j=1; j<=10; j++) { - comment("%i of 100.", j); + comment("%i of 10.", j); a = allocone(apamc, 5, mps_rank_exact()); b = a; c = a; @@ -100,6 +100,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git a/mps/test/function/114.c b/mps/test/function/114.c index 6510796a9bb..c273f30cd52 100644 --- a/mps/test/function/114.c +++ b/mps/test/function/114.c @@ -73,9 +73,9 @@ static void test(void) b = allocone(apamc, 1, mps_rank_exact()); - for (j=1; j<100; j++) + for (j=1; j<=10; j++) { - comment("%i of 100.", j); + comment("%i of 10.", j); a = allocone(apamc, 5, mps_rank_exact()); b = a; c = a; @@ -100,6 +100,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git a/mps/test/function/116.c b/mps/test/function/116.c index b065af6103e..8c87bae4458 100644 --- a/mps/test/function/116.c +++ b/mps/test/function/116.c @@ -42,6 +42,7 @@ static void test(void) cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*30)), "create arena"); + die(mps_arena_commit_limit_set(arena, 1ul << 20), "commit_limit_set"); cdie(mps_thread_reg(&thread, arena), "register thread"); @@ -96,6 +97,7 @@ static void test(void) report("postdie", "%s", err_text(res)); } + mps_arena_park(arena); mps_ap_destroy(ap2); comment("Destroyed ap."); diff --git a/mps/test/function/118.c b/mps/test/function/118.c index 4185a586dd6..a77ec4f8f8b 100644 --- a/mps/test/function/118.c +++ b/mps/test/function/118.c @@ -127,6 +127,7 @@ static void test(void) /* now simulate rest of commit */ (void)(busy_ap->limit != 0 || mps_ap_trip(busy_ap, busy_init, objSIZE)); + mps_arena_park(arena); mps_ap_destroy(busy_ap); mps_ap_destroy(ap); mps_pool_destroy(pool); diff --git a/mps/test/function/12.c b/mps/test/function/12.c index 9ac936dc763..5ebaec0dbb4 100644 --- a/mps/test/function/12.c +++ b/mps/test/function/12.c @@ -181,6 +181,7 @@ static void test(void) mps_ap_destroy(ap[i]); } + mps_arena_park(arena); mps_pool_destroy(pool); comment("Destroyed pool."); mps_chain_destroy(chain); diff --git a/mps/test/function/122.c b/mps/test/function/122.c index 4ba0c0c6090..349e5699628 100644 --- a/mps/test/function/122.c +++ b/mps/test/function/122.c @@ -156,6 +156,7 @@ static void test(void) report("count2", "%ld", rootcount); report("countspec", "%ld", speccount); + mps_arena_park(arena); mps_ap_destroy(apamc); mps_ap_destroy(aplo); mps_ap_destroy(apawl); diff --git a/mps/test/function/123.c b/mps/test/function/123.c index 280f05760cc..4166f30f685 100644 --- a/mps/test/function/123.c +++ b/mps/test/function/123.c @@ -95,6 +95,7 @@ static void test(void) setref(a, 0, b); } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git 
a/mps/test/function/124.c b/mps/test/function/124.c index 5b84bea864f..cb4620b49de 100644 --- a/mps/test/function/124.c +++ b/mps/test/function/124.c @@ -30,7 +30,7 @@ static mps_gen_param_s testChain[genCOUNT] = { #define BACKITER (32) #define RAMPSIZE (128) -#define ITERATIONS (1000000ul) +#define ITERATIONS (100000ul) #define RAMP_INTERFACE /* @@ -137,6 +137,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); mps_chain_destroy(chain); diff --git a/mps/test/function/125.c b/mps/test/function/125.c index d457f9adc99..1ca2f61fe4a 100644 --- a/mps/test/function/125.c +++ b/mps/test/function/125.c @@ -79,6 +79,7 @@ static void test(void) mps_arena_collect(arena); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/126.c b/mps/test/function/126.c index 988205232f9..f72256a1095 100644 --- a/mps/test/function/126.c +++ b/mps/test/function/126.c @@ -81,6 +81,7 @@ static void test(void) comment("reserved %ld, committed %ld", mps_arena_reserved(arena), mps_arena_committed(arena)); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/127.c b/mps/test/function/127.c index afdb013e7c5..55c67961987 100644 --- a/mps/test/function/127.c +++ b/mps/test/function/127.c @@ -24,7 +24,7 @@ END_HEADER #define BACKITER (32) #define RAMPSIZE (128) -#define ITERATIONS (1000000ul) +#define ITERATIONS (100000ul) /* #define RAMP_INTERFACE @@ -137,6 +137,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/128.c b/mps/test/function/128.c index bfb406a23a2..6414e41d976 100644 --- a/mps/test/function/128.c +++ b/mps/test/function/128.c @@ -24,7 +24,7 @@ END_HEADER #define BACKITER (32) #define RAMPSIZE (128) -#define ITERATIONS (1000000ul) +#define ITERATIONS (100000ul) /* #define RAMP_INTERFACE @@ -137,6 +137,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/129.c b/mps/test/function/129.c index dd59be650f5..94512cf6e16 100644 --- a/mps/test/function/129.c +++ b/mps/test/function/129.c @@ -24,7 +24,7 @@ END_HEADER #define BACKITER (32) #define RAMPSIZE (128) -#define ITERATIONS (1000000ul) +#define ITERATIONS (100000ul) #define RAMP_INTERFACE /* @@ -136,6 +136,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/12p.c b/mps/test/function/12p.c index f748b8eddb7..db389e7f14f 100644 --- a/mps/test/function/12p.c +++ b/mps/test/function/12p.c @@ -196,6 +196,7 @@ cells = allocone(ap[0], NCELLS); mps_ap_destroy(ap[i]); } + mps_arena_park(arena); mps_pool_destroy(pool); comment("Destroyed pool."); diff --git a/mps/test/function/13.c b/mps/test/function/13.c index cd7662a8098..aa5ca31b3ee 100644 --- a/mps/test/function/13.c +++ b/mps/test/function/13.c @@ -192,6 +192,7 @@ cells = allocone(ap[0], NCELLS); mps_ap_destroy(ap[i]); } + mps_arena_park(arena); mps_pool_destroy(pool); comment("Destroyed pool."); diff --git a/mps/test/function/130.c b/mps/test/function/130.c index 1a4a6da3fec..f871d5944f8 100644 --- a/mps/test/function/130.c +++ b/mps/test/function/130.c @@ -19,8 +19,6 @@ static mps_gen_param_s testChain[genCOUNT] = { { 6000, 0.90 }, { 8000, 0.65 }, { 16000, 0.50 } }; -void *stackpointer; - mps_pool_t poolmv; mps_arena_t arena; @@ -28,28 +26,18 @@ mps_arena_t 
arena; static void test(void) { mps_pool_t pool; - mps_thr_t thread; - mps_root_t root; - mps_fmt_t format; mps_chain_t chain; mps_ap_t ap, ap2; - - mycell *a, *b; - + mycell *a[2]; mps_res_t res; int i; /* create an arena that can't grow beyond 30 M */ cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*30)), "create arena"); - mps_arena_commit_limit_set(arena, (size_t) (1024*1024*40)); - - cdie(mps_thread_reg(&thread, arena), "register thread"); - cdie(mps_root_create_reg(&root, arena, mps_rank_ambig(), 0, thread, - mps_stack_scan_ambig, stackpointer, 0), - "create root"); + die(mps_arena_commit_limit_set(arena, 1u << 20), "commit_limit_set"); cdie(mps_fmt_create_A(&format, arena, &fmtA), "create format"); @@ -64,12 +52,14 @@ static void test(void) /* allocate until full */ i = 0; - b = NULL; + a[0] = a[1] = NULL; + cdie(mps_root_create_table(&root, arena, mps_rank_ambig(), 0, (void *)&a, 2), + "create root"); - while (allocrone(&a, ap, 128, mps_rank_exact()) == MPS_RES_OK) { + while (allocrone(&a[0], ap, 128, mps_rank_exact()) == MPS_RES_OK) { i++; - setref(a, 0, b); - b = a; + setref(a[0], 0, a[1]); + a[1] = a[0]; } comment("%d objs allocated.", i); @@ -81,7 +71,7 @@ static void test(void) mps_ap_destroy(ap); for (i = 0; i < 10; i++) { - res = allocrone(&a, ap2, 128, mps_rank_exact()); + res = allocrone(&a[0], ap2, 128, mps_rank_exact()); report("predie", "%s", err_text(res)); } @@ -90,17 +80,17 @@ static void test(void) mps_root_destroy(root); for (i = 0; i < 10; i++) { - res = allocrone(&a, ap2, 128, mps_rank_exact()); + res = allocrone(&a[0], ap2, 128, mps_rank_exact()); report("postdie", "%s", err_text(res)); } - die(allocrone(&a, ap2, 128, mps_rank_exact()), "alloc failed"); + die(allocrone(&a[0], ap2, 128, mps_rank_exact()), "alloc failed"); + mps_arena_park(arena); mps_ap_destroy(ap2); mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); - mps_thread_dereg(thread); mps_arena_destroy(arena); comment("Destroyed arena."); } @@ -108,9 +98,6 @@ static void test(void) int main(void) { - void *m; - stackpointer=&m; /* hack to get stack pointer */ - easy_tramp(test); pass(); return 0; diff --git a/mps/test/function/131.c b/mps/test/function/131.c index 5256efeb141..0b0ae7be024 100644 --- a/mps/test/function/131.c +++ b/mps/test/function/131.c @@ -24,8 +24,6 @@ static mps_gen_param_s testChain[genCOUNT] = { { 6000, 0.90 }, { 8000, 0.65 }, { 16000, 0.50 } }; -void *stackpointer; - mps_pool_t poolmv; mps_arena_t arena; @@ -33,28 +31,18 @@ mps_arena_t arena; static void test(void) { mps_pool_t pool; - mps_thr_t thread; - mps_root_t root; - mps_fmt_t format; mps_chain_t chain; mps_ap_t ap, ap2; - - mycell *a, *b; - + mycell *a[2]; mps_res_t res; int i; - /* create an arena that can't grow beyond 30 M */ - cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*40)), + /* create an arena that can't grow beyond 1 M */ + cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*4)), "create arena"); - mps_arena_commit_limit_set(arena, (size_t) (1024*1024*30)); - - cdie(mps_thread_reg(&thread, arena), "register thread"); - cdie(mps_root_create_reg(&root, arena, mps_rank_ambig(), 0, thread, - mps_stack_scan_ambig, stackpointer, 0), - "create root"); + mps_arena_commit_limit_set(arena, (size_t) (1024*1024*1)); cdie( mps_fmt_create_A(&format, arena, &fmtA), @@ -71,12 +59,14 @@ static void test(void) /* allocate until full */ i = 0; - b = NULL; + a[0] = a[1] = NULL; + cdie(mps_root_create_table(&root, arena, mps_rank_ambig(), 0, 
(void *)&a, 2), + "create root"); - while (allocrone(&a, ap, 128, mps_rank_exact()) == MPS_RES_OK) { + while (allocrone(&a[0], ap, 128, mps_rank_exact()) == MPS_RES_OK) { i++; - setref(a, 0, b); - b = a; + setref(a[0], 0, a[1]); + a[1] = a[0]; } comment("%d objs allocated.", i); @@ -88,7 +78,7 @@ static void test(void) mps_ap_destroy(ap); for (i = 0; i < 10; i++) { - res = allocrone(&a, ap2, 128, mps_rank_exact()); + res = allocrone(&a[0], ap2, 128, mps_rank_exact()); report("predie", "%s", err_text(res)); } @@ -97,15 +87,15 @@ static void test(void) mps_root_destroy(root); for (i = 0; i < 10; i++) { - res = allocrone(&a, ap2, 128, mps_rank_exact()); + res = allocrone(&a[0], ap2, 128, mps_rank_exact()); report("postdie", "%s", err_text(res)); } + mps_arena_park(arena); mps_ap_destroy(ap2); mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); - mps_thread_dereg(thread); mps_arena_destroy(arena); comment("Destroyed arena."); } @@ -113,9 +103,6 @@ static void test(void) int main(void) { - void *m; - stackpointer=&m; /* hack to get stack pointer */ - easy_tramp(test); pass(); return 0; diff --git a/mps/test/function/132.c b/mps/test/function/132.c index f7cae603ce8..b69e6cbcecf 100644 --- a/mps/test/function/132.c +++ b/mps/test/function/132.c @@ -23,8 +23,8 @@ OUTPUT_SPEC spill5 <= 0 grow5 = 0 avail5 > 1500000 - allocfail2 > 10000 - failres2 = MEMORY + allocfail2 > 5000 + failres2 = COMMIT_LIMIT shrink6 > 1000000 spill6 <= 0 completed = yes @@ -166,6 +166,7 @@ static void test(void) report("spill6", "%d", commit6-mps_arena_commit_limit(arena)); report("shrink6", "%d", avail5-avail6); + mps_arena_park(arena); mps_root_destroy(root); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/133.c b/mps/test/function/133.c index 0556d5cc9d3..1f717b11660 100644 --- a/mps/test/function/133.c +++ b/mps/test/function/133.c @@ -5,7 +5,7 @@ TEST_HEADER language = c link = testlib.o rankfmt.o OUTPUT_SPEC - allocfail3 > 8000 + allocfail3 > 3000 failres3 = COMMIT_LIMIT spill8 <= 0 spill9 <= 0 @@ -120,6 +120,7 @@ static void test(void) { /* destroy everything remaining */ + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/134.c b/mps/test/function/134.c index 178894d7316..5eee39e5c4f 100644 --- a/mps/test/function/134.c +++ b/mps/test/function/134.c @@ -24,7 +24,7 @@ END_HEADER #define BACKITER (32) #define RAMPSIZE (128) -#define ITERATIONS (1000000ul) +#define ITERATIONS (100000ul) #define RAMP_INTERFACE /* @@ -137,6 +137,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/138.c b/mps/test/function/138.c index 8c823c6c8a9..d4cfa5d5d4f 100644 --- a/mps/test/function/138.c +++ b/mps/test/function/138.c @@ -66,6 +66,7 @@ static void test(void) mps_arena_collect(arena); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/14.c b/mps/test/function/14.c index 9f0e9a4f26e..e77bafeb658 100644 --- a/mps/test/function/14.c +++ b/mps/test/function/14.c @@ -98,6 +98,7 @@ static void test(void) comment("Finished"); + mps_arena_park(arena); mps_ap_destroy(apA); mps_ap_destroy(apB); diff --git a/mps/test/function/147.c b/mps/test/function/147.c index 21c76bcdbdf..1911f05a54e 100644 --- a/mps/test/function/147.c +++ b/mps/test/function/147.c @@ -79,6 +79,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(sap); comment("Destroyed ap."); diff 
--git a/mps/test/function/148.c b/mps/test/function/148.c index ee80c9169bf..d78b9631450 100644 --- a/mps/test/function/148.c +++ b/mps/test/function/148.c @@ -124,6 +124,7 @@ static void test(void) report("inc4", "%ld", (com2-com1)/BIGSIZE); + mps_arena_park(arena); mps_ap_destroy(ap); mps_ap_destroy(sap); mps_pool_destroy(pool); diff --git a/mps/test/function/149.c b/mps/test/function/149.c index a9d47dac8dc..a26f510ebdc 100644 --- a/mps/test/function/149.c +++ b/mps/test/function/149.c @@ -23,8 +23,8 @@ OUTPUT_SPEC spill5 <= 0 grow5 = 0 avail5 > 1500000 - allocfail2 > 10000 - failres2 = MEMORY + allocfail2 > 5000 + failres2 = COMMIT_LIMIT shrink6 > 1000000 spill6 <= 0 completed = yes @@ -168,6 +168,7 @@ static void test(void) { report("spill6", "%d", commit6-mps_arena_commit_limit(arena)); report("shrink6", "%d", avail5-avail6); + mps_arena_park(arena); mps_root_destroy(root); comment("Destroyed root."); diff --git a/mps/test/function/15.c b/mps/test/function/15.c index e84ad74313b..cf04dd75545 100644 --- a/mps/test/function/15.c +++ b/mps/test/function/15.c @@ -54,6 +54,7 @@ static void test(void) allocdumb(ap, 1024*256); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); diff --git a/mps/test/function/150.c b/mps/test/function/150.c index 1ff43383d3e..40202f6b482 100644 --- a/mps/test/function/150.c +++ b/mps/test/function/150.c @@ -286,6 +286,7 @@ static void test(void) messagepoll(&z, FINAL_DISCARD); } + mps_arena_park(arena); mps_root_destroy(root0); mps_root_destroy(root1); comment("Destroyed roots."); @@ -304,6 +305,7 @@ static void test(void) report("count2", "%d", final_count); + mps_arena_park(arena); mps_pool_destroy(poolamc); mps_pool_destroy(poolawl); mps_pool_destroy(poollo); diff --git a/mps/test/function/151.c b/mps/test/function/151.c index c6bfb05de0b..9a1c20da6b2 100644 --- a/mps/test/function/151.c +++ b/mps/test/function/151.c @@ -59,6 +59,7 @@ static void test(void) comment("%i of %i", i, ITERATIONS); } + mps_arena_park(arena); mps_ap_destroy(sap); comment("Destroyed ap."); diff --git a/mps/test/function/152.c b/mps/test/function/152.c index 784a40a2a19..1836637668e 100644 --- a/mps/test/function/152.c +++ b/mps/test/function/152.c @@ -100,6 +100,7 @@ static void test(void) report("com", "%ld", com1); report("inc2", "%ld", (com1-com)/BIGSIZE); + mps_arena_park(arena); mps_ap_destroy(ap); mps_ap_destroy(sap); comment("Destroyed ap."); diff --git a/mps/test/function/153.c b/mps/test/function/153.c index 97dac39277f..fb23c98dee1 100644 --- a/mps/test/function/153.c +++ b/mps/test/function/153.c @@ -58,6 +58,7 @@ static void test(void) comment("%i of %i", i, ITERATIONS); } + mps_arena_park(arena); mps_ap_destroy(sap); comment("Destroyed ap."); diff --git a/mps/test/function/16.c b/mps/test/function/16.c index d89940f6542..f3088208377 100644 --- a/mps/test/function/16.c +++ b/mps/test/function/16.c @@ -88,6 +88,7 @@ static void test(void) comment("Finished"); + mps_arena_park(arena); mps_ap_destroy(apA); mps_ap_destroy(apB); diff --git a/mps/test/function/17.c b/mps/test/function/17.c index f029f9a742f..a8f665d3ab7 100644 --- a/mps/test/function/17.c +++ b/mps/test/function/17.c @@ -47,6 +47,7 @@ static void test(void) pool1=pool; } + mps_arena_park(arena); mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); diff --git a/mps/test/function/171.c b/mps/test/function/171.c index b6a0037aa5e..4473aae0f35 100644 --- a/mps/test/function/171.c +++ b/mps/test/function/171.c @@ -138,6 +138,7 @@ static void test(void) { } } + 
mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/18.c b/mps/test/function/18.c index 21f2c38e696..60a17e52d57 100644 --- a/mps/test/function/18.c +++ b/mps/test/function/18.c @@ -5,7 +5,7 @@ TEST_HEADER language = c link = testlib.o newfmt.o OUTPUT_SPEC - errtext = create pool: MEMORY + errtext = create pool: COMMIT_LIMIT END_HEADER */ @@ -39,6 +39,7 @@ static void test(void) cdie(mps_arena_create(&arena, mps_arena_class_vm(), mmqaArenaSIZE), "create arena"); + die(mps_arena_commit_limit_set(arena, 1ul << 30), "commit_limit_set"); die(mps_thread_reg(&thread, arena), "register thread"); die(mps_root_create_reg(&root, arena, mps_rank_ambig(), 0, thread, mps_stack_scan_ambig, stackpointer, 0), diff --git a/mps/test/function/19.c b/mps/test/function/19.c index 7727f45310b..bdbe02f88f8 100644 --- a/mps/test/function/19.c +++ b/mps/test/function/19.c @@ -5,7 +5,7 @@ TEST_HEADER language = c link = testlib.o newfmt.o OUTPUT_SPEC - errtext = create ap: MEMORY + errtext = create ap: COMMIT_LIMIT END_HEADER */ @@ -40,6 +40,7 @@ static void test(void) cdie(mps_arena_create(&arena, mps_arena_class_vm(), mmqaArenaSIZE), "create arena"); + die(mps_arena_commit_limit_set(arena, 1ul << 30), "commit_limit_set"); die(mps_thread_reg(&thread, arena), "register thread"); die(mps_root_create_reg(&root, arena, mps_rank_ambig(), 0, thread, mps_stack_scan_ambig, stackpointer, 0), diff --git a/mps/test/function/2.c b/mps/test/function/2.c index 2133f56f47a..7c0c6974919 100644 --- a/mps/test/function/2.c +++ b/mps/test/function/2.c @@ -82,6 +82,7 @@ static void test(void) b = b->ref[0]; } + mps_arena_park(arena); mps_ap_destroy(ap); comment("Destroyed ap."); diff --git a/mps/test/function/20.c b/mps/test/function/20.c index 5a95e314397..b66f96d4e67 100644 --- a/mps/test/function/20.c +++ b/mps/test/function/20.c @@ -5,7 +5,7 @@ TEST_HEADER language = c link = testlib.o newfmt.o OUTPUT_SPEC - errtext = create format: MEMORY + errtext = create format: COMMIT_LIMIT END_HEADER */ @@ -27,6 +27,7 @@ static void test(void) { int p; die(mps_arena_create(&arena, mps_arena_class_vm(), mmqaArenaSIZE), "create"); + die(mps_arena_commit_limit_set(arena, 1ul << 30), "commit_limit_set"); die(mps_thread_reg(&thread, arena), "register thread"); die(mps_root_create_reg(&root, arena, mps_rank_ambig(), 0, thread, mps_stack_scan_ambig, stackpointer, 0), "create root"); diff --git a/mps/test/function/203.c b/mps/test/function/203.c index bd3cf8d92e2..207937d1d35 100644 --- a/mps/test/function/203.c +++ b/mps/test/function/203.c @@ -1,7 +1,7 @@ /* TEST_HEADER id = $Id$ - summary = new MV2 allocation test + summary = new MVT allocation test language = c link = testlib.o END_HEADER @@ -9,14 +9,10 @@ END_HEADER #include #include "testlib.h" -#include "mpscmv2.h" +#include "mpscmvt.h" #include "mpsavm.h" -#define MAXNUMBER 1000000 - -/* this shouldn't be necessary, but it's not provided anywhere */ - -typedef MPS_T_WORD mps_count_t; +#define MAXNUMBER 100000 void *stackpointer; mps_arena_t arena; @@ -42,7 +38,7 @@ static void setobj(mps_addr_t a, size_t size, unsigned char val) } } -static mps_res_t mv2_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { +static mps_res_t mvt_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { mps_res_t res; size = ((size+7)/8)*8; @@ -73,7 +69,7 @@ static int chkobj(mps_addr_t a, size_t size, unsigned char val) static void dt(int kind, size_t minSize, size_t avgSize, size_t maxSize, - mps_count_t depth, mps_count_t fragLimit, + mps_word_t depth, mps_word_t 
fragLimit, size_t mins, size_t maxs, int number, int iter) { mps_pool_t pool; @@ -89,9 +85,9 @@ static void dt(int kind, asserts(time0 != -1, "processor time not available"); die( - mps_pool_create(&pool, arena, mps_class_mv2(), + mps_pool_create(&pool, arena, mps_class_mvt(), minSize, avgSize, maxSize, depth, fragLimit), - "create MV2 pool"); + "create MVT pool"); die(mps_ap_create(&ap, pool, mps_rank_ambig()), "create ap"); @@ -104,7 +100,7 @@ static void dt(int kind, } else { - die(mv2_alloc(&queue[hd].addr, ap, size), "alloc"); + die(mvt_alloc(&queue[hd].addr, ap, size), "alloc"); setobj(queue[hd].addr, size, (unsigned char) (hd%256)); queue[hd].size = size; } @@ -136,12 +132,13 @@ static void dt(int kind, } else { - die(mv2_alloc(&queue[hd].addr, ap, size),"alloc"); + die(mvt_alloc(&queue[hd].addr, ap, size),"alloc"); setobj(queue[hd].addr, size, (unsigned char) (hd%256)); queue[hd].size = size; } } + mps_ap_destroy(ap); mps_pool_destroy(pool); time1=clock(); @@ -157,7 +154,7 @@ static void test(void) { mps_thr_t thread; size_t mins; - mps_count_t dep, frag; + mps_word_t dep, frag; cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*100)), "create arena"); cdie(mps_thread_reg(&thread, arena), "register thread"); @@ -170,34 +167,34 @@ static void test(void) comment("Frag: %i", frag); - dt(SEQ, 8, 8, 9, dep, frag, 8, 9, 5, 1000); - dt(RANGAP, 64, 64, 64, dep, frag, 8, 128, 100, 100000); + dt(SEQ, 8, 8, 9, dep, frag, 8, 9, 5, 100); + dt(RANGAP, 64, 64, 64, dep, frag, 8, 128, 100, 10000); - dt(DUMMY, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(SEQ, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(RAN, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(SEQGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(RANGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); + dt(DUMMY, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(SEQ, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(RAN, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(SEQGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(RANGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); - dt(DUMMY, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(SEQ, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(RAN, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(SEQGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(RANGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); + dt(DUMMY, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(SEQ, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(RAN, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(SEQGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(RANGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); - dt(DUMMY, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQ, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RAN, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RANGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); + dt(DUMMY, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQ, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RAN, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RANGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); /* try again using exceptional obj for anything over 16K */ - 
dt(DUMMY, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQ, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RAN, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RANGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); + dt(DUMMY, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQ, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RAN, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RANGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); } diff --git a/mps/test/function/204.c b/mps/test/function/204.c index 73e40029dff..d1a00731086 100644 --- a/mps/test/function/204.c +++ b/mps/test/function/204.c @@ -1,7 +1,7 @@ /* TEST_HEADER id = $Id$ - summary = new MV2 allocation test, extra shallow + summary = new MVT allocation test, extra shallow language = c link = testlib.o END_HEADER @@ -9,15 +9,11 @@ END_HEADER #include #include "testlib.h" -#include "mpscmv2.h" +#include "mpscmvt.h" #include "mpsavm.h" #define MAXNUMBER 1000000 -/* this shouldn't be necessary, but it's not provided anywhere */ - -typedef MPS_T_WORD mps_count_t; - void *stackpointer; mps_arena_t arena; @@ -42,7 +38,7 @@ static void setobj(mps_addr_t a, size_t size, unsigned char val) } } -static mps_res_t mv2_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { +static mps_res_t mvt_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { mps_res_t res; size = ((size+7)/8)*8; @@ -73,7 +69,7 @@ static int chkobj(mps_addr_t a, size_t size, unsigned char val) static void dt(int kind, size_t minSize, size_t avgSize, size_t maxSize, - mps_count_t depth, mps_count_t fragLimit, + mps_word_t depth, mps_word_t fragLimit, size_t mins, size_t maxs, int number, int iter) { mps_pool_t pool; @@ -89,9 +85,9 @@ static void dt(int kind, asserts(time0 != -1, "processor time not available"); die( - mps_pool_create(&pool, arena, mps_class_mv2(), + mps_pool_create(&pool, arena, mps_class_mvt(), minSize, avgSize, maxSize, depth, fragLimit), - "create MV2 pool"); + "create MVT pool"); die(mps_ap_create(&ap, pool, mps_rank_ambig()), "create ap"); @@ -104,7 +100,7 @@ static void dt(int kind, } else { - die(mv2_alloc(&queue[hd].addr, ap, size), "alloc"); + die(mvt_alloc(&queue[hd].addr, ap, size), "alloc"); setobj(queue[hd].addr, size, (unsigned char) (hd%256)); queue[hd].size = size; } @@ -136,12 +132,13 @@ static void dt(int kind, } else { - die(mv2_alloc(&queue[hd].addr, ap, size),"alloc"); + die(mvt_alloc(&queue[hd].addr, ap, size),"alloc"); setobj(queue[hd].addr, size, (unsigned char) (hd%256)); queue[hd].size = size; } } + mps_ap_destroy(ap); mps_pool_destroy(pool); time1=clock(); @@ -157,7 +154,7 @@ static void test(void) { mps_thr_t thread; size_t mins; - mps_count_t dep, frag; + mps_word_t dep, frag; cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*100)), "create arena"); cdie(mps_thread_reg(&thread, arena), "register thread"); @@ -170,34 +167,34 @@ static void test(void) comment("Frag: %i", frag); - dt(SEQ, 8, 8, 9, dep, frag, 8, 9, 5, 1000); - dt(RANGAP, 64, 64, 64, dep, frag, 8, 128, 100, 100000); + dt(SEQ, 8, 8, 9, dep, frag, 8, 9, 5, 100); + dt(RANGAP, 64, 64, 64, dep, frag, 8, 128, 100, 10000); - dt(DUMMY, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(SEQ, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(RAN, 8, 32, 64, dep, 
frag, 8, 64, 1000, 1000000); - dt(SEQGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(RANGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); + dt(DUMMY, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(SEQ, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(RAN, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(SEQGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(RANGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); - dt(DUMMY, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(SEQ, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(RAN, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(SEQGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(RANGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); + dt(DUMMY, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(SEQ, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(RAN, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(SEQGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(RANGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); - dt(DUMMY, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQ, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RAN, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RANGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); + dt(DUMMY, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQ, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RAN, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RANGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); /* try again using exceptional obj for anything over 16K */ - dt(DUMMY, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQ, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RAN, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RANGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); + dt(DUMMY, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQ, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RAN, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RANGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); } diff --git a/mps/test/function/205.c b/mps/test/function/205.c index e7b4e7c812d..d067d6a5ada 100644 --- a/mps/test/function/205.c +++ b/mps/test/function/205.c @@ -1,7 +1,7 @@ /* TEST_HEADER id = $Id$ - summary = new MV2 allocation test, extra deep + summary = new MVT allocation test, extra deep language = c link = testlib.o END_HEADER @@ -9,15 +9,11 @@ END_HEADER #include #include "testlib.h" -#include "mpscmv2.h" +#include "mpscmvt.h" #include "mpsavm.h" #define MAXNUMBER 1000000 -/* this shouldn't be necessary, but it's not provided anywhere */ - -typedef MPS_T_WORD mps_count_t; - void *stackpointer; mps_arena_t arena; @@ -42,7 +38,7 @@ static void setobj(mps_addr_t a, size_t size, unsigned char val) } } -static mps_res_t mv2_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { +static mps_res_t mvt_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { mps_res_t res; size = ((size+7)/8)*8; @@ -73,7 +69,7 @@ static int 
chkobj(mps_addr_t a, size_t size, unsigned char val) static void dt(int kind, size_t minSize, size_t avgSize, size_t maxSize, - mps_count_t depth, mps_count_t fragLimit, + mps_word_t depth, mps_word_t fragLimit, size_t mins, size_t maxs, int number, int iter) { mps_pool_t pool; @@ -89,9 +85,9 @@ static void dt(int kind, asserts(time0 != -1, "processor time not available"); die( - mps_pool_create(&pool, arena, mps_class_mv2(), + mps_pool_create(&pool, arena, mps_class_mvt(), minSize, avgSize, maxSize, depth, fragLimit), - "create MV2 pool"); + "create MVT pool"); die(mps_ap_create(&ap, pool, mps_rank_ambig()), "create ap"); @@ -104,7 +100,7 @@ static void dt(int kind, } else { - die(mv2_alloc(&queue[hd].addr, ap, size), "alloc"); + die(mvt_alloc(&queue[hd].addr, ap, size), "alloc"); setobj(queue[hd].addr, size, (unsigned char) (hd%256)); queue[hd].size = size; } @@ -136,12 +132,13 @@ static void dt(int kind, } else { - die(mv2_alloc(&queue[hd].addr, ap, size),"alloc"); + die(mvt_alloc(&queue[hd].addr, ap, size),"alloc"); setobj(queue[hd].addr, size, (unsigned char) (hd%256)); queue[hd].size = size; } } + mps_ap_destroy(ap); mps_pool_destroy(pool); time1=clock(); @@ -157,7 +154,7 @@ static void test(void) { mps_thr_t thread; size_t mins; - mps_count_t dep, frag; + mps_word_t dep, frag; cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*100)), "create arena"); cdie(mps_thread_reg(&thread, arena), "register thread"); @@ -170,34 +167,34 @@ static void test(void) comment("Frag: %i", frag); - dt(SEQ, 8, 8, 9, dep, frag, 8, 9, 5, 1000); - dt(RANGAP, 64, 64, 64, dep, frag, 8, 128, 100, 100000); + dt(SEQ, 8, 8, 9, dep, frag, 8, 9, 5, 100); + dt(RANGAP, 64, 64, 64, dep, frag, 8, 128, 100, 10000); - dt(DUMMY, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(SEQ, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(RAN, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(SEQGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); - dt(RANGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 1000000); + dt(DUMMY, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(SEQ, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(RAN, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(SEQGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); + dt(RANGAP, 8, 32, 64, dep, frag, 8, 64, 1000, 100000); - dt(DUMMY, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(SEQ, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(RAN, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(SEQGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); - dt(RANGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 1000000); + dt(DUMMY, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(SEQ, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(RAN, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(SEQGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); + dt(RANGAP, 100, 116, 132, dep, frag, 100, 132, 1000, 100000); - dt(DUMMY, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQ, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RAN, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RANGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 10000); + dt(DUMMY, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQ, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RAN, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQGAP, mins, 
60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RANGAP, mins, 60*1024, 120*1024, dep, frag, mins, 128*1024, 100, 1000); /* try again using exceptional obj for anything over 16K */ - dt(DUMMY, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQ, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RAN, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(SEQGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); - dt(RANGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 10000); + dt(DUMMY, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQ, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RAN, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(SEQGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); + dt(RANGAP, mins, 8*1024, 16*1024, dep, frag, mins, 128*1024, 100, 1000); } diff --git a/mps/test/function/214.c b/mps/test/function/214.c index d84e4b71be4..81fe5c5cfe6 100644 --- a/mps/test/function/214.c +++ b/mps/test/function/214.c @@ -1,7 +1,7 @@ /* TEST_HEADER id = $Id$ - summary = MV2 greed test + summary = MVT greed test language = c link = testlib.o parameters = OBJECTS=1000 OBJSIZE=8192 DEPTH=2 FRAGLIMIT=50 @@ -9,19 +9,15 @@ END_HEADER */ #include "testlib.h" -#include "mpscmv2.h" +#include "mpscmvt.h" #include "mpsavm.h" -/* this shouldn't be necessary, but it's not provided anywhere */ - -typedef MPS_T_WORD mps_count_t; - void *stackpointer; mps_arena_t arena; static mps_addr_t objs[OBJECTS]; -static mps_res_t mv2_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { +static mps_res_t mvt_alloc(mps_addr_t *ref, mps_ap_t ap, size_t size) { mps_res_t res; size = ((size+7)/8)*8; @@ -43,20 +39,20 @@ static void test (void) { cdie(mps_arena_create(&arena, mps_arena_class_vm(), (size_t) (1024*1024*100)), "create arena"); cdie(mps_thread_reg(&thread, arena), "register thread"); die( - mps_pool_create(&pool, arena, mps_class_mv2(), + mps_pool_create(&pool, arena, mps_class_mvt(), OBJSIZE, OBJSIZE, OBJSIZE, DEPTH, FRAGLIMIT), - "create MV2 pool"); + "create MVT pool"); die(mps_ap_create(&ap, pool, mps_rank_ambig()), "create ap"); for (i = 0; i < OBJECTS; i++) { - die(mv2_alloc(&objs[i], ap, OBJSIZE), "alloc"); + die(mvt_alloc(&objs[i], ap, OBJSIZE), "alloc"); } report("size1", "%ld", mps_arena_committed(arena)); for (i = 0; i < OBJECTS; i+=2) { mps_free(pool, objs[i], OBJSIZE); - die(mv2_alloc(&objs[i], ap, OBJSIZE), "alloc"); + die(mvt_alloc(&objs[i], ap, OBJSIZE), "alloc"); } report("size2", "%ld", mps_arena_committed(arena)); diff --git a/mps/test/function/215.c b/mps/test/function/215.c index ad55e9df432..14ad7a1b808 100644 --- a/mps/test/function/215.c +++ b/mps/test/function/215.c @@ -150,6 +150,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/223.c b/mps/test/function/223.c index 9340e44f456..6a402d1c831 100644 --- a/mps/test/function/223.c +++ b/mps/test/function/223.c @@ -24,7 +24,7 @@ END_HEADER #define BACKITER (32) #define RAMPSIZE (128) -#define ITERATIONS (1000000ul) +#define ITERATIONS (100000ul) #define RAMP_INTERFACE /* @@ -97,7 +97,7 @@ static void test(void) { mps_ap_create(&apamc, poolamc, mps_rank_exact()), "create ap"); - mps_message_type_enable(arena, mps_message_type_collection_stats()); + mps_message_type_enable(arena, mps_message_type_gc()); inramp = 0; @@ -138,18 +138,18 @@ static void test(void) { rsize = 0; } } - 
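The rewrites of function/203.c, 204.c, 205.c and 214.c above replace the old MV2 names (mpscmv2.h, mps_count_t, mps_class_mv2) with the MVT pool class. The sketch below shows the mps_class_mvt() creation arguments and the reserve/commit loop that the tests' mvt_alloc helper wraps; the pool parameters here are made-up illustrations, not values from the tests:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mps.h"
#include "mpsavm.h"
#include "mpscmvt.h"

int main(void)
{
  mps_arena_t arena;
  mps_pool_t pool;
  mps_ap_t ap;
  mps_addr_t p;
  size_t size = 128;  /* already a multiple of the 8-byte grain used above */

  if (mps_arena_create(&arena, mps_arena_class_vm(),
                       (size_t)(16 * 1024 * 1024)) != MPS_RES_OK)
    exit(EXIT_FAILURE);

  /* MVT takes minimum, mean and maximum object size, a reserve depth
     and a fragmentation limit (a percentage); these values are made up. */
  if (mps_pool_create(&pool, arena, mps_class_mvt(),
                      (size_t)8, (size_t)32, (size_t)256,
                      (mps_word_t)16, (mps_word_t)50) != MPS_RES_OK)
    exit(EXIT_FAILURE);

  if (mps_ap_create(&ap, pool, mps_rank_ambig()) != MPS_RES_OK)
    exit(EXIT_FAILURE);

  /* The reserve/commit protocol that mvt_alloc wraps: reserve a block,
     initialize it, then commit; loop if the commit is refused. */
  do {
    if (mps_reserve(&p, ap, size) != MPS_RES_OK)
      exit(EXIT_FAILURE);
    memset(p, 0, size);
  } while (!mps_commit(ap, p, size));

  printf("allocated %lu bytes at %p\n", (unsigned long)size, p);

  mps_free(pool, p, size);   /* MVT is manually managed */
  mps_ap_destroy(ap);
  mps_pool_destroy(pool);
  mps_arena_destroy(arena);
  return 0;
}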
if(mps_message_get(&message, arena, mps_message_type_collection_stats())) { + if(mps_message_get(&message, arena, mps_message_type_gc())) { unsigned long live, condemned, notCondemned; - live = mps_message_collection_stats_live_size(arena, message); - condemned = mps_message_collection_stats_condemned_size(arena, message); - notCondemned = - mps_message_collection_stats_not_condemned_size(arena, message); + live = mps_message_gc_live_size(arena, message); + condemned = mps_message_gc_condemned_size(arena, message); + notCondemned = mps_message_gc_not_condemned_size(arena, message); comment("Collection: live=%ld, condemned=%ld, not condemned = %ld", - live, condemned, notCondemned); + live, condemned, notCondemned); mps_message_discard(arena, message); } } + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed ap."); diff --git a/mps/test/function/224.c b/mps/test/function/224.c index cd648209bfc..0cec33e1cd8 100644 --- a/mps/test/function/224.c +++ b/mps/test/function/224.c @@ -6,6 +6,8 @@ TEST_HEADER link = testlib.o harness = 2.5 parameters = EXTENDBY=65536 AVGSIZE=32 PROMISE=64 ITERATE=2000 +OUTPUT_SPEC + errtext = alloc: COMMIT_LIMIT END_HEADER This one is supposed to fail, telling us that MV is badly fragmented. @@ -16,7 +18,7 @@ This one is supposed to fail, telling us that MV is badly fragmented. #include "mpsavm.h" -#define VMNZSIZE ((size_t) 30*1024*1024) +#define VMSIZE ((size_t) 30*1024*1024) static void test(void) @@ -26,8 +28,8 @@ static void test(void) mps_addr_t q; int p; - die(mps_arena_create(&arena, mps_arena_class_vmnz(), VMNZSIZE), "create"); - die(mps_arena_commit_limit_set(arena, VMNZSIZE), "commit limit"); + die(mps_arena_create(&arena, mps_arena_class_vm(), VMSIZE), "create"); + die(mps_arena_commit_limit_set(arena, VMSIZE), "commit limit"); die(mps_pool_create(&pool, arena, mps_class_mv(), EXTENDBY, AVGSIZE, EXTENDBY), diff --git a/mps/test/function/226.c b/mps/test/function/226.c index 361c7cf1848..712b35972db 100644 --- a/mps/test/function/226.c +++ b/mps/test/function/226.c @@ -171,6 +171,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git a/mps/test/function/227.c b/mps/test/function/227.c index 89e40fdae74..af07e083a32 100644 --- a/mps/test/function/227.c +++ b/mps/test/function/227.c @@ -164,6 +164,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apamc1); mps_ap_destroy(apamc2); comment("Destroyed ap."); diff --git a/mps/test/function/23.c b/mps/test/function/23.c index 02f08029a68..afcf764a059 100644 --- a/mps/test/function/23.c +++ b/mps/test/function/23.c @@ -103,6 +103,8 @@ static void test(void) r = mps_alloc(&p, poolMV, 1024*1024); report("refuse4", "%s", err_text(r)); } + + mps_arena_park(arena); mps_pool_destroy(poolMV); mps_ap_destroy(ap); diff --git a/mps/test/function/24.c b/mps/test/function/24.c index a16cdcec6e8..e28de201019 100644 --- a/mps/test/function/24.c +++ b/mps/test/function/24.c @@ -10,6 +10,8 @@ END_HEADER #include "testlib.h" #include "mpscamc.h" +#define OBJSIZE (10 * (1u << 20)) + #define genCOUNT (3) static mps_gen_param_s testChain[genCOUNT] = { @@ -27,7 +29,7 @@ static mps_res_t myscan(mps_ss_t ss, mps_addr_t base, mps_addr_t limit) static mps_addr_t myskip(mps_addr_t object) { - return (mps_addr_t) ((char *) object + 1); + return (mps_addr_t) ((char *) object + OBJSIZE); } static void mycopy(mps_addr_t object, mps_addr_t to) @@ -100,13 +102,14 @@ static void test(void) for(i=1; i<1000; i++) { do - { 
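function/223.c above now polls mps_message_type_gc() in place of the old collection-stats message type. In isolation the polling loop looks like this (a fragment, not a complete program: it assumes an arena already exists and that the message type was enabled once after arena creation):

#include <stdio.h>
#include "mps.h"

void drain_gc_messages(mps_arena_t arena)
{
  mps_message_t message;

  while (mps_message_get(&message, arena, mps_message_type_gc())) {
    size_t live = mps_message_gc_live_size(arena, message);
    size_t condemned = mps_message_gc_condemned_size(arena, message);
    size_t not_condemned = mps_message_gc_not_condemned_size(arena, message);

    printf("Collection: live=%lu, condemned=%lu, not condemned=%lu\n",
           (unsigned long)live, (unsigned long)condemned,
           (unsigned long)not_condemned);

    mps_message_discard(arena, message);  /* always discard when done */
  }
}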
die(mps_reserve(&p, ap, 10*1024*1024), "Reserve: "); + { die(mps_reserve(&p, ap, OBJSIZE), "Reserve: "); } - while (!mps_commit(ap, p, 10*1024*1024)); + while (!mps_commit(ap, p, OBJSIZE)); comment("%i at %p", i, p); comment("%i objects of 10 megabytes each allocated", i); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_fmt_destroy(format); diff --git a/mps/test/function/25.c b/mps/test/function/25.c index 5ee683d2ccf..040202bacdf 100644 --- a/mps/test/function/25.c +++ b/mps/test/function/25.c @@ -94,6 +94,7 @@ static void test(void) { c = conc(string_ch("Hello there"), string_ch(" folks!")); } + mps_arena_park(arena); mps_ap_destroy(ap); comment("Destroyed ap."); diff --git a/mps/test/function/27.c b/mps/test/function/27.c index f0c186f3972..2f30b1a81c6 100644 --- a/mps/test/function/27.c +++ b/mps/test/function/27.c @@ -70,8 +70,8 @@ static void test(void) } + mps_arena_park(arena); mps_ap_destroy(ap); - mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); diff --git a/mps/test/function/28.c b/mps/test/function/28.c index 2ebfcd55bd2..5cbf8e803f4 100644 --- a/mps/test/function/28.c +++ b/mps/test/function/28.c @@ -96,6 +96,7 @@ static void test(void) checkfrom(a); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/29.c b/mps/test/function/29.c index d3294f348b0..70ef940a948 100644 --- a/mps/test/function/29.c +++ b/mps/test/function/29.c @@ -94,6 +94,7 @@ static void test(void) { z = alloclo(ap, 0x4000); } + mps_arena_park(arena); mps_ap_destroy(ap); comment("Destroyed ap."); diff --git a/mps/test/function/3.c b/mps/test/function/3.c index ffc2a533b40..02400bd0e77 100644 --- a/mps/test/function/3.c +++ b/mps/test/function/3.c @@ -82,6 +82,7 @@ static void test(void) comment("%d: %x", j, (int) a); } + mps_arena_park(arena); mps_ap_destroy(ap); comment("Destroyed ap."); diff --git a/mps/test/function/30.c b/mps/test/function/30.c index 748cf8b56a7..583429d8bc2 100644 --- a/mps/test/function/30.c +++ b/mps/test/function/30.c @@ -85,6 +85,7 @@ static void test(void) checkfrom(a); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_fmt_destroy(format); diff --git a/mps/test/function/31.c b/mps/test/function/31.c index 2221ed53e01..3000a11f1cc 100644 --- a/mps/test/function/31.c +++ b/mps/test/function/31.c @@ -83,6 +83,7 @@ static void test(void) comment("%d of 1000.", i); } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git a/mps/test/function/32.c b/mps/test/function/32.c index f3cb17f155b..465899c6a13 100644 --- a/mps/test/function/32.c +++ b/mps/test/function/32.c @@ -83,6 +83,7 @@ static void test(void) b = allocdumb(apamc, 0x400*64, 0); } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/33.c b/mps/test/function/33.c index a883b7a7677..31eaeb1164f 100644 --- a/mps/test/function/33.c +++ b/mps/test/function/33.c @@ -82,6 +82,7 @@ static void test(void) b = allocdumb(apamc, 0x400*64, 0); } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/34.c b/mps/test/function/34.c index de1ad4ab423..3d513c75dfe 100644 --- a/mps/test/function/34.c +++ b/mps/test/function/34.c @@ -85,6 +85,7 @@ static void test(void) b = allocdumb(apamc, 0x400*64, 0); } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); 
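Most of the hunks above add mps_arena_park(arena) just before teardown, so that no collection is in progress while allocation points, pools, formats and roots are destroyed. The ordering the tests converge on, shown as a fragment with placeholder identifiers for whatever the test created:

#include "mps.h"

void teardown(mps_arena_t arena, mps_ap_t ap, mps_pool_t pool,
              mps_chain_t chain, mps_fmt_t format,
              mps_root_t root, mps_thr_t thread)
{
  mps_arena_park(arena);     /* wait for any collection; stay parked */
  mps_ap_destroy(ap);
  mps_pool_destroy(pool);
  mps_chain_destroy(chain);
  mps_fmt_destroy(format);
  mps_root_destroy(root);
  mps_thread_dereg(thread);
  mps_arena_destroy(arena);
}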
mps_pool_destroy(poolamc); diff --git a/mps/test/function/35.c b/mps/test/function/35.c index a79ab52c99c..42def83780a 100644 --- a/mps/test/function/35.c +++ b/mps/test/function/35.c @@ -107,6 +107,7 @@ static void test(void) checkfrom(*a); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/36.c b/mps/test/function/36.c index 3d4482311a3..609d9f9210b 100644 --- a/mps/test/function/36.c +++ b/mps/test/function/36.c @@ -90,6 +90,7 @@ static void test(void) setref(a[j], z, a[k]); } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/37.c b/mps/test/function/37.c index 275cd58dce4..664e9a3dd68 100644 --- a/mps/test/function/37.c +++ b/mps/test/function/37.c @@ -109,6 +109,7 @@ static void test(void) checkfrom(*a); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/38.c b/mps/test/function/38.c index 409cc6a97b8..e3512864442 100644 --- a/mps/test/function/38.c +++ b/mps/test/function/38.c @@ -151,6 +151,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolmv); diff --git a/mps/test/function/39.c b/mps/test/function/39.c index 5b27b4ec94b..e2972230a8d 100644 --- a/mps/test/function/39.c +++ b/mps/test/function/39.c @@ -84,6 +84,7 @@ static void test(void) b = allocdumb(apamc, 0x400*64, 0); } + mps_arena_park(arena); mps_ap_destroy(aplo); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git a/mps/test/function/4.c b/mps/test/function/4.c index 3294eb0a03e..e730bc77ff3 100644 --- a/mps/test/function/4.c +++ b/mps/test/function/4.c @@ -94,6 +94,7 @@ static void test(void) time0 = time1; } + mps_arena_park(arena); mps_ap_destroy(ap); comment("Destroyed ap."); diff --git a/mps/test/function/40.c b/mps/test/function/40.c index 5257e1fcfc1..de0e4dbe462 100644 --- a/mps/test/function/40.c +++ b/mps/test/function/40.c @@ -75,6 +75,7 @@ static void test(void) DC; DMC; + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/41.c b/mps/test/function/41.c index b394e5f37f8..8fcdb372a6d 100644 --- a/mps/test/function/41.c +++ b/mps/test/function/41.c @@ -110,6 +110,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/42.c b/mps/test/function/42.c index 093246cba11..b56a61f2a09 100644 --- a/mps/test/function/42.c +++ b/mps/test/function/42.c @@ -106,6 +106,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/43.c b/mps/test/function/43.c index da73237b90b..1b955f268b7 100644 --- a/mps/test/function/43.c +++ b/mps/test/function/43.c @@ -114,6 +114,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apweak); mps_ap_destroy(apexact); mps_ap_destroy(apamc); diff --git a/mps/test/function/44.c b/mps/test/function/44.c index 949fa922813..f38e36a06e0 100644 --- a/mps/test/function/44.c +++ b/mps/test/function/44.c @@ -166,6 +166,7 @@ static void test(void) RC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/45.c b/mps/test/function/45.c index 0442a4dbd35..e3c7f412931 100644 --- a/mps/test/function/45.c +++ 
b/mps/test/function/45.c @@ -191,6 +191,7 @@ static void test(void) mps_ap_destroy(ap[i]); } + mps_arena_park(arena); mps_pool_destroy(pool); mps_chain_destroy(chain); mps_fmt_destroy(format); diff --git a/mps/test/function/46.c b/mps/test/function/46.c index c5cd9437859..715ba899090 100644 --- a/mps/test/function/46.c +++ b/mps/test/function/46.c @@ -140,6 +140,7 @@ static void test(void) RC; } + mps_arena_park(arena); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git a/mps/test/function/47.c b/mps/test/function/47.c index 28682810db0..ee7c5605c2e 100644 --- a/mps/test/function/47.c +++ b/mps/test/function/47.c @@ -87,6 +87,7 @@ static void test(void) { + mps_arena_park(arena); mps_ap_destroy(apawl); comment("Destroyed ap."); diff --git a/mps/test/function/48.c b/mps/test/function/48.c index 700c565dc63..3343684fdbf 100644 --- a/mps/test/function/48.c +++ b/mps/test/function/48.c @@ -115,6 +115,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_ap_destroy(apweak); diff --git a/mps/test/function/49.c b/mps/test/function/49.c index 5fd7f437e91..27864e380e9 100644 --- a/mps/test/function/49.c +++ b/mps/test/function/49.c @@ -273,6 +273,7 @@ static void test(void) report("count2", "%d", final_count); + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_ap_destroy(aplo); diff --git a/mps/test/function/5.c b/mps/test/function/5.c index ae5d74dd5af..7a91636ee30 100644 --- a/mps/test/function/5.c +++ b/mps/test/function/5.c @@ -78,6 +78,7 @@ static void test(void) q->data.size = OBJ_SIZE; (void) mps_commit(apA, p, OBJ_SIZE); + mps_arena_park(arena); mps_ap_destroy(apA); comment("Destroyed apA."); mps_ap_destroy(apB); diff --git a/mps/test/function/50.c b/mps/test/function/50.c index cae53e67e10..741f865eb98 100644 --- a/mps/test/function/50.c +++ b/mps/test/function/50.c @@ -263,6 +263,7 @@ static void test(void) finalpoll(&z, FINAL_DISCARD); } + mps_arena_park(arena); mps_root_destroy(root0); mps_root_destroy(root1); comment("Destroyed roots."); @@ -281,6 +282,7 @@ static void test(void) report("count2", "%d", final_count); + mps_arena_park(arena); mps_pool_destroy(poolamc); mps_pool_destroy(poolawl); mps_pool_destroy(poollo); diff --git a/mps/test/function/51.c b/mps/test/function/51.c index 779445218ae..bdf1a87ad70 100644 --- a/mps/test/function/51.c +++ b/mps/test/function/51.c @@ -219,6 +219,7 @@ static void test(void) /* now to test leaving messages open for a long time! 
*/ + mps_arena_park(arena); mps_ap_destroy(apamc); mps_ap_destroy(apamcz); mps_ap_destroy(apams); diff --git a/mps/test/function/52.c b/mps/test/function/52.c index 4663f49e65e..d0c757a3059 100644 --- a/mps/test/function/52.c +++ b/mps/test/function/52.c @@ -87,6 +87,7 @@ static void test(void) time0 = time1; } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/53.c b/mps/test/function/53.c index 4fc78b1b187..6012a05befc 100644 --- a/mps/test/function/53.c +++ b/mps/test/function/53.c @@ -103,6 +103,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apamc); mps_ap_destroy(aplo); comment("Destroyed aps."); diff --git a/mps/test/function/54.c b/mps/test/function/54.c index cbf4044a0e6..f2c712ac15a 100644 --- a/mps/test/function/54.c +++ b/mps/test/function/54.c @@ -105,6 +105,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/55.c b/mps/test/function/55.c index c8ed50f3c0c..f34a2fb1856 100644 --- a/mps/test/function/55.c +++ b/mps/test/function/55.c @@ -104,6 +104,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/56.c b/mps/test/function/56.c index 87f9a29b97a..697b2e04b6c 100644 --- a/mps/test/function/56.c +++ b/mps/test/function/56.c @@ -102,6 +102,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/57.c b/mps/test/function/57.c index 5e0177b10b4..dbaab342bd9 100644 --- a/mps/test/function/57.c +++ b/mps/test/function/57.c @@ -99,6 +99,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/6.c b/mps/test/function/6.c index 7e56e09317a..bbbe1217b3e 100644 --- a/mps/test/function/6.c +++ b/mps/test/function/6.c @@ -79,6 +79,8 @@ static void test(void) } } + mps_arena_park(arena); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/60.c b/mps/test/function/60.c index 51008852af2..b075e0b938e 100644 --- a/mps/test/function/60.c +++ b/mps/test/function/60.c @@ -68,11 +68,11 @@ static void test(void) mps_ap_create(&ap2, poolawl2, mps_rank_exact()), "create ap"); - for (j=1; j<100; j++) + for (j=1; j<=10; j++) { - comment("%i of 100.", j); + comment("%i of 10.", j); - for (i=1; i<10000; i++) + for (i=1; i<=1000; i++) { UC; a = allocone(ap1, 100, 1); @@ -85,6 +85,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(ap1); mps_ap_destroy(ap2); comment("Destroyed aps."); diff --git a/mps/test/function/61.c b/mps/test/function/61.c index 1d3df5e90e2..9ccd97d77de 100644 --- a/mps/test/function/61.c +++ b/mps/test/function/61.c @@ -79,6 +79,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(ap1); mps_ap_destroy(ap2); mps_pool_destroy(poolamc1); diff --git a/mps/test/function/62.c b/mps/test/function/62.c index cc99e36c7c9..1ea9ff432b3 100644 --- a/mps/test/function/62.c +++ b/mps/test/function/62.c @@ -79,6 +79,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(ap1); mps_ap_destroy(ap2); mps_pool_destroy(poolamc1); diff --git a/mps/test/function/63.c b/mps/test/function/63.c index a46c993b3f6..1e9b63693e3 100644 --- 
a/mps/test/function/63.c +++ b/mps/test/function/63.c @@ -73,6 +73,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(ap1); mps_pool_destroy(poolamc1); mps_chain_destroy(chain); diff --git a/mps/test/function/64.c b/mps/test/function/64.c index 7e9bb633d95..390567b7117 100644 --- a/mps/test/function/64.c +++ b/mps/test/function/64.c @@ -104,6 +104,7 @@ static void test(void) DMC; } + mps_arena_park(arena); mps_ap_destroy(apamc); mps_ap_destroy(aplo); mps_pool_destroy(poolamc); diff --git a/mps/test/function/65.c b/mps/test/function/65.c index 3300ca895ff..3305d96cd72 100644 --- a/mps/test/function/65.c +++ b/mps/test/function/65.c @@ -179,6 +179,7 @@ static void test(void) mps_arena_release(arena); comment("released."); + mps_arena_park(arena); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); mps_chain_destroy(chain); diff --git a/mps/test/function/66.c b/mps/test/function/66.c index 069a4b99358..d395dbb3637 100644 --- a/mps/test/function/66.c +++ b/mps/test/function/66.c @@ -155,6 +155,7 @@ static void test(void) { } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); comment("Destroyed aps."); diff --git a/mps/test/function/69.c b/mps/test/function/69.c index c86abc37fe4..bbc758b7f42 100644 --- a/mps/test/function/69.c +++ b/mps/test/function/69.c @@ -94,6 +94,7 @@ static void test(void) { mps_message_discard(arena, message); + mps_arena_park(arena); mps_root_destroy(root); mps_ap_destroy(ap); mps_pool_destroy(pool); diff --git a/mps/test/function/72.c b/mps/test/function/72.c index 2eb6b178529..946844d9492 100644 --- a/mps/test/function/72.c +++ b/mps/test/function/72.c @@ -80,6 +80,7 @@ static void test(void) fail(); + mps_arena_park(arena); mps_ap_destroy(ap); comment("Destroyed ap."); diff --git a/mps/test/function/73.c b/mps/test/function/73.c index 492aa6cd919..f38e6d1847a 100644 --- a/mps/test/function/73.c +++ b/mps/test/function/73.c @@ -60,6 +60,7 @@ static void test(void) { /* (total allocated is 1000 M) */ + mps_arena_park(arena); mps_root_destroy(root0); mps_root_destroy(root1); comment("Destroyed roots."); diff --git a/mps/test/function/74.c b/mps/test/function/74.c index 40f86f09e14..58e280b544f 100644 --- a/mps/test/function/74.c +++ b/mps/test/function/74.c @@ -60,6 +60,7 @@ static void test(void) { /* (total allocated is 1000 M) */ + mps_arena_park(arena); mps_root_destroy(root0); mps_root_destroy(root1); comment("Destroyed roots."); diff --git a/mps/test/function/75.c b/mps/test/function/75.c index d6e4a38e870..859471cfbbc 100644 --- a/mps/test/function/75.c +++ b/mps/test/function/75.c @@ -69,6 +69,7 @@ static void test(void) /* (total allocated is 1000 M) */ + mps_arena_park(arena); mps_root_destroy(root0); mps_root_destroy(root1); comment("Destroyed roots."); diff --git a/mps/test/function/76.c b/mps/test/function/76.c index 89120d6aa97..72f2977f80c 100644 --- a/mps/test/function/76.c +++ b/mps/test/function/76.c @@ -121,6 +121,7 @@ static void test(void) /* now to test leaving messages open for a long time! 
*/ + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_ap_destroy(aplo); diff --git a/mps/test/function/77.c b/mps/test/function/77.c index 7a32973ecd2..f57ea4bc2f6 100644 --- a/mps/test/function/77.c +++ b/mps/test/function/77.c @@ -69,8 +69,8 @@ static void test(void) b = allocone(apamc, 1, mps_rank_exact()); - for (j=1; j<100; j++) { - comment("%i of 100.", j); + for (j=1; j<=10; j++) { + comment("%i of 10.", j); a = allocone(apamc, 5, mps_rank_exact()); b = a; c = a; @@ -94,6 +94,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/78.c b/mps/test/function/78.c index 75c371f7395..331c0f2b2be 100644 --- a/mps/test/function/78.c +++ b/mps/test/function/78.c @@ -97,6 +97,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/79.c b/mps/test/function/79.c index 42163242b73..78857b4c3be 100644 --- a/mps/test/function/79.c +++ b/mps/test/function/79.c @@ -94,6 +94,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/80.c b/mps/test/function/80.c index bdc5843fcc8..2d373206f73 100644 --- a/mps/test/function/80.c +++ b/mps/test/function/80.c @@ -94,6 +94,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apawl); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/function/81.c b/mps/test/function/81.c index ccaad2111bb..4443612896a 100644 --- a/mps/test/function/81.c +++ b/mps/test/function/81.c @@ -78,6 +78,7 @@ static void test(void) mps_arena_collect(arena); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/83.c b/mps/test/function/83.c index 5524e4ebd26..baad036e364 100644 --- a/mps/test/function/83.c +++ b/mps/test/function/83.c @@ -107,6 +107,7 @@ static void test(void) report("d", "%p", d); + mps_arena_park(arena); mps_ap_destroy(ap1); mps_ap_destroy(ap2); mps_pool_destroy(pool1); diff --git a/mps/test/function/9.c b/mps/test/function/9.c index 9f205a1654a..6f124f5609c 100644 --- a/mps/test/function/9.c +++ b/mps/test/function/9.c @@ -58,6 +58,7 @@ static void test(void) a = allocdumb(ap, 1024*1024*80); + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/96.c b/mps/test/function/96.c index 3ccdb838495..210fbf46542 100644 --- a/mps/test/function/96.c +++ b/mps/test/function/96.c @@ -128,6 +128,7 @@ static void test(void) mps_arena_collect(arena); } + mps_arena_park(arena); mps_ap_destroy(ap); mps_pool_destroy(pool); mps_chain_destroy(chain); diff --git a/mps/test/function/97.c b/mps/test/function/97.c index cc7f49ef433..fabbc1136ab 100644 --- a/mps/test/function/97.c +++ b/mps/test/function/97.c @@ -222,6 +222,7 @@ static void test(void) comment("ok"); } + mps_arena_park(arena); mps_ap_destroy(apamc); mps_ap_destroy(aplo); mps_ap_destroy(apawl); diff --git a/mps/test/function/99.c b/mps/test/function/99.c index d2594f1a5e7..939991c1551 100644 --- a/mps/test/function/99.c +++ b/mps/test/function/99.c @@ -94,6 +94,7 @@ static void test(void) } } + mps_arena_park(arena); mps_ap_destroy(apamcz); mps_ap_destroy(apamc); mps_pool_destroy(poolamc); diff --git a/mps/test/test/README b/mps/test/test/README index 78ac29bdbe7..f9759c09bef 100644 --- a/mps/test/test/README 
+++ b/mps/test/test/README @@ -5,6 +5,20 @@ perl 5 (or higher). Go "perl qa help" for help, "perl qa options" to see what version of the harness you have (or look at the file "test/version"). -Some brief instructions are in guide.mm-qa in MM Information; ask - (ext 3822) if you need help, want to complain, &c. +Running on OS X +--------------- + +On OS X you can invoke the test suite like this:: + + $ cd test + $ alias qa="perl test/qa -i ../code -l ../code/xc/mps.build/Debug/mps.build/Objects-normal/x86_64/mps.o" + $ qa clib + $ qa run function/5.c + $ qa runset testsets/passing + +Each test case is compiled in its turn to the file +``test/obj/Darwin_12.3.0_i386__unix/tmp_test`` so you can debug it +with:: + + $ lldb test/obj/Darwin_12.3.0_i386__unix/tmp_test diff --git a/mps/test/testsets/passing b/mps/test/testsets/passing index fc00f37feb0..4b0a0cbe1b5 100644 --- a/mps/test/testsets/passing +++ b/mps/test/testsets/passing @@ -20,9 +20,9 @@ function/14.c function/15.c function/16.c function/17.c -% function/18.c -- tries to exhaust memory by mps_alloc -% function/19.c -- tries to exhaust memory by mps_alloc -% function/20.c -- tries to exhaust memory by mps_alloc +function/18.c +function/19.c +function/20.c function/21.c function/22.c % function/23.c -- interactive test, can't run unattended @@ -47,7 +47,7 @@ function/41.c function/42.c function/43.c function/44.c -function/45.c +% function/45.c -- setref: to non-data object @@@@ function/46.c function/47.c function/48.c @@ -60,7 +60,7 @@ function/55.c function/56.c function/57.c % 58-59 -- no such test -% function/60.c -- slow +function/60.c function/61.c function/62.c function/63.c @@ -77,7 +77,7 @@ function/73.c function/74.c function/75.c function/76.c -% function/77.c -- slow +function/77.c function/78.c function/79.c function/80.c @@ -87,7 +87,7 @@ function/83.c % 84-95 -- no such test function/96.c function/97.c -function/98.c +% function/98.c -- tries to exhaust memory by mps_arena_create function/99.c function/100.c function/101.c @@ -104,7 +104,7 @@ function/112.c function/113.c function/114.c % 115 -- no such test -% function/116.c -- tries to exhaust memory by mps_alloc +function/116.c function/117.c function/118.c function/119.c @@ -118,10 +118,10 @@ function/126.c function/127.c function/128.c function/129.c -% function/130.c -- tries to exhaust memory by mps_alloc -% function/131.c -- tries to exhaust memory by mps_alloc -% function/132.c -- failed on allocfail2: wanted > 10000, was 6840 @@@@ -% function/133.c -- failed on allocfail3: wanted > 8000, was 3060 @@@@ +% function/130.c -- job003789 +% function/131.c -- job003789 +function/132.c +function/133.c function/134.c function/135.c function/136.c @@ -134,7 +134,7 @@ function/144.c % 145-146 -- no such test function/147.c % function/148.c -- failed on inc4: wanted = 1, was 0 @@@@ -% function/149.c -- failed on allocfail2: wanted > 10000, was 6858 @@@@ +function/149.c function/150.c function/151.c function/152.c @@ -155,15 +155,15 @@ function/167.c % function/171.c -- job003495 function/200.c % 201-202 -- no such test -% function/203.c -- requires mps_count_t and mps_class_mv2 -% function/204.c -- requires mps_count_t and mps_class_mv2 -% function/205.c -- requires mps_count_t and mps_class_mv2 +function/203.c +function/204.c +function/205.c function/206.c function/207.c % 208-213 -- no such test -% function/214.c -- requires mps_count_t and mps_class_mv2 +function/214.c function/215.c -% function/223.c -- requires mps_message_type_collection_stats -% function/224.c -- 
COMMIT_LIMIT @@@@ +function/223.c +function/224.c % 225 -- no such test function/226.c diff --git a/mps/tool/branch b/mps/tool/branch index bb54f0d4527..981735d4360 100755 --- a/mps/tool/branch +++ b/mps/tool/branch @@ -1,11 +1,10 @@ #!/usr/bin/env python # -# Ravenbrook -# -# # BRANCH -- CREATE VERSION OR TASK BRANCH -# # Gareth Rees, Ravenbrook Limited, 2014-03-18 +# +# $Id$ +# Copyright (c) 2014 Ravenbrook Limited. See end of file for license. # # # 1. INTRODUCTION diff --git a/mps/tool/noaslr.c b/mps/tool/noaslr.c new file mode 100644 index 00000000000..3ae73dc9362 --- /dev/null +++ b/mps/tool/noaslr.c @@ -0,0 +1,102 @@ +/* noaslr.c: Disable ASLR on OS X Mavericks + * + * $Id: //info.ravenbrook.com/project/mps/master/code/eventcnv.c#26 $ + * Copyright (c) 2014 Ravenbrook Limited. See end of file for license. + * + * This is a command-line tool that runs another program with address + * space layout randomization (ASLR) disabled. + * + * The technique is taken from GDB via "How gdb disables ASLR in Mac + * OS X Lion" + * + * + * On OS X Mavericks, the _POSIX_SPAWN_DISABLE_ASLR constant is not + * defined in any header, but the LLDB sources reveal its value, and + * experimentally this value works. + * + */ + +#include +#include +#include +#include + +#ifndef _POSIX_SPAWN_DISABLE_ASLR +#define _POSIX_SPAWN_DISABLE_ASLR 0x100 +#endif + +int main(int argc, char **argv) +{ + extern char **environ; + pid_t pid; + posix_spawnattr_t attr; + int res, status = 1; + char *default_argv[] = {"/bin/sh", NULL}; + + if (argc >= 2) + ++ argv; + else + argv = default_argv; + + res = posix_spawnattr_init(&attr); + if (res != 0) + return res; + + res = posix_spawnattr_setflags(&attr, _POSIX_SPAWN_DISABLE_ASLR); + if (res != 0) + return res; + + res = posix_spawn(&pid, argv[0], NULL, &attr, argv, environ); + if (res != 0) + return res; + + if (waitpid(pid, &status, 0) == -1) + return 1; + + if (!WIFEXITED(status)) + return 1; + + return WEXITSTATUS(status); +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014 Ravenbrook Limited . + * All rights reserved. This is an open source license. Contact + * Ravenbrook for commercial licensing options. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Redistributions in any form must be accompanied by information on how + * to obtain complete source code for this software and any accompanying + * software that uses this software. The source code must either be + * included in the distribution or be available for no more than the cost + * of distribution plus a nominal fee, and must be freely redistributable + * under reasonable conditions. For an executable file, complete source + * code means the source code for all modules it contains. It does not + * include source code for modules or files that typically accompany the + * major components of the operating system on which the executable file + * runs. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/tool/p4-bisect b/mps/tool/p4-bisect index 2886d823c59..10af68cfc0c 100755 --- a/mps/tool/p4-bisect +++ b/mps/tool/p4-bisect @@ -132,7 +132,6 @@ def run(args): def main(argv): parser = argparse.ArgumentParser( prog='p4-bisect', epilog='For help on CMD, use p4-bisect CMD -h') - parser.set_defaults(func=partial(help, parser)) subparsers = parser.add_subparsers() a = subparsers.add_parser diff --git a/mps/tool/release b/mps/tool/release index 6daf82bed1c..d47fc36e788 100755 --- a/mps/tool/release +++ b/mps/tool/release @@ -1,11 +1,10 @@ #!/usr/bin/env python # -# Ravenbrook -# -# # RELEASE -- MAKE A RELEASE -# # Gareth Rees, Ravenbrook Limited, 2014-03-18 +# +# $Id$ +# Copyright (c) 2014 Ravenbrook Limited. See end of file for license. # # # 1. INTRODUCTION diff --git a/mps/tool/testaslr.c b/mps/tool/testaslr.c new file mode 100644 index 00000000000..f421565b7be --- /dev/null +++ b/mps/tool/testaslr.c @@ -0,0 +1,62 @@ +/* testaslr.c: Simple test for ASLR + * + * $Id: //info.ravenbrook.com/project/mps/master/code/eventcnv.c#26 $ + * Copyright (c) 2014 Ravenbrook Limited. See end of file for license. + * + * Run this program multiple times and see if gets different addresses. + */ + +#include +#include + +int data; + +int main() { + void *heap = malloc(4); + int stack = 0; + printf("data: %p text: %p stack: %p heap: %p\n", + &data, (void *)main, &stack, heap); + return 0; +} + + +/* C. COPYRIGHT AND LICENSE + * + * Copyright (C) 2014 Ravenbrook Limited . + * All rights reserved. This is an open source license. Contact + * Ravenbrook for commercial licensing options. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Redistributions in any form must be accompanied by information on how + * to obtain complete source code for this software and any accompanying + * software that uses this software. The source code must either be + * included in the distribution or be available for no more than the cost + * of distribution plus a nominal fee, and must be freely redistributable + * under reasonable conditions. For an executable file, complete source + * code means the source code for all modules it contains. 
It does not + * include source code for modules or files that typically accompany the + * major components of the operating system on which the executable file + * runs. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/mps/tool/testcases.txt b/mps/tool/testcases.txt new file mode 100644 index 00000000000..0b45b8cc548 --- /dev/null +++ b/mps/tool/testcases.txt @@ -0,0 +1,56 @@ +============= ================ ========================================== +Test case Flags Notes +============= ================ ========================================== +abqtest +airtest +amcss =P +amcsshe =P +amcssth =P =T +amsss =P +amssshe =P +apss +arenacv +awlut +awluthe +awlutth =T +btcv +bttest =N interactive +djbench =N benchmark +exposet0 =P +expt825 +finalcv =P +finaltest =P +fotest +gcbench =N benchmark +landtest +locbwcss +lockcov +lockut =T +locusss +locv +messtest +mpmss +mpsicv +mv2test +nailboardtest +poolncv +qs +sacss +segsmss +steptest =P +teletest =N interactive +walkt0 +zcoll =L +zmess +============= ================ ========================================== + +Key to flags +............ + + B -- known Bad + L -- Long runtime + N -- Not an automated test case + P -- relies on Polling or incremental collection + T -- multi-Threaded + W -- Windows-only + X -- Unix-only diff --git a/mps/tool/testcoverage b/mps/tool/testcoverage index dacae8cd5f4..56ee5789bbf 100755 --- a/mps/tool/testcoverage +++ b/mps/tool/testcoverage @@ -1,11 +1,10 @@ #!/bin/sh # -# Ravenbrook -# -# # TESTCOVERAGE -- TEST COVERAGE REPORT FOR THE MPS -# # Gareth Rees, Ravenbrook Limited, 2014-03-21 +# +# $Id$ +# Copyright (c) 2014 Ravenbrook Limited. See end of file for license. # # # 1. INTRODUCTION @@ -20,6 +19,8 @@ OS=$(uname -s) PROJECT=mps TOOL=$(dirname "$0") CODE=$TOOL/../code +MPS_TELEMETRY_CONTROL=all +export MPS_TELEMETRY_CONTROL case "$ARCH-$OS" in *-Darwin) diff --git a/mps/tool/testemscripten b/mps/tool/testemscripten new file mode 100755 index 00000000000..b4c565440db --- /dev/null +++ b/mps/tool/testemscripten @@ -0,0 +1,136 @@ +#!/bin/sh +# +# TESTEMSCRIPTEN -- TEST THE MPS WITH EMSCRIPTEN +# Gareth Rees, Ravenbrook Limited, 2014-04-17 +# +# $Id$ +# Copyright (c) 2014 Ravenbrook Limited. See end of file for license. +# +# +# 1. INTRODUCTION +# +# This shell script pulls Emscripten from GitHub and uses it to build +# the MPS. +# +# Supported platforms: ?. +# +# +# 1.1. 
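The flag column in testcases.txt is what the rewritten test runners consult when they are given a suite name rather than explicit cases: each suite is defined by the set of flags it excludes. A sketch of the equivalent manual query, reusing the grep/cut pipeline that testrun.sh applies for the Unix "testci" suite:

    # Cases in the "testci" suite: drop any case flagged B (known bad),
    # N (not an automated test) or W (Windows-only).
    grep -e '^[a-z]' tool/testcases.txt |
        grep -v -e '=[BNW]' |
        cut -d' ' -f1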
PREREQUISITES +# +# clang, curl, git, nodejs +# +# "python" needs to be Python 2 (otherwise configure fails), so you +# may need to run: +# +# port select --set python python27 +# +# You need to have a program "python2" on your path (which runs Python +# 2.7.3), so on OS X with MacPorts you need to run: +# +# ln -s /opt/local/bin/python2.7 /opt/local/bin/python2 + + +# 2. CONFIGURATION + +# Emscripten git repository +EMSCRIPTEN_REMOTE=https://github.com/kripken/emscripten.git + +# Fastcomp git repository +FASTCOMP_REMOTE=https://github.com/kripken/emscripten-fastcomp.git + +# Fastcomp clang git repository +CLANG_REMOTE=https://github.com/kripken/emscripten-fastcomp-clang + +# Directory to put everything in +TESTDIR="$PWD/.test" +mkdir -p -- "$TESTDIR" +cd -- "$TESTDIR" + + +# 3. UTILITIES + +# 3.1. checkout REPO REMOTE -- clone a git repository and pull + +checkout () { + REPO=$1 + REMOTE=$2 + if [ -d "$REPO" ]; then + echo "$REPO exists: skipping clone." + else + echo "cloning $REMOTE into $REPO." + git clone --recursive -- "$REMOTE" "$REPO" + fi + ( + cd -- "$REPO" + git pull + ) +} + + +# 4. PROCEDURE + +checkout emscripten "$EMSCRIPTEN_REMOTE" + + +# See [FASTCOMP]. + +checkout emscripten-fastcomp "$FASTCOMP_REMOTE" +( + cd emscripten-fastcomp + ( + cd tools + checkout clang "$CLANG_REMOTE"; + ) + mkdir -p build + ( + cd build + ../configure --enable-optimized --disable-assertions --enable-targets=host,js + make; + ) +) + + +# A. REFERENCES +# +# [EMPSCRIPTEN] "Emscripten SDK" +# +# +# [FASTCOMP] "LLVM Backend, aka fastcomp" +# +# +# +# B. DOCUMENT HISTORY +# +# 2014-04-17 GDR Created based on [EMSCRIPTEN] and [FASTCOMP]. +# +# +# C. COPYRIGHT AND LICENCE +# +# Copyright (c) 2014 Ravenbrook Ltd. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# +# $Id$ diff --git a/mps/tool/testopendylan b/mps/tool/testopendylan index 0c185ff2fd0..4bd1488261c 100755 --- a/mps/tool/testopendylan +++ b/mps/tool/testopendylan @@ -1,11 +1,10 @@ #!/bin/sh # -# Ravenbrook -# -# # TESTOPENDYLAN -- TEST THE MPS WITH OPENDYLAN -# # Gareth Rees, Ravenbrook Limited, 2014-03-20 +# +# $Id$ +# Copyright (c) 2014 Ravenbrook Limited. See end of file for license. # # # 1. 
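testemscripten, as shown, gets as far as building the fastcomp backend; pointing the resulting toolchain at the MPS itself would follow the usual Emscripten pattern for an autoconf project. The continuation below is an assumption, not something the script is known to do (emconfigure and emmake are Emscripten's wrapper scripts, and ~/.emscripten must already name the fastcomp build):

    # Hypothetical continuation: build the MPS with the Emscripten toolchain.
    cd "$TESTDIR/.."
    "$TESTDIR/emscripten/emconfigure" ./configure
    "$TESTDIR/emscripten/emmake" make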
INTRODUCTION diff --git a/mps/tool/testrun.bat b/mps/tool/testrun.bat index 022037b6e90..2aaf8aa4956 100755 --- a/mps/tool/testrun.bat +++ b/mps/tool/testrun.bat @@ -8,54 +8,38 @@ @rem job003489). Finally, it prints a summary of passes and failures, and @rem if there were any failures, it exits with a non-zero status code. @rem -@rem Usage:: +@rem Usage: @rem -@rem testrun.bat PLATFORM VARIETY [CASE1 CASE2 ...] +@rem testrun.bat PLATFORM VARIETY ( SUITE | CASE1 CASE2 ... ) @echo off +@rem Find test case database in same directory as this script. +@rem The incantation %%~dpF% expands %%F to a drive letter and path only. +@rem See "help for" for more details. +for %%F in ("%0") do set TEST_CASE_DB=%%~dpF%testcases.txt + set PFM=%1 shift set VARIETY=%1 shift +set TESTSUITE=%1 -set ALL_TEST_CASES=^ - abqtest.exe ^ - airtest.exe ^ - amcss.exe ^ - amcsshe.exe ^ - amcssth.exe ^ - amsss.exe ^ - amssshe.exe ^ - apss.exe ^ - arenacv.exe ^ - awlut.exe ^ - awluthe.exe ^ - awlutth.exe ^ - btcv.exe ^ - exposet0.exe ^ - expt825.exe ^ - fbmtest.exe ^ - finalcv.exe ^ - finaltest.exe ^ - fotest.exe ^ - locbwcss.exe ^ - lockcov.exe ^ - lockut.exe ^ - locusss.exe ^ - locv.exe ^ - messtest.exe ^ - mpmss.exe ^ - mpsicv.exe ^ - mv2test.exe ^ - nailboardtest.exe ^ - poolncv.exe ^ - qs.exe ^ - sacss.exe ^ - segsmss.exe ^ - steptest.exe ^ - walkt0.exe ^ - zmess.exe +@rem Make a temporary output directory for the test logs. +set LOGDIR=%TMP%\mps-%PFM%-%VARIETY%-log +echo MPS test suite +echo Logging test output to %LOGDIR% +echo Test directory: %PFM%\%VARIETY% +if exist %LOGDIR% rmdir /q /s %LOGDIR% +mkdir %LOGDIR% + +@rem Determine which tests to run. +set EXCLUDE= +if "%TESTSUITE%"=="testrun" set EXCLUDE=LNX +if "%TESTSUITE%"=="testci" set EXCLUDE=BNX +if "%TESTSUITE%"=="testall" set EXCLUDE=NX +if "%TESTSUITE%"=="testansi" set EXCLUDE=LNTX +if "%TESTSUITE%"=="testpoll" set EXCLUDE=LNPTX @rem Ensure that test cases don't pop up dialog box on abort() set MPS_TESTLIB_NOABORT=true @@ -63,36 +47,42 @@ set TEST_COUNT=0 set PASS_COUNT=0 set FAIL_COUNT=0 set SEPARATOR=---------------------------------------- -set LOGDIR=%TMP%\mps-%PFM%-%VARIETY%-log -echo Logging test output to %LOGDIR% -if exist %LOGDIR% rmdir /q /s %LOGDIR% -mkdir %LOGDIR% -if "%1"=="" call :run_tests %ALL_TEST_CASES% +if "%EXCLUDE%"=="" goto :args +for /f "tokens=1" %%T IN ('type %TEST_CASE_DB% ^|^ + findstr /b /r [abcdefghijklmnopqrstuvwxyz] ^|^ + findstr /v /r =[%EXCLUDE%]') do call :run_test %%T +goto :done +:args +if "%1"=="" goto :done +call :run_test %1 +shift +goto :args + +:done if "%FAIL_COUNT%"=="0" ( echo Tests: %TEST_COUNT%. All tests pass. - exit 0 + exit /b 0 ) else ( echo Tests: %TEST_COUNT%. Passes: %PASS_COUNT%. Failures: %FAIL_COUNT%. - exit 1 + exit /b 1 ) -:run_tests -if "%1"=="" exit /b +:run_test set /a TEST_COUNT=%TEST_COUNT%+1 +set LOGTEST=%LOGDIR%\%TEST_COUNT%-%1 echo Running %1 -%PFM%\%VARIETY%\%1 > %LOGDIR%\%1 +%PFM%\%VARIETY%\%1 > %LOGTEST% if "%errorlevel%"=="0" ( set /a PASS_COUNT=%PASS_COUNT%+1 ) else ( echo %SEPARATOR%%SEPARATOR% - type %LOGDIR%\%1 + type %LOGTEST% echo %SEPARATOR%%SEPARATOR% set /a FAIL_COUNT=%FAIL_COUNT%+1 ) -shift -goto run_tests +exit /b @rem C. COPYRIGHT AND LICENSE diff --git a/mps/tool/testrun.sh b/mps/tool/testrun.sh index 175443bf820..cdb41ac458b 100755 --- a/mps/tool/testrun.sh +++ b/mps/tool/testrun.sh @@ -12,68 +12,60 @@ # # Usage:: # -# testrun.sh DIR [CASE1 CASE2 ...] 
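With this rewrite, testrun.bat no longer hard-codes the list of test executables: it locates testcases.txt next to itself and either runs a named suite (excluding cases by flag, with X marking Unix-only cases) or just the cases listed on the command line, logging each run to a numbered file under %LOGDIR%. Example invocations (the platform code w3i6mv is illustrative):

    testrun.bat w3i6mv cool testci
    testrun.bat w3i6mv hot amcss awlut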
- -ALL_TEST_CASES=" - abqtest - airtest - amcss - amcsshe - amcssth - amsss - amssshe - apss - arenacv - awlut - awluthe - awlutth - btcv - exposet0 - expt825 - fbmtest - finalcv - finaltest - fotest - locbwcss - lockcov - lockut - locusss - locv - messtest - mpmss - mpsicv - mv2test - nailboardtest - poolncv - qs - sacss - segsmss - steptest - walkt0 - zmess -" -# bttest -- interactive, so cannot be run unattended -# djbench -- benchmark, not test case -# gcbench -- benchmark, not test case -# teletest -- interactive, so cannot be run unattended -# zcoll -- takes too long to be useful as a regularly run smoke test +# testrun.sh DIR ( SUITE | CASE1 CASE2 [...] ) +# +# You can use this feature to run the same test many times, to get +# lots of random coverage. For example:: +# +# yes amcss | head -100 | xargs tool/testrun.sh code/xc/Debug +# +# This runs the AMC stress test 100 times from the code/xc/Debug +# directory, reporting all failures. # Make a temporary output directory for the test logs. LOGDIR=$(mktemp -d /tmp/mps.log.XXXXXX) -TEST_DIR=$1 echo "MPS test suite" echo "Logging test output to $LOGDIR" -echo "Test directory: $TEST_DIR" -shift -TEST_CASES=${*:-${ALL_TEST_CASES}} -SEPARATOR="----------------------------------------" +# First argument is the directory containing the test cases. +TEST_DIR=$1 +shift +echo "Test directory: $TEST_DIR" + +# Determine which tests to run. +TEST_CASE_DB=$(dirname -- "$0")/testcases.txt +if [ $# -eq 1 ]; then + TEST_SUITE=$1 + echo "Test suite: $TEST_SUITE" + case $TEST_SUITE in + testrun) EXCLUDE="LNW" ;; + testci) EXCLUDE="BNW" ;; + testall) EXCLUDE="NW" ;; + testansi) EXCLUDE="LNTW" ;; + testpoll) EXCLUDE="LNPTW" ;; + *) + echo "Test suite $TEST_SUITE not recognized." + exit 1 ;; + esac + TEST_CASES=$(<"$TEST_CASE_DB" grep -e '^[a-z]' | + grep -v -e "=[$EXCLUDE]" | + cut -d' ' -f1) +else + echo "$# test cases from the command line" + TEST_CASES=$* +fi + +SEPARATOR=---------------------------------------- TEST_COUNT=0 PASS_COUNT=0 FAIL_COUNT=0 for TESTCASE in $TEST_CASES; do - TEST="$(basename -- "$TESTCASE")" - LOGTEST="$LOGDIR/$TEST" + TEST=$(basename -- "$TESTCASE") + LOGTEST=$LOGDIR/$TEST_COUNT-$TEST + TELEMETRY=$LOGDIR/$TEST_COUNT-$TEST-io + MPS_TELEMETRY_FILENAME=$TELEMETRY.log + export MPS_TELEMETRY_FILENAME + echo "Running $TEST" TEST_COUNT=$(expr $TEST_COUNT + 1) if "$TEST_DIR/$TESTCASE" > "$LOGTEST" 2>&1; then @@ -86,6 +78,18 @@ for TESTCASE in $TEST_CASES; do echo ${SEPARATOR}${SEPARATOR} FAIL_COUNT=$(expr $FAIL_COUNT + 1) fi + + if [ -f "$MPS_TELEMETRY_FILENAME" ]; then + "$TEST_DIR/mpseventcnv" -f "$MPS_TELEMETRY_FILENAME" > "$TELEMETRY.cnv" + gzip "$MPS_TELEMETRY_FILENAME" + "$TEST_DIR/mpseventtxt" < "$TELEMETRY.cnv" > "$TELEMETRY.txt" + if [ -x "$TEST_DIR/mpseventsql" ]; then + MPS_TELEMETRY_DATABASE=$TELEMETRY.db + export MPS_TELEMETRY_DATABASE + "$TEST_DIR/mpseventsql" < "$TELEMETRY.cnv" >> "$LOGTEST" 2>&1 + fi + rm -f "$TELEMETRY.cnv" "$TELEMETRY.txt" "$TELEMETRY.db" + fi done if [ $FAIL_COUNT = 0 ]; then echo "Tests: $TEST_COUNT. All tests pass."
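The Unix runner now also captures telemetry for every case: MPS_TELEMETRY_FILENAME points each test at its own event log, which is decoded with mpseventcnv and mpseventtxt (and loaded by mpseventsql where that tool was built) before the intermediate files are deleted and the raw log is gzipped alongside the test output. A sketch of re-examining a saved log after a failure; the numbered file name and the Xcode build directory are examples, so substitute whatever testrun.sh reported:

    LOGDIR=/tmp/mps.log.XXXXXX           # as echoed by "Logging test output to ..."
    gunzip "$LOGDIR/3-amcss-io.log.gz"   # raw telemetry kept for each test case
    code/xc/Debug/mpseventcnv -f "$LOGDIR/3-amcss-io.log" |
        code/xc/Debug/mpseventtxt | less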