stacks: move runtime stack to a separate structure

This commit is contained in:
Daniel Kochmański 2024-04-03 14:00:38 +02:00
parent 45f3be3aa1
commit 51157bbf9d
7 changed files with 70 additions and 69 deletions

View file

@@ -1152,9 +1152,9 @@ update_bytes_consed () {
 static void
 ecl_mark_env(struct cl_env_struct *env)
 {
-  if (env->stack) {
-    GC_push_conditional((void *)env->stack, (void *)env->stack_top, 1);
-    GC_set_mark_bit((void *)env->stack);
+  if (env->run_stack.org) {
+    GC_push_conditional((void *)env->run_stack.org, (void *)env->run_stack.top, 1);
+    GC_set_mark_bit((void *)env->run_stack.org);
   }
   if (env->frs_stack.top) {
     GC_push_conditional((void *)env->frs_stack.org, (void *)(env->frs_stack.top+1), 1);

View file

@@ -55,7 +55,7 @@ typedef struct cl_compiler_env *cl_compiler_ptr;
 #define asm_begin(env) current_pc(env)
 #define current_pc(env) ECL_STACK_INDEX(env)
 #define set_pc(env,n) asm_clear(env,n)
-#define asm_ref(env,n) (cl_fixnum)((env)->stack[n])
+#define asm_ref(env,n) (cl_fixnum)((env)->run_stack.org[n])
 static void asm_clear(cl_env_ptr env, cl_index h);
 static void asm_op(cl_env_ptr env, cl_fixnum op);
 static void asm_op2(cl_env_ptr env, int op, int arg);
@@ -192,7 +192,7 @@ asm_end(cl_env_ptr env, cl_index beginning, cl_object definition) {
   output->bytecodes.flex = ECL_NIL;
   output->bytecodes.nlcl = ecl_make_fixnum(c_env->env_width);
   for (i = 0, code = (cl_opcode *)output->bytecodes.code; i < code_size; i++) {
-    code[i] = (cl_opcode)(cl_fixnum)(env->stack[beginning+i]);
+    code[i] = (cl_opcode)(cl_fixnum)(env->run_stack.org[beginning+i]);
   }
   output->bytecodes.entry = _ecl_bytecodes_dispatch_vararg;
   ecl_set_function_source_file_info(output, (file == OBJNULL)? ECL_NIL : file,
@@ -257,7 +257,7 @@ asm_complete(cl_env_ptr env, int op, cl_index pc) {
   else if (ecl_unlikely(delta < -MAX_OPARG || delta > MAX_OPARG))
     FEprogram_error("Too large jump", 0);
   else {
-    env->stack[pc] = (cl_object)(cl_fixnum)delta;
+    env->run_stack.org[pc] = (cl_object)(cl_fixnum)delta;
   }
 }

View file

@@ -954,7 +954,7 @@ sharp_asterisk_reader(cl_object in, cl_object c, cl_object d)
   last = ECL_STACK_REF(env,-1);
   x = ecl_alloc_simple_vector(dim, ecl_aet_bit);
   for (i = 0; i < dim; i++) {
-    elt = (i < dimcount) ? env->stack[sp+i] : last;
+    elt = (i < dimcount) ? env->run_stack.org[sp+i] : last;
     if (elt == ecl_make_fixnum(0))
       x->vector.self.bit[i/CHAR_BIT] &= ~(0200 >> i%CHAR_BIT);
     else

View file

@@ -138,7 +138,7 @@ ecl_cs_set_org(cl_env_ptr env)
 cl_object *
 ecl_stack_set_size(cl_env_ptr env, cl_index tentative_new_size)
 {
-  cl_index top = env->stack_top - env->stack;
+  cl_index top = env->run_stack.top - env->run_stack.org;
   cl_object *new_stack, *old_stack;
   cl_index safety_area = ecl_option_values[ECL_OPT_LISP_STACK_SAFETY_AREA];
   cl_index new_size = tentative_new_size + 2*safety_area;
@@ -150,27 +150,27 @@ ecl_stack_set_size(cl_env_ptr env, cl_index tentative_new_size)
     FEerror("Internal error: cannot shrink stack below stack top.",0);
   }
-  old_stack = env->stack;
+  old_stack = env->run_stack.org;
   new_stack = (cl_object *)ecl_alloc_atomic(new_size * sizeof(cl_object));
   ECL_STACK_RESIZE_DISABLE_INTERRUPTS(env);
-  memcpy(new_stack, old_stack, env->stack_size * sizeof(cl_object));
-  env->stack_size = new_size;
-  env->stack_limit_size = new_size - 2*safety_area;
-  env->stack = new_stack;
-  env->stack_top = env->stack + top;
-  env->stack_limit = env->stack + (new_size - 2*safety_area);
+  memcpy(new_stack, old_stack, env->run_stack.size * sizeof(cl_object));
+  env->run_stack.size = new_size;
+  env->run_stack.limit_size = new_size - 2*safety_area;
+  env->run_stack.org = new_stack;
+  env->run_stack.top = env->run_stack.org + top;
+  env->run_stack.limit = env->run_stack.org + (new_size - 2*safety_area);
   /* A stack always has at least one element. This is assumed by cl__va_start
    * and friends, which take a sp=0 to have no arguments.
    */
   if (top == 0) {
-    *(env->stack_top++) = ecl_make_fixnum(0);
+    *(env->run_stack.top++) = ecl_make_fixnum(0);
   }
   ECL_STACK_RESIZE_ENABLE_INTERRUPTS(env);
   ecl_dealloc(old_stack);
-  return env->stack_top;
+  return env->run_stack.top;
 }

 void
@@ -188,41 +188,41 @@ FEstack_advance(void)
 cl_object *
 ecl_stack_grow(cl_env_ptr env)
 {
-  return ecl_stack_set_size(env, env->stack_size + env->stack_size / 2);
+  return ecl_stack_set_size(env, env->run_stack.size + env->run_stack.size / 2);
 }

 cl_index
 ecl_stack_push_values(cl_env_ptr env) {
   cl_index i = env->nvalues;
-  cl_object *b = env->stack_top;
+  cl_object *b = env->run_stack.top;
   cl_object *p = b + i;
-  if (p >= env->stack_limit) {
+  if (p >= env->run_stack.limit) {
     b = ecl_stack_grow(env);
     p = b + i;
   }
-  env->stack_top = p;
+  env->run_stack.top = p;
   memcpy(b, env->values, i * sizeof(cl_object));
   return i;
 }

 void
 ecl_stack_pop_values(cl_env_ptr env, cl_index n) {
-  cl_object *p = env->stack_top - n;
-  if (ecl_unlikely(p < env->stack))
+  cl_object *p = env->run_stack.top - n;
+  if (ecl_unlikely(p < env->run_stack.org))
     FEstack_underflow();
   env->nvalues = n;
-  env->stack_top = p;
+  env->run_stack.top = p;
   memcpy(env->values, p, n * sizeof(cl_object));
 }

 cl_object
 ecl_stack_frame_open(cl_env_ptr env, cl_object f, cl_index size)
 {
-  cl_object *base = env->stack_top;
+  cl_object *base = env->run_stack.top;
   cl_index bindex;
   if (size) {
-    if ((env->stack_limit - base) < size) {
-      base = ecl_stack_set_size(env, env->stack_size + size);
+    if ((env->run_stack.limit - base) < size) {
+      base = ecl_stack_set_size(env, env->run_stack.size + size);
     }
   }
   bindex = ECL_STACK_INDEX(env);
@@ -232,7 +232,7 @@ ecl_stack_frame_open(cl_env_ptr env, cl_object f, cl_index size)
   f->frame.size = size;
   f->frame.sp = bindex;
   f->frame.env = env;
-  env->stack_top = (base + size);
+  env->run_stack.top = (base + size);
   return f;
 }
@@ -240,11 +240,11 @@ void
 ecl_stack_frame_push(cl_object f, cl_object o)
 {
   cl_env_ptr env = f->frame.env;
-  cl_object *top = env->stack_top;
-  if (top >= env->stack_limit) {
+  cl_object *top = env->run_stack.top;
+  if (top >= env->run_stack.limit) {
     top = ecl_stack_grow(env);
   }
-  env->stack_top = ++top;
+  env->run_stack.top = ++top;
   *(top-1) = o;
   f->frame.size++;
 }
@@ -835,7 +835,7 @@ si_get_limit(cl_object type)
   else if (type == @'ext::c-stack')
     output = env->c_stack.limit_size;
   else if (type == @'ext::lisp-stack')
-    output = env->stack_limit_size;
+    output = env->run_stack.limit_size;
   else if (type == @'ext::heap-size') {
     /* size_t can be larger than cl_index */
     ecl_return1(env, ecl_make_unsigned_integer(cl_core.max_heap_size));
@@ -885,9 +885,9 @@ init_stacks(cl_env_ptr env)
   ihs_org.lex_env = ECL_NIL;
   ihs_org.index = 0;
   /* lisp stack */
-  env->stack = NULL;
-  env->stack_top = NULL;
-  env->stack_limit = NULL;
-  env->stack_size = 0;
+  env->run_stack.org = NULL;
+  env->run_stack.top = NULL;
+  env->run_stack.limit = NULL;
+  env->run_stack.size = 0;
   ecl_stack_set_size(env, ecl_option_values[ECL_OPT_LISP_STACK_SIZE]);
 }

View file

@@ -389,10 +389,10 @@ handle_all_queued_interrupt_safe(cl_env_ptr env)
   cl_object big_register[ECL_BIGNUM_REGISTER_NUMBER];
   memcpy(big_register, env->big_register, ECL_BIGNUM_REGISTER_NUMBER*sizeof(cl_object));
   ecl_init_bignum_registers(env);
-  /* We might have been interrupted while we push/pop in the
-   * stack. Increasing env->stack_top ensures that we don't
-   * overwrite the topmost stack value. */
-  env->stack_top++;
+  /* We might have been interrupted while we push/pop in the stack. Increasing
+   * env->run_stack.top ensures that we don't overwrite the topmost stack
+   * value. */
+  env->run_stack.top++;
   /* We also need to save and restore the (top+1)'th frame and
    * binding stack value to prevent overwriting it.
    * INV: Due to the stack safety areas we don't need to check
@@ -406,7 +406,7 @@ handle_all_queued_interrupt_safe(cl_env_ptr env)
   /* ... and restore everything again */
   memcpy(env->bds_stack.top+1, &top_binding, sizeof(struct ecl_bds_frame));
   memcpy(env->frs_stack.top+1, &top_frame, sizeof(struct ecl_frame));
-  env->stack_top--;
+  env->run_stack.top--;
   ecl_clear_bignum_registers(env);
   memcpy(env->big_register, big_register, ECL_BIGNUM_REGISTER_NUMBER*sizeof(cl_object));
   env->packages_to_be_created_p = packages_to_be_created_p;

View file

@@ -10,6 +10,17 @@ extern "C" {
 #define _ECL_ARGS(x) x

+/* The runtime stack, which is used mainly for keeping the arguments of a
+ * function before it is invoked, and also by the compiler and by the reader
+ * when they are building some data structure. */
+struct ecl_runtime_stack {
+  cl_index size;
+  cl_index limit_size;
+  cl_object *org;
+  cl_object *top;
+  cl_object *limit;
+};
+
 /* The BinDing Stack stores the bindings of special variables. */
 struct ecl_binding_stack {
 #ifdef ECL_THREADS
@@ -79,21 +79,11 @@ struct cl_env_struct {
   /* The four stacks in ECL. */

-  /*
-   * The lisp stack, which is used mainly for keeping the arguments of a
-   * function before it is invoked, and also by the compiler and by the
-   * reader when they are building some data structure.
-   */
-  cl_index stack_size;
-  cl_index stack_limit_size;
-  cl_object *stack;
-  cl_object *stack_top;
-  cl_object *stack_limit;
+  struct ecl_runtime_stack run_stack;
   struct ecl_binding_stack bds_stack;
   struct ecl_frames_stack frs_stack;
   struct ecl_history_stack ihs_stack;
-  struct ecl_c_stack c_stack;
+  struct ecl_c_stack c_stack; /* shadow stack */

   /* Private variables used by different parts of ECL: */
   /* ... the reader and printer ... */

View file

@@ -364,45 +364,45 @@ extern ECL_API ecl_frame_ptr _ecl_frs_push(cl_env_ptr);
  * LISP STACK
  *************/

-#define ECL_STACK_INDEX(env) ((env)->stack_top - (env)->stack)
+#define ECL_STACK_INDEX(env) ((env)->run_stack.top - (env)->run_stack.org)

 #define ECL_STACK_PUSH(the_env,o) do { \
   const cl_env_ptr __env = (the_env); \
-  cl_object *__new_top = __env->stack_top; \
-  if (ecl_unlikely(__new_top >= __env->stack_limit)) { \
+  cl_object *__new_top = __env->run_stack.top; \
+  if (ecl_unlikely(__new_top >= __env->run_stack.limit)) { \
     __new_top = ecl_stack_grow(__env); \
   } \
-  __env->stack_top = __new_top+1; \
+  __env->run_stack.top = __new_top+1; \
   *__new_top = (o); } while (0)

-#define ECL_STACK_POP_UNSAFE(env) *(--((env)->stack_top))
+#define ECL_STACK_POP_UNSAFE(env) *(--((env)->run_stack.top))

-#define ECL_STACK_REF(env,n) ((env)->stack_top[n])
+#define ECL_STACK_REF(env,n) ((env)->run_stack.top[n])

 #define ECL_STACK_SET_INDEX(the_env,ndx) do { \
   const cl_env_ptr __env = (the_env); \
-  cl_object *__new_top = __env->stack + (ndx); \
-  if (ecl_unlikely(__new_top > __env->stack_top)) \
+  cl_object *__new_top = __env->run_stack.org + (ndx); \
+  if (ecl_unlikely(__new_top > __env->run_stack.top)) \
     FEstack_advance(); \
-  __env->stack_top = __new_top; } while (0)
+  __env->run_stack.top = __new_top; } while (0)

 #define ECL_STACK_POP_N(the_env,n) do { \
   const cl_env_ptr __env = (the_env); \
-  cl_object *__new_top = __env->stack_top - (n); \
-  if (ecl_unlikely(__new_top < __env->stack)) \
+  cl_object *__new_top = __env->run_stack.top - (n); \
+  if (ecl_unlikely(__new_top < __env->run_stack.org)) \
     FEstack_underflow(); \
-  __env->stack_top = __new_top; } while (0)
+  __env->run_stack.top = __new_top; } while (0)

-#define ECL_STACK_POP_N_UNSAFE(the_env,n) ((the_env)->stack_top -= (n))
+#define ECL_STACK_POP_N_UNSAFE(the_env,n) ((the_env)->run_stack.top -= (n))

 #define ECL_STACK_PUSH_N(the_env,n) do { \
   const cl_env_ptr __env = (the_env) ; \
   cl_index __aux = (n); \
-  cl_object *__new_top = __env->stack_top; \
-  while (ecl_unlikely((__env->stack_limit - __new_top) <= __aux)) { \
+  cl_object *__new_top = __env->run_stack.top; \
+  while (ecl_unlikely((__env->run_stack.limit - __new_top) <= __aux)) { \
     __new_top = ecl_stack_grow(__env); \
   } \
-  __env->stack_top = __new_top + __aux; } while (0)
+  __env->run_stack.top = __new_top + __aux; } while (0)

 #define ECL_STACK_FRAME_REF(f,ndx) ((f)->frame.env->stack[(f)->frame.base+(ndx)])
 #define ECL_STACK_FRAME_SET(f,ndx,o) do { ECL_STACK_FRAME_REF(f,ndx) = (o); } while(0)