diff --git a/src/c/alloc_2.d b/src/c/alloc_2.d
index ba32eb1ab..58e0c35c7 100755
--- a/src/c/alloc_2.d
+++ b/src/c/alloc_2.d
@@ -439,6 +439,7 @@ cl_object_mark_proc(void *addr, struct GC_ms_entry *msp, struct GC_ms_entry *msl
 # endif
 # ifdef ECL_THREADS
         case t_process:
+                MAYBE_MARK(o->process.queue_record);
                 MAYBE_MARK(o->process.waiting_for);
                 MAYBE_MARK(o->process.exit_values);
                 MAYBE_MARK(o->process.exit_lock);
@@ -979,7 +980,8 @@ init_alloc(void)
                 to_bitmap(&o, &(o.process.parent)) |
                 to_bitmap(&o, &(o.process.exit_lock)) |
                 to_bitmap(&o, &(o.process.exit_values)) |
-                to_bitmap(&o, &(o.process.waiting_for));
+                to_bitmap(&o, &(o.process.waiting_for)) |
+                to_bitmap(&o, &(o.process.queue_record));
         type_info[t_lock].descriptor =
                 to_bitmap(&o, &(o.lock.name)) |
                 to_bitmap(&o, &(o.lock.owner)) |
diff --git a/src/c/threads/condition_variable.d b/src/c/threads/condition_variable.d
index 5245dbb6e..4c66cdbc6 100644
--- a/src/c/threads/condition_variable.d
+++ b/src/c/threads/condition_variable.d
@@ -88,7 +88,8 @@ mp_condition_variable_signal(cl_object cv)
 {
         if (cv->condition_variable.waiter != Cnil) {
                 cv->condition_variable.waiter = Cnil;
-                ecl_wakeup_waiters(cv, ECL_WAKEUP_ONE | ECL_WAKEUP_RESET_FLAG);
+                ecl_wakeup_waiters(ecl_process_env(), cv,
+                                   ECL_WAKEUP_ONE | ECL_WAKEUP_RESET_FLAG);
         }
         @(return Ct)
 }
@@ -98,7 +99,8 @@ mp_condition_variable_broadcast(cl_object cv)
 {
         if (cv->condition_variable.waiter != Cnil) {
                 cv->condition_variable.waiter = Cnil;
-                ecl_wakeup_waiters(cv, ECL_WAKEUP_ALL | ECL_WAKEUP_RESET_FLAG);
+                ecl_wakeup_waiters(ecl_process_env(), cv,
+                                   ECL_WAKEUP_ALL | ECL_WAKEUP_RESET_FLAG);
         }
         @(return Ct)
 }
diff --git a/src/c/threads/mutex.d b/src/c/threads/mutex.d
index 75143dc20..642488a55 100644
--- a/src/c/threads/mutex.d
+++ b/src/c/threads/mutex.d
@@ -111,7 +111,7 @@ mp_giveup_lock(cl_object lock)
         }
         if (--lock->lock.counter == 0) {
                 lock->lock.owner = Cnil;
-                ecl_wakeup_waiters(lock, ECL_WAKEUP_ONE);
+                ecl_wakeup_waiters(env, lock, ECL_WAKEUP_ONE);
         }
         ecl_return1(env, Ct);
 }
diff --git a/src/c/threads/process.d b/src/c/threads/process.d
index d86284961..93d9ab9a0 100644
--- a/src/c/threads/process.d
+++ b/src/c/threads/process.d
@@ -300,6 +300,8 @@ alloc_process(cl_object name, cl_object initial_bindings)
         }
         process->process.initial_bindings = array;
         process->process.exit_lock = mp_make_lock(0);
+        process->process.waiting_for = Cnil;
+        process->process.queue_record = ecl_list1(process);
         return process;
 }
 
@@ -730,6 +732,8 @@ init_threads(cl_env_ptr env)
         process->process.args = Cnil;
         process->process.thread = main_thread;
         process->process.env = env;
+        process->process.waiting_for = Cnil;
+        process->process.queue_record = ecl_list1(process);
         env->own_process = process;
 
diff --git a/src/c/threads/queue.d b/src/c/threads/queue.d
index 2122ff0ca..824e00e17 100644
--- a/src/c/threads/queue.d
+++ b/src/c/threads/queue.d
@@ -54,26 +54,23 @@ ecl_make_atomic_queue()
 }
 
 void
-ecl_atomic_queue_nconc(cl_object lock_list_pair, cl_object item)
+ecl_atomic_queue_nconc(cl_env_ptr the_env, cl_object lock_list_pair, cl_object new_tail)
 {
-        cl_object new_head = ecl_list1(item);
-        cl_env_ptr the_env = ecl_process_env();
         ecl_disable_interrupts_env(the_env);
         {
                 cl_object *lock = &ECL_CONS_CAR(lock_list_pair);
                 cl_object *queue = &ECL_CONS_CDR(lock_list_pair);
                 ecl_get_spinlock(the_env, lock);
-                ecl_nconc(lock_list_pair, new_head);
+                ecl_nconc(lock_list_pair, new_tail);
                 ecl_giveup_spinlock(lock);
         }
         ecl_enable_interrupts_env(the_env);
 }
 
 cl_object
-ecl_atomic_queue_pop(cl_object lock_list_pair)
+ecl_atomic_queue_pop(cl_env_ptr the_env, cl_object lock_list_pair)
 {
         cl_object output;
-        cl_env_ptr the_env = ecl_process_env();
         ecl_disable_interrupts_env(the_env);
         {
                 cl_object *lock = &ECL_CONS_CAR(lock_list_pair);
@@ -91,10 +88,9 @@ ecl_atomic_queue_pop(cl_object lock_list_pair)
 }
 
 cl_object
-ecl_atomic_queue_pop_all(cl_object lock_list_pair)
+ecl_atomic_queue_pop_all(cl_env_ptr the_env, cl_object lock_list_pair)
 {
         cl_object output;
-        cl_env_ptr the_env = ecl_process_env();
         ecl_disable_interrupts_env(the_env);
         {
                 cl_object *lock = &ECL_CONS_CAR(lock_list_pair);
@@ -109,9 +105,8 @@ ecl_atomic_queue_pop_all(cl_object lock_list_pair)
 }
 
 void
-ecl_atomic_queue_delete(cl_object lock_list_pair, cl_object item)
+ecl_atomic_queue_delete(cl_env_ptr the_env, cl_object lock_list_pair, cl_object item)
 {
-        cl_env_ptr the_env = ecl_process_env();
         ecl_disable_interrupts_env(the_env);
         {
                 cl_object *lock = &ECL_CONS_CAR(lock_list_pair);
@@ -170,24 +165,41 @@ waiting_time(cl_index iteration, struct ecl_timeval *start)
 
 void
 ecl_wait_on(cl_object (*condition)(cl_env_ptr, cl_object), cl_object o)
 {
-        cl_env_ptr the_env = ecl_process_env();
-        cl_object own_process = the_env->own_process;
-        sigset_t original, empty;
+        const cl_env_ptr the_env = ecl_process_env();
+        volatile cl_object own_process = the_env->own_process;
+        volatile cl_object record;
+        volatile sigset_t original;
+
+        /* 0) We reserve a record for the queue. In order to avoid
+         * using the garbage collector, we reuse records */
+        record = own_process->process.queue_record;
+        unlikely_if (record == Cnil) {
+                record = ecl_list1(own_process);
+        } else {
+                own_process->process.queue_record = Cnil;
+        }
 
         /* 1) First we block all signals. */
-        sigemptyset(&empty);
-        pthread_sigmask(SIG_SETMASK, &original, &empty);
+        {
+                sigset_t empty;
+                sigemptyset(&empty);
+                pthread_sigmask(SIG_SETMASK, &original, &empty);
+        }
+
+        /* 2) Now we add ourselves to the queue. In order to avoid a
+         * call to the GC, we try to reuse records. */
+        ecl_atomic_queue_nconc(the_env, o->lock.waiter, record);
+        own_process->process.waiting_for = o;
 
         CL_UNWIND_PROTECT_BEGIN(the_env) {
-                /* 2) Now we add ourselves to the queue. */
-                ecl_atomic_queue_nconc(o->lock.waiter, own_process);
-                own_process->process.waiting_for = o;
                 /* 3) At this point we may receive signals, but we
-                 * might have missed the wakeup one that happened
-                 * before 1), which is why we start with the check*/
-                if (cl_second(o->lock.waiter) != own_process ||
-                    condition(the_env, o) == Cnil) {
+                 * might have missed a wakeup event if that happened
+                 * between 0) and 2), which is why we start with the
+                 * check*/
+                cl_object queue = ECL_CONS_CDR(o->lock.waiter);
+                if (ECL_CONS_CAR(queue) != own_process ||
+                    condition(the_env, o) == Cnil)
+                {
                         do {
                                 /* This will wait until we get a signal that
                                  * demands some code being executed. Note that
@@ -203,7 +215,9 @@ ecl_wait_on(cl_object (*condition)(cl_env_ptr, cl_object), cl_object o)
                 /* 4) At this point we wrap up.
                    We remove ourselves from the queue and restore signals, which were */
                 own_process->process.waiting_for = Cnil;
-                ecl_atomic_queue_delete(o->lock.waiter, own_process);
+                ecl_atomic_queue_delete(the_env, o->lock.waiter, own_process);
+                own_process->process.queue_record = record;
+                ECL_RPLACD(record, Cnil);
                 pthread_sigmask(SIG_SETMASK, NULL, &original);
         } CL_UNWIND_PROTECT_END;
 }
@@ -218,9 +232,9 @@ wakeup_this(cl_object p, int flags)
 }
 
 static void
-wakeup_all(cl_object waiter, int flags)
+wakeup_all(cl_env_ptr the_env, cl_object waiter, int flags)
 {
-        cl_object queue = ecl_atomic_queue_pop_all(waiter);
+        cl_object queue = ecl_atomic_queue_pop_all(the_env, waiter);
         queue = cl_nreverse(queue);
         while (!Null(queue)) {
                 cl_object process = ECL_CONS_CAR(queue);
@@ -231,12 +245,13 @@ wakeup_all(cl_object waiter, int flags)
 }
 
 static void
-wakeup_one(cl_object waiter, int flags)
+wakeup_one(cl_env_ptr the_env, cl_object waiter, int flags)
 {
         do {
-                cl_object next = ecl_atomic_queue_pop(waiter);
+                cl_object next = ECL_CONS_CDR(waiter);
                 if (Null(next))
                         return;
+                next = ECL_CONS_CAR(next);
                 if (next->process.active) {
                         wakeup_this(next, flags);
                         return;
@@ -245,15 +260,15 @@ wakeup_one(cl_object waiter, int flags)
 }
 
 void
-ecl_wakeup_waiters(cl_object o, int flags)
+ecl_wakeup_waiters(cl_env_ptr the_env, cl_object o, int flags)
 {
         cl_object waiter = o->lock.waiter;
         print_lock("releasing\t", o);
         if (ECL_CONS_CDR(waiter) != Cnil) {
                 if (flags & ECL_WAKEUP_ALL) {
-                        wakeup_all(waiter, flags);
+                        wakeup_all(the_env, waiter, flags);
                 } else {
-                        wakeup_one(waiter, flags);
+                        wakeup_one(the_env, waiter, flags);
                 }
         }
         sched_yield();
diff --git a/src/c/unixint.d b/src/c/unixint.d
index 0310a5e4e..e443b3388 100644
--- a/src/c/unixint.d
+++ b/src/c/unixint.d
@@ -607,6 +607,7 @@ handler_fn_protype(sigsegv_handler, int sig, siginfo_t *info, void *aux)
                 return;
         }
 # endif
+        si_dump_c_backtrace(MAKE_FIXNUM(30));
 # if 0 && defined(SA_ONSTACK)
         /* The handler is executed in an externally allocated stack, and
          * thus it is not safe to execute lisp code here. We just bounce
diff --git a/src/h/internal.h b/src/h/internal.h
index ac1fe216a..b8d0bbce0 100644
--- a/src/h/internal.h
+++ b/src/h/internal.h
@@ -469,13 +469,13 @@ extern void ecl_giveup_spinlock(cl_object *lock);
 extern cl_object ecl_make_atomic_queue();
 #define ecl_atomic_queue_list(queue) ECL_CONS_CDR(queue)
-extern void ecl_atomic_queue_nconc(cl_object queue, cl_object item);
-extern cl_object ecl_atomic_queue_pop(cl_object queue);
-extern cl_object ecl_atomic_queue_pop_all(cl_object queue);
-extern void ecl_atomic_queue_delete(cl_object queue, cl_object item);
+extern void ecl_atomic_queue_nconc(cl_env_ptr the_env, cl_object queue, cl_object item);
+extern cl_object ecl_atomic_queue_pop(cl_env_ptr the_env, cl_object queue);
+extern cl_object ecl_atomic_queue_pop_all(cl_env_ptr the_env, cl_object queue);
+extern void ecl_atomic_queue_delete(cl_env_ptr the_env, cl_object queue, cl_object item);
 extern void ecl_wait_on(cl_object (*condition)(cl_env_ptr, cl_object), cl_object o);
-extern void ecl_wakeup_waiters(cl_object o, bool all);
+extern void ecl_wakeup_waiters(cl_env_ptr the_env, cl_object o, bool all);
 #endif
 
 /* threads/rwlock.d */
diff --git a/src/h/object.h b/src/h/object.h
index 59a60ec25..2edacd435 100644
--- a/src/h/object.h
+++ b/src/h/object.h
@@ -896,6 +896,7 @@ struct ecl_process {
         int trap_fpe_bits;
         cl_object exit_values;
         cl_object waiting_for;
+        cl_object queue_record;
 };
 
 #define ECL_WAKEUP_ONE 0
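Note on the technique: the central idea of this patch is that every process owns exactly one pre-allocated list cell, process.queue_record, which ecl_wait_on() splices onto the spinlock-protected waiter queue (step 2) and takes back when it leaves (step 4), so the wait path never allocates and therefore never has to call into the garbage collector while signals are blocked and interrupts are disabled. The sketch below illustrates that pattern outside of ECL; it is only an illustration, not ECL code: all names (struct node, struct process, struct waiter_queue, enqueue_waiter, dequeue_waiter) are hypothetical, a pthread mutex stands in for ECL's spinlock, and the signal masking and condition re-check that the real ecl_wait_on() performs are omitted.

/* Minimal sketch (hypothetical names, not ECL's API) of the reusable
 * queue-record pattern: each process carries one pre-allocated node that is
 * linked into a lock-protected waiter list when it waits and handed back
 * when it is removed, so the wait path performs no heap allocation. */
#include <stdio.h>
#include <pthread.h>

struct process;                       /* forward declaration */

struct node {                         /* plays the role of the cons cell in process.queue_record */
        struct process *proc;
        struct node *next;
};

struct process {
        const char *name;
        struct node record;           /* reserved once, when the process is created */
};

struct waiter_queue {
        pthread_mutex_t lock;         /* stands in for ECL's spinlock */
        struct node *head, *tail;
};

/* Append the caller's own pre-made record to the queue: no allocation here,
 * roughly what the patch does by passing `record` to ecl_atomic_queue_nconc(). */
static void enqueue_waiter(struct waiter_queue *q, struct process *p)
{
        struct node *n = &p->record;
        n->proc = p;
        n->next = NULL;
        pthread_mutex_lock(&q->lock);
        if (q->tail)
                q->tail->next = n;
        else
                q->head = n;
        q->tail = n;
        pthread_mutex_unlock(&q->lock);
}

/* Detach the first record and return its owning process, leaving the record
 * clean so the owner can reuse it on its next wait. */
static struct process *dequeue_waiter(struct waiter_queue *q)
{
        struct process *p = NULL;
        pthread_mutex_lock(&q->lock);
        if (q->head) {
                struct node *n = q->head;
                q->head = n->next;
                if (q->head == NULL)
                        q->tail = NULL;
                n->next = NULL;       /* mirrors ECL_RPLACD(record, Cnil) in step 4) */
                p = n->proc;
        }
        pthread_mutex_unlock(&q->lock);
        return p;
}

int main(void)
{
        struct waiter_queue q = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
        struct process a = { "a", { NULL, NULL } };
        struct process b = { "b", { NULL, NULL } };
        struct process *p;

        enqueue_waiter(&q, &a);       /* register a and b as waiters... */
        enqueue_waiter(&q, &b);
        while ((p = dequeue_waiter(&q)) != NULL)
                printf("woke up %s\n", p->name);   /* ...and release them in FIFO order */
        return 0;
}

Clearing the record's link before handing it back is the same housekeeping the patch performs with own_process->process.queue_record = record and ECL_RPLACD(record, Cnil): the record must be a clean one-element list again before its owner can splice it onto another waiter queue.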