From f0a8547a9003d16a74cd0e0f957ea43bff06ecaf Mon Sep 17 00:00:00 2001 From: normal Date: Tue, 27 Mar 2018 09:28:37 +0000 Subject: [PATCH 1/4] thread_sync.c: avoid reaching across stacks of dead threads rb_ensure is insufficient cleanup for fork and we must reinitialize all waitqueues in the child process. Unfortunately this increases the footprint of ConditionVariable, Queue and SizedQueue by 8 bytes on 32-bit (16 bytes on 64-bit). [ruby-core:86316] [Bug #14634] git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62934 b2dd03c8-39d4-4d8f-98ff-823fe69b080e --- test/thread/test_cv.rb | 19 ++++++++++ test/thread/test_queue.rb | 48 +++++++++++++++++++++++++ thread.c | 2 ++ thread_sync.c | 75 ++++++++++++++++++++++++++++++++++++--- 4 files changed, 139 insertions(+), 5 deletions(-) diff --git a/test/thread/test_cv.rb b/test/thread/test_cv.rb index 1e15d2e9ec31db..306cadd63b2957 100644 --- a/test/thread/test_cv.rb +++ b/test/thread/test_cv.rb @@ -217,4 +217,23 @@ def test_dump Marshal.dump(condvar) end end + + def test_condvar_fork + mutex = Mutex.new + condvar = ConditionVariable.new + thrs = (1..10).map do + Thread.new { mutex.synchronize { condvar.wait(mutex) } } + end + thrs.each { 3.times { Thread.pass } } + pid = fork do + mutex.synchronize { condvar.broadcast } + exit!(0) + end + _, s = Process.waitpid2(pid) + assert_predicate s, :success?, 'no segfault [ruby-core:86316] [Bug #14634]' + until thrs.empty? + mutex.synchronize { condvar.broadcast } + thrs.delete_if { |t| t.join(0.01) } + end + end if Process.respond_to?(:fork) end diff --git a/test/thread/test_queue.rb b/test/thread/test_queue.rb index 4e6c9fa4c9f7c1..d69ecf92b2c140 100644 --- a/test/thread/test_queue.rb +++ b/test/thread/test_queue.rb @@ -565,4 +565,52 @@ def test_queue_with_trap puts 'exit' INPUT end + + def test_fork_while_queue_waiting + q = Queue.new + sq = SizedQueue.new(1) + thq = Thread.new { q.pop } + thsq = Thread.new { sq.pop } + Thread.pass until thq.stop? && thsq.stop? + + pid = fork do + exit!(1) if q.num_waiting != 0 + exit!(2) if sq.num_waiting != 0 + exit!(6) unless q.empty? + exit!(7) unless sq.empty? + q.push :child_q + sq.push :child_sq + exit!(3) if q.pop != :child_q + exit!(4) if sq.pop != :child_sq + exit!(0) + end + _, s = Process.waitpid2(pid) + assert_predicate s, :success?, 'no segfault [ruby-core:86316] [Bug #14634]' + + q.push :thq + sq.push :thsq + assert_equal :thq, thq.value + assert_equal :thsq, thsq.value + + sq.push(1) + th = Thread.new { q.pop; sq.pop } + thsq = Thread.new { sq.push(2) } + Thread.pass until th.stop? && thsq.stop? + pid = fork do + exit!(1) if q.num_waiting != 0 + exit!(2) if sq.num_waiting != 0 + exit!(3) unless q.empty? + exit!(4) if sq.empty? + exit!(5) if sq.pop != 1 + exit!(0) + end + _, s = Process.waitpid2(pid) + assert_predicate s, :success?, 'no segfault [ruby-core:86316] [Bug #14634]' + + assert_predicate thsq, :stop? 
+ assert_equal 1, sq.pop + assert_same sq, thsq.value + q.push('restart th') + assert_equal 2, th.value + end if Process.respond_to?(:fork) end diff --git a/thread.c b/thread.c index d7e0d91d7f6b96..5d6599f8c3bdbd 100644 --- a/thread.c +++ b/thread.c @@ -4188,6 +4188,8 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r } rb_vm_living_threads_init(vm); rb_vm_living_threads_insert(vm, th); + rb_thread_sync_reset_all(); + vm->sleeper = 0; clear_coverage(); } diff --git a/thread_sync.c b/thread_sync.c index ef0bbf3af396a1..6712c5690e3115 100644 --- a/thread_sync.c +++ b/thread_sync.c @@ -4,6 +4,14 @@ static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable; static VALUE rb_eClosedQueueError; +/* + * keep these globally so we can walk and reinitialize them at fork + * in the child process + */ +static LIST_HEAD(szqueue_list); +static LIST_HEAD(queue_list); +static LIST_HEAD(condvar_list); + /* sync_waiter is always on-stack */ struct sync_waiter { rb_thread_t *th; @@ -54,6 +62,7 @@ typedef struct rb_mutex_struct { static void rb_mutex_abandon_all(rb_mutex_t *mutexes); static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th); static void rb_mutex_abandon_locking_mutex(rb_thread_t *th); +static void rb_thread_sync_reset_all(void); #endif static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th); @@ -535,7 +544,9 @@ void rb_mutex_allow_trap(VALUE self, int val) /* Queue */ #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq) +#define queue_live(q) UNALIGNED_MEMBER_PTR(q, live) PACKED_STRUCT_UNALIGNED(struct rb_queue { + struct list_node live; struct list_head waitq; const VALUE que; int num_waiting; @@ -543,6 +554,7 @@ PACKED_STRUCT_UNALIGNED(struct rb_queue { #define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq) #define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq) +#define szqueue_live(sq) UNALIGNED_MEMBER_PTR(sq, q.live) PACKED_STRUCT_UNALIGNED(struct rb_szqueue { struct rb_queue q; int num_waiting_push; @@ -559,6 +571,14 @@ queue_mark(void *ptr) rb_gc_mark(q->que); } +static void +queue_free(void *ptr) +{ + struct rb_queue *q = ptr; + list_del(queue_live(q)); + ruby_xfree(ptr); +} + static size_t queue_memsize(const void *ptr) { @@ -567,7 +587,7 @@ queue_memsize(const void *ptr) static const rb_data_type_t queue_data_type = { "queue", - {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,}, + {queue_mark, queue_free, queue_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -579,6 +599,7 @@ queue_alloc(VALUE klass) obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q); list_head_init(queue_waitq(q)); + list_add(&queue_list, queue_live(q)); return obj; } @@ -601,6 +622,14 @@ szqueue_mark(void *ptr) queue_mark(&sq->q); } +static void +szqueue_free(void *ptr) +{ + struct rb_szqueue *sq = ptr; + list_del(szqueue_live(sq)); + ruby_xfree(ptr); +} + static size_t szqueue_memsize(const void *ptr) { @@ -609,7 +638,7 @@ szqueue_memsize(const void *ptr) static const rb_data_type_t szqueue_data_type = { "sized_queue", - {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,}, + {szqueue_mark, szqueue_free, szqueue_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -621,6 +650,7 @@ szqueue_alloc(VALUE klass) &szqueue_data_type, sq); list_head_init(szqueue_waitq(sq)); list_head_init(szqueue_pushq(sq)); + list_add(&szqueue_list, szqueue_live(sq)); return obj; } @@ -866,7 +896,7 @@ queue_do_pop(VALUE self, struct rb_queue *q, int should_block) 
list_add_tail(&qw.as.q->waitq, &qw.w.node); qw.as.q->num_waiting++; - rb_ensure(queue_sleep, Qfalse, queue_sleep_done, (VALUE)&qw); + rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&qw); } } @@ -1108,7 +1138,7 @@ rb_szqueue_push(int argc, VALUE *argv, VALUE self) list_add_tail(pushq, &qw.w.node); sq->num_waiting_push++; - rb_ensure(queue_sleep, Qfalse, szqueue_sleep_done, (VALUE)&qw); + rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&qw); } } @@ -1212,6 +1242,7 @@ rb_szqueue_empty_p(VALUE self) /* TODO: maybe this can be IMEMO */ struct rb_condvar { struct list_head waitq; + struct list_node live; }; /* @@ -1242,6 +1273,14 @@ struct rb_condvar { * } */ +static void +condvar_free(void *ptr) +{ + struct rb_condvar *cv = ptr; + list_del(&cv->live); + ruby_xfree(ptr); +} + static size_t condvar_memsize(const void *ptr) { @@ -1250,7 +1289,7 @@ condvar_memsize(const void *ptr) static const rb_data_type_t cv_data_type = { "condvar", - {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,}, + {0, condvar_free, condvar_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -1272,6 +1311,7 @@ condvar_alloc(VALUE klass) obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv); list_head_init(&cv->waitq); + list_add(&condvar_list, &cv->live); return obj; } @@ -1385,6 +1425,31 @@ define_thread_class(VALUE outer, const char *name, VALUE super) return klass; } +#if defined(HAVE_WORKING_FORK) +/* we must not reference stacks of dead threads in a forked child */ +static void +rb_thread_sync_reset_all(void) +{ + struct rb_queue *q = 0; + struct rb_szqueue *sq = 0; + struct rb_condvar *cv = 0; + + list_for_each(&queue_list, q, live) { + list_head_init(queue_waitq(q)); + q->num_waiting = 0; + } + list_for_each(&szqueue_list, sq, q.live) { + list_head_init(szqueue_waitq(sq)); + list_head_init(szqueue_pushq(sq)); + sq->num_waiting_push = 0; + sq->q.num_waiting = 0; + } + list_for_each(&condvar_list, cv, live) { + list_head_init(&cv->waitq); + } +} +#endif + static void Init_thread_sync(void) { From 945bf91597af2453bdccebdfc46916e1c4a96795 Mon Sep 17 00:00:00 2001 From: normal Date: Fri, 20 Apr 2018 03:22:26 +0000 Subject: [PATCH 2/4] variable.c: fix thread + fork errors in autoload This is fairly non-intrusive bugfix to prevent children from trying to reach into thread stacks of the parent. I will probably reuse this idea and redo r62934, too (same bug). * vm_core.h (typedef struct rb_vm_struct): add fork_gen counter * thread.c (rb_thread_atfork_internal): increment fork_gen * variable.c (struct autoload_data_i): store fork_gen * variable.c (check_autoload_data): remove (replaced with get_...) 
* variable.c (get_autoload_data): check fork_gen when retrieving * variable.c (check_autoload_required): use get_autoload_data * variable.c (rb_autoloading_value): ditto * variable.c (rb_autoload_p): ditto * variable.c (current_autoload_data): ditto * variable.c (autoload_reset): reset fork_gen, adjust indent * variable.c (rb_autoload_load): set fork_gen when setting state * test/ruby/test_autoload.rb (test_autoload_fork): new test [ruby-core:86410] [Bug #14634] git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63210 b2dd03c8-39d4-4d8f-98ff-823fe69b080e --- test/ruby/test_autoload.rb | 26 ++++++++++++++++++++++++++ thread.c | 1 + variable.c | 32 +++++++++++++++++++++++--------- vm_core.h | 1 + 4 files changed, 51 insertions(+), 9 deletions(-) diff --git a/test/ruby/test_autoload.rb b/test/ruby/test_autoload.rb index 0220f3e27d4e0d..3095052a81905e 100644 --- a/test/ruby/test_autoload.rb +++ b/test/ruby/test_autoload.rb @@ -285,6 +285,32 @@ class AutoloadTest end end + def test_autoload_fork + EnvUtil.default_warning do + Tempfile.create(['autoload', '.rb']) {|file| + file.puts 'sleep 0.3; class AutoloadTest; end' + file.close + add_autoload(file.path) + begin + thrs = [] + 3.times do + thrs << Thread.new { AutoloadTest; nil } + thrs << Thread.new { fork { AutoloadTest } } + end + thrs.each(&:join) + thrs.each do |th| + pid = th.value or next + _, status = Process.waitpid2(pid) + assert_predicate status, :success? + end + ensure + remove_autoload_constant + assert_nil $!, '[ruby-core:86410] [Bug #14634]' + end + } + end + end if Process.respond_to?(:fork) + def add_autoload(path) (@autoload_paths ||= []) << path ::Object.class_eval {autoload(:AutoloadTest, path)} diff --git a/thread.c b/thread.c index 5d6599f8c3bdbd..fee1003eed4789 100644 --- a/thread.c +++ b/thread.c @@ -4188,6 +4188,7 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r } rb_vm_living_threads_init(vm); rb_vm_living_threads_insert(vm, th); + vm->fork_gen++; rb_thread_sync_reset_all(); vm->sleeper = 0; diff --git a/variable.c b/variable.c index f700303d2ff09c..bfb6fa2eddfade 100644 --- a/variable.c +++ b/variable.c @@ -20,6 +20,7 @@ #include "ccan/list/list.h" #include "id_table.h" #include "debug_counter.h" +#include "vm_core.h" struct rb_id_table *rb_global_tbl; static ID autoload, classpath, tmp_classpath, classid; @@ -1859,6 +1860,7 @@ struct autoload_data_i { rb_const_flag_t flag; VALUE value; struct autoload_state *state; /* points to on-stack struct */ + rb_serial_t fork_gen; }; static void @@ -1881,8 +1883,18 @@ static const rb_data_type_t autoload_data_i_type = { 0, 0, RUBY_TYPED_FREE_IMMEDIATELY }; -#define check_autoload_data(av) \ - (struct autoload_data_i *)rb_check_typeddata((av), &autoload_data_i_type) +static struct autoload_data_i * +get_autoload_data(VALUE av) +{ + struct autoload_data_i *ele = rb_check_typeddata(av, &autoload_data_i_type); + + /* do not reach across stack for ->state after forking: */ + if (ele && ele->state && ele->fork_gen != GET_VM()->fork_gen) { + ele->state = 0; + ele->fork_gen = 0; + } + return ele; +} RUBY_FUNC_EXPORTED void rb_autoload(VALUE mod, ID id, const char *file) @@ -1982,7 +1994,7 @@ check_autoload_required(VALUE mod, ID id, const char **loadingpath) const char *loading; int safe; - if (!(load = autoload_data(mod, id)) || !(ele = check_autoload_data(load))) { + if (!(load = autoload_data(mod, id)) || !(ele = get_autoload_data(load))) { return 0; } file = ele->feature; @@ -2020,7 +2032,7 @@ rb_autoloading_value(VALUE mod, ID id, VALUE* value, 
rb_const_flag_t *flag) VALUE load; struct autoload_data_i *ele; - if (!(load = autoload_data(mod, id)) || !(ele = check_autoload_data(load))) { + if (!(load = autoload_data(mod, id)) || !(ele = get_autoload_data(load))) { return 0; } if (ele->state && ele->state->thread == rb_thread_current()) { @@ -2087,8 +2099,9 @@ autoload_reset(VALUE arg) int need_wakeups = 0; if (state->ele->state == state) { - need_wakeups = 1; - state->ele->state = 0; + need_wakeups = 1; + state->ele->state = 0; + state->ele->fork_gen = 0; } /* At the last, move a value defined in autoload to constant table */ @@ -2170,7 +2183,7 @@ rb_autoload_load(VALUE mod, ID id) if (src && loading && strcmp(src, loading) == 0) return Qfalse; /* set ele->state for a marker of autoloading thread */ - if (!(ele = check_autoload_data(load))) { + if (!(ele = get_autoload_data(load))) { return Qfalse; } @@ -2180,6 +2193,7 @@ rb_autoload_load(VALUE mod, ID id) state.thread = rb_thread_current(); if (!ele->state) { ele->state = &state; + ele->fork_gen = GET_VM()->fork_gen; /* * autoload_reset will wake up any threads added to this @@ -2217,7 +2231,7 @@ rb_autoload_p(VALUE mod, ID id) } load = check_autoload_required(mod, id, 0); if (!load) return Qnil; - return (ele = check_autoload_data(load)) ? ele->feature : Qnil; + return (ele = get_autoload_data(load)) ? ele->feature : Qnil; } void @@ -2646,7 +2660,7 @@ current_autoload_data(VALUE mod, ID id) struct autoload_data_i *ele; VALUE load = autoload_data(mod, id); if (!load) return 0; - ele = check_autoload_data(load); + ele = get_autoload_data(load); if (!ele) return 0; /* for autoloading thread, keep the defined value to autoloading storage */ if (ele->state && (ele->state->thread == rb_thread_current())) { diff --git a/vm_core.h b/vm_core.h index cfe0042c25eea3..a1de1ce327ad08 100644 --- a/vm_core.h +++ b/vm_core.h @@ -507,6 +507,7 @@ typedef struct rb_vm_struct { struct rb_thread_struct *main_thread; struct rb_thread_struct *running_thread; + rb_serial_t fork_gen; struct list_head waiting_fds; /* <=> struct waiting_fd */ struct list_head living_threads; size_t living_thread_num; From ca9b30a36578cdb0596e6f4629968e2872f7f6ea Mon Sep 17 00:00:00 2001 From: normal Date: Fri, 20 Apr 2018 22:53:37 +0000 Subject: [PATCH 3/4] thread_sync: redo r62934 to use fork_gen Instead of maintaining linked-lists to store all rb_queue/rb_szqueue/rb_condvar structs; store only a fork_gen serial number to simplify management of these items. This reduces initialization costs and avoids the up-front cost of resetting all Queue/SizedQueue/ConditionVariable objects at fork while saving 8 bytes per-structure on 64-bit. There are no savings on 32-bit. 
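The mechanism in isolation: the child bumps a process-wide generation counter at fork (vm->fork_gen, added in r63210), and every accessor lazily compares the object's stored generation against it, reinitializing stale waiter state on first use after the fork. The standalone sketch below only illustrates that idiom; it is not Ruby's code. fork_generation, struct waitq_owner, owner_check_fork and bump_generation are made-up names, and it registers the bump with pthread_atfork where Ruby does it in rb_thread_atfork_internal.

    /* sketch: generation-counter fork detection (illustrative names only) */
    #include <pthread.h>
    #include <stdio.h>

    static unsigned long fork_generation;       /* plays the role of vm->fork_gen */

    static void bump_generation(void)           /* runs in the child after fork() */
    {
        fork_generation++;
    }

    struct waitq_owner {
        unsigned long fork_gen;                 /* generation this state belongs to */
        int num_waiting;                        /* stands in for the waiter list */
    };

    /* called on every access, as queue_ptr/szqueue_ptr/condvar_ptr do below */
    static int owner_check_fork(struct waitq_owner *o)
    {
        if (o->fork_gen == fork_generation)
            return 0;                           /* same process image: nothing to do */
        o->fork_gen = fork_generation;          /* reset at most once per fork */
        o->num_waiting = 0;                     /* the parent's waiters do not exist here */
        return 1;                               /* caller may reset extra state (SizedQueue) */
    }

    int main(void)
    {
        struct waitq_owner q = {0, 0};
        pthread_atfork(NULL, NULL, bump_generation);
        owner_check_fork(&q);                   /* no fork yet: leaves q untouched */
        printf("gen=%lu waiting=%d\n", q.fork_gen, q.num_waiting);
        return 0;
    }

The cost is one comparison per accessor call and nothing at fork time, which is where the reduced initialization cost claimed above comes from.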
* thread.c (rb_thread_atfork_internal): remove rb_thread_sync_reset_all call * thread_sync.c (rb_thread_sync_reset_all): remove * thread_sync.c (queue_live): remove * thread_sync.c (queue_free): remove * thread_sync.c (struct rb_queue): s/live/fork_gen/ * thread_sync.c (queue_data_type): use default free * thread_sync.c (queue_alloc): remove list_add * thread_sync.c (queue_fork_check): new function * thread_sync.c (queue_ptr): call queue_fork_check * thread_sync.c (szqueue_free): remove * thread_sync.c (szqueue_data_type): use default free * thread_sync.c (szqueue_alloc): remove list_add * thread_sync.c (szqueue_ptr): check fork_gen via queue_fork_check * thread_sync.c (struct rb_condvar): s/live/fork_gen/ * thread_sync.c (condvar_free): remove * thread_sync.c (cv_data_type): use default free * thread_sync.c (condvar_ptr): check fork_gen * thread_sync.c (condvar_alloc): remove list_add [ruby-core:86316] [Bug #14634] git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63215 b2dd03c8-39d4-4d8f-98ff-823fe69b080e --- thread.c | 1 - thread_sync.c | 94 ++++++++++++++++++--------------------------------- 2 files changed, 33 insertions(+), 62 deletions(-) diff --git a/thread.c b/thread.c index fee1003eed4789..94de2fa313525c 100644 --- a/thread.c +++ b/thread.c @@ -4189,7 +4189,6 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r rb_vm_living_threads_init(vm); rb_vm_living_threads_insert(vm, th); vm->fork_gen++; - rb_thread_sync_reset_all(); vm->sleeper = 0; clear_coverage(); diff --git a/thread_sync.c b/thread_sync.c index 6712c5690e3115..e9df19ad75c240 100644 --- a/thread_sync.c +++ b/thread_sync.c @@ -62,7 +62,6 @@ typedef struct rb_mutex_struct { static void rb_mutex_abandon_all(rb_mutex_t *mutexes); static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th); static void rb_mutex_abandon_locking_mutex(rb_thread_t *th); -static void rb_thread_sync_reset_all(void); #endif static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th); @@ -544,17 +543,15 @@ void rb_mutex_allow_trap(VALUE self, int val) /* Queue */ #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq) -#define queue_live(q) UNALIGNED_MEMBER_PTR(q, live) PACKED_STRUCT_UNALIGNED(struct rb_queue { - struct list_node live; struct list_head waitq; + rb_serial_t fork_gen; const VALUE que; int num_waiting; }); #define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq) #define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq) -#define szqueue_live(sq) UNALIGNED_MEMBER_PTR(sq, q.live) PACKED_STRUCT_UNALIGNED(struct rb_szqueue { struct rb_queue q; int num_waiting_push; @@ -571,14 +568,6 @@ queue_mark(void *ptr) rb_gc_mark(q->que); } -static void -queue_free(void *ptr) -{ - struct rb_queue *q = ptr; - list_del(queue_live(q)); - ruby_xfree(ptr); -} - static size_t queue_memsize(const void *ptr) { @@ -587,7 +576,7 @@ queue_memsize(const void *ptr) static const rb_data_type_t queue_data_type = { "queue", - {queue_mark, queue_free, queue_memsize,}, + {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -599,16 +588,32 @@ queue_alloc(VALUE klass) obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q); list_head_init(queue_waitq(q)); - list_add(&queue_list, queue_live(q)); return obj; } +static int +queue_fork_check(struct rb_queue *q) +{ + rb_serial_t fork_gen = GET_VM()->fork_gen; + + if (q->fork_gen == fork_gen) { + return 0; + } + /* forked children can't reach into parent thread stacks */ + q->fork_gen = 
fork_gen; + list_head_init(queue_waitq(q)); + q->num_waiting = 0; + return 1; +} + static struct rb_queue * queue_ptr(VALUE obj) { struct rb_queue *q; TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q); + queue_fork_check(q); + return q; } @@ -622,14 +627,6 @@ szqueue_mark(void *ptr) queue_mark(&sq->q); } -static void -szqueue_free(void *ptr) -{ - struct rb_szqueue *sq = ptr; - list_del(szqueue_live(sq)); - ruby_xfree(ptr); -} - static size_t szqueue_memsize(const void *ptr) { @@ -638,7 +635,7 @@ szqueue_memsize(const void *ptr) static const rb_data_type_t szqueue_data_type = { "sized_queue", - {szqueue_mark, szqueue_free, szqueue_memsize,}, + {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -650,7 +647,6 @@ szqueue_alloc(VALUE klass) &szqueue_data_type, sq); list_head_init(szqueue_waitq(sq)); list_head_init(szqueue_pushq(sq)); - list_add(&szqueue_list, szqueue_live(sq)); return obj; } @@ -660,6 +656,11 @@ szqueue_ptr(VALUE obj) struct rb_szqueue *sq; TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq); + if (queue_fork_check(&sq->q)) { + list_head_init(szqueue_pushq(sq)); + sq->num_waiting_push = 0; + } + return sq; } @@ -1239,10 +1240,9 @@ rb_szqueue_empty_p(VALUE self) /* ConditionalVariable */ -/* TODO: maybe this can be IMEMO */ struct rb_condvar { struct list_head waitq; - struct list_node live; + rb_serial_t fork_gen; }; /* @@ -1273,14 +1273,6 @@ struct rb_condvar { * } */ -static void -condvar_free(void *ptr) -{ - struct rb_condvar *cv = ptr; - list_del(&cv->live); - ruby_xfree(ptr); -} - static size_t condvar_memsize(const void *ptr) { @@ -1289,7 +1281,7 @@ condvar_memsize(const void *ptr) static const rb_data_type_t cv_data_type = { "condvar", - {0, condvar_free, condvar_memsize,}, + {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -1297,9 +1289,15 @@ static struct rb_condvar * condvar_ptr(VALUE self) { struct rb_condvar *cv; + rb_serial_t fork_gen = GET_VM()->fork_gen; TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv); + /* forked children can't reach into parent thread stacks */ + if (cv->fork_gen != fork_gen) { + list_head_init(&cv->waitq); + } + return cv; } @@ -1311,7 +1309,6 @@ condvar_alloc(VALUE klass) obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv); list_head_init(&cv->waitq); - list_add(&condvar_list, &cv->live); return obj; } @@ -1425,31 +1422,6 @@ define_thread_class(VALUE outer, const char *name, VALUE super) return klass; } -#if defined(HAVE_WORKING_FORK) -/* we must not reference stacks of dead threads in a forked child */ -static void -rb_thread_sync_reset_all(void) -{ - struct rb_queue *q = 0; - struct rb_szqueue *sq = 0; - struct rb_condvar *cv = 0; - - list_for_each(&queue_list, q, live) { - list_head_init(queue_waitq(q)); - q->num_waiting = 0; - } - list_for_each(&szqueue_list, sq, q.live) { - list_head_init(szqueue_waitq(sq)); - list_head_init(szqueue_pushq(sq)); - sq->num_waiting_push = 0; - sq->q.num_waiting = 0; - } - list_for_each(&condvar_list, cv, live) { - list_head_init(&cv->waitq); - } -} -#endif - static void Init_thread_sync(void) { From d2986e0dc41b3242b4b4291775b71886ef2e7e06 Mon Sep 17 00:00:00 2001 From: normal Date: Mon, 30 Apr 2018 23:47:21 +0000 Subject: [PATCH 4/4] thread_sync.c (condvar_ptr): reset fork_gen after forking Otherwise the condition variable waiter list will always be empty, which is wrong :x [Bug #14725] [Bug #14634] 
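Concretely: the condvar_ptr added in r63215 compared cv->fork_gen against the VM's counter but never stored the new value, so in a forked child every call kept reinitializing cv->waitq, discarding waiters that had enqueued themselves after the fork, and signal/broadcast found nobody to wake. A minimal standalone sketch of the difference (fork_generation, struct cv_state and the cv_access_* functions are illustrative names; num_waiting stands in for the waiter list):

    /* sketch: why forgetting to record the generation empties the list forever */
    #include <stdio.h>

    static unsigned long fork_generation = 1;   /* the child has already bumped it */

    struct cv_state {
        unsigned long fork_gen;                 /* still 0, inherited from the parent */
        int num_waiting;                        /* stands in for cv->waitq */
    };

    /* r63215 behaviour: check the generation but never store it */
    static void cv_access_broken(struct cv_state *cv)
    {
        if (cv->fork_gen != fork_generation)
            cv->num_waiting = 0;                /* wipes the waiters on EVERY access */
    }

    /* this patch: store the generation so the reset happens exactly once */
    static void cv_access_fixed(struct cv_state *cv)
    {
        if (cv->fork_gen != fork_generation) {
            cv->fork_gen = fork_generation;
            cv->num_waiting = 0;
        }
    }

    int main(void)
    {
        struct cv_state a = {0, 0}, b = {0, 0};

        cv_access_broken(&a); a.num_waiting++;  /* a thread starts waiting in the child */
        cv_access_broken(&a);                   /* broadcast: the waiter is gone again */
        printf("broken: %d waiting\n", a.num_waiting);  /* 0, nobody left to wake */

        cv_access_fixed(&b); b.num_waiting++;
        cv_access_fixed(&b);
        printf("fixed:  %d waiting\n", b.num_waiting);  /* 1, the waiter survives */
        return 0;
    }

queue_fork_check already records the generation, which is why Queue and SizedQueue were not affected by this particular regression.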
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63309 b2dd03c8-39d4-4d8f-98ff-823fe69b080e --- thread_sync.c | 1 + 1 file changed, 1 insertion(+) diff --git a/thread_sync.c b/thread_sync.c index e9df19ad75c240..3766a30703eefa 100644 --- a/thread_sync.c +++ b/thread_sync.c @@ -1295,6 +1295,7 @@ condvar_ptr(VALUE self) /* forked children can't reach into parent thread stacks */ if (cv->fork_gen != fork_gen) { + cv->fork_gen = fork_gen; list_head_init(&cv->waitq); }
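Stepping back, the reason all four changes are needed at all is the on-stack waiter pattern used throughout thread_sync.c (the "sync_waiter is always on-stack" comment above): a blocked thread links a node living in its own stack frame into the waitq of the heap-allocated Queue, SizedQueue or ConditionVariable. The fragment below is only an illustration of that hazard with made-up names (struct waiter, struct waitq, block_on), not Ruby's list implementation:

    /* sketch: an intrusive on-stack waiter node and why a forked child
     * must never walk a waitq inherited from the parent */
    #include <stddef.h>
    #include <stdio.h>

    struct waiter {                     /* lives in the waiting thread's stack frame */
        struct waiter *next;
        const char *thread_name;
    };

    struct waitq {                      /* lives in the heap-allocated sync object */
        struct waiter *head;
    };

    static void block_on(struct waitq *wq, const char *name)
    {
        struct waiter w = { wq->head, name };   /* on-stack node, like sync_waiter */
        wq->head = &w;
        /* ...the thread would sleep here until another thread unlinks &w...
         * If the process forks now, the child inherits wq->head == &w, but the
         * thread owning that stack frame does not exist in the child; touching
         * the node there reaches across a dead thread's stack.  r62934 therefore
         * reset every waitq eagerly at fork, and r63215 does it lazily via
         * fork_gen on first access. */
    }

    int main(void)
    {
        struct waitq wq = { NULL };
        block_on(&wq, "t1");            /* in real code this returns only when woken */
        printf("head=%p\n", (void *)wq.head);   /* already dangling in this toy program */
        return 0;
    }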