diff --git a/thread.c b/thread.c index fee1003eed4789..94de2fa313525c 100644 --- a/thread.c +++ b/thread.c @@ -4189,7 +4189,6 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r rb_vm_living_threads_init(vm); rb_vm_living_threads_insert(vm, th); vm->fork_gen++; - rb_thread_sync_reset_all(); vm->sleeper = 0; clear_coverage(); diff --git a/thread_sync.c b/thread_sync.c index 6712c5690e3115..e9df19ad75c240 100644 --- a/thread_sync.c +++ b/thread_sync.c @@ -62,7 +62,6 @@ typedef struct rb_mutex_struct { static void rb_mutex_abandon_all(rb_mutex_t *mutexes); static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th); static void rb_mutex_abandon_locking_mutex(rb_thread_t *th); -static void rb_thread_sync_reset_all(void); #endif static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th); @@ -544,17 +543,15 @@ void rb_mutex_allow_trap(VALUE self, int val) /* Queue */ #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq) -#define queue_live(q) UNALIGNED_MEMBER_PTR(q, live) PACKED_STRUCT_UNALIGNED(struct rb_queue { - struct list_node live; struct list_head waitq; + rb_serial_t fork_gen; const VALUE que; int num_waiting; }); #define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq) #define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq) -#define szqueue_live(sq) UNALIGNED_MEMBER_PTR(sq, q.live) PACKED_STRUCT_UNALIGNED(struct rb_szqueue { struct rb_queue q; int num_waiting_push; @@ -571,14 +568,6 @@ queue_mark(void *ptr) rb_gc_mark(q->que); } -static void -queue_free(void *ptr) -{ - struct rb_queue *q = ptr; - list_del(queue_live(q)); - ruby_xfree(ptr); -} - static size_t queue_memsize(const void *ptr) { @@ -587,7 +576,7 @@ queue_memsize(const void *ptr) static const rb_data_type_t queue_data_type = { "queue", - {queue_mark, queue_free, queue_memsize,}, + {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -599,16 +588,32 @@ queue_alloc(VALUE 
klass) obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q); list_head_init(queue_waitq(q)); - list_add(&queue_list, queue_live(q)); return obj; } +static int +queue_fork_check(struct rb_queue *q) +{ + rb_serial_t fork_gen = GET_VM()->fork_gen; + + if (q->fork_gen == fork_gen) { + return 0; + } + /* forked children can't reach into parent thread stacks */ + q->fork_gen = fork_gen; + list_head_init(queue_waitq(q)); + q->num_waiting = 0; + return 1; +} + static struct rb_queue * queue_ptr(VALUE obj) { struct rb_queue *q; TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q); + queue_fork_check(q); + return q; } @@ -622,14 +627,6 @@ szqueue_mark(void *ptr) queue_mark(&sq->q); } -static void -szqueue_free(void *ptr) -{ - struct rb_szqueue *sq = ptr; - list_del(szqueue_live(sq)); - ruby_xfree(ptr); -} - static size_t szqueue_memsize(const void *ptr) { @@ -638,7 +635,7 @@ szqueue_memsize(const void *ptr) static const rb_data_type_t szqueue_data_type = { "sized_queue", - {szqueue_mark, szqueue_free, szqueue_memsize,}, + {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -650,7 +647,6 @@ szqueue_alloc(VALUE klass) &szqueue_data_type, sq); list_head_init(szqueue_waitq(sq)); list_head_init(szqueue_pushq(sq)); - list_add(&szqueue_list, szqueue_live(sq)); return obj; } @@ -660,6 +656,11 @@ szqueue_ptr(VALUE obj) struct rb_szqueue *sq; TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq); + if (queue_fork_check(&sq->q)) { + list_head_init(szqueue_pushq(sq)); + sq->num_waiting_push = 0; + } + return sq; } @@ -1239,10 +1240,9 @@ rb_szqueue_empty_p(VALUE self) /* ConditionalVariable */ -/* TODO: maybe this can be IMEMO */ struct rb_condvar { struct list_head waitq; - struct list_node live; + rb_serial_t fork_gen; }; /* @@ -1273,14 +1273,6 @@ struct rb_condvar { * } */ -static void -condvar_free(void *ptr) -{ - struct rb_condvar *cv = ptr; - list_del(&cv->live); - 
ruby_xfree(ptr); -} - static size_t condvar_memsize(const void *ptr) { @@ -1289,7 +1281,7 @@ condvar_memsize(const void *ptr) static const rb_data_type_t cv_data_type = { "condvar", - {0, condvar_free, condvar_memsize,}, + {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED }; @@ -1297,9 +1289,16 @@ static struct rb_condvar * condvar_ptr(VALUE self) { struct rb_condvar *cv; + rb_serial_t fork_gen = GET_VM()->fork_gen; TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv); + /* forked children can't reach into parent thread stacks */ + if (cv->fork_gen != fork_gen) { + cv->fork_gen = fork_gen; + list_head_init(&cv->waitq); + } + return cv; } @@ -1311,7 +1309,6 @@ condvar_alloc(VALUE klass) obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv); list_head_init(&cv->waitq); - list_add(&condvar_list, &cv->live); return obj; } @@ -1425,31 +1422,6 @@ define_thread_class(VALUE outer, const char *name, VALUE super) return klass; } -#if defined(HAVE_WORKING_FORK) -/* we must not reference stacks of dead threads in a forked child */ -static void -rb_thread_sync_reset_all(void) -{ - struct rb_queue *q = 0; - struct rb_szqueue *sq = 0; - struct rb_condvar *cv = 0; - - list_for_each(&queue_list, q, live) { - list_head_init(queue_waitq(q)); - q->num_waiting = 0; - } - list_for_each(&szqueue_list, sq, q.live) { - list_head_init(szqueue_waitq(sq)); - list_head_init(szqueue_pushq(sq)); - sq->num_waiting_push = 0; - sq->q.num_waiting = 0; - } - list_for_each(&condvar_list, cv, live) { - list_head_init(&cv->waitq); - } -} -#endif - static void Init_thread_sync(void) {