@@ -13742,13 +13742,13 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
 	perf_event_wakeup(event);
 }
 
-static void perf_event_exit_task_context(struct task_struct *child, bool exit)
+static void perf_event_exit_task_context(struct task_struct *task, bool exit)
 {
-	struct perf_event_context *child_ctx, *clone_ctx = NULL;
+	struct perf_event_context *ctx, *clone_ctx = NULL;
 	struct perf_event *child_event, *next;
 
-	child_ctx = perf_pin_task_context(child);
-	if (!child_ctx)
+	ctx = perf_pin_task_context(task);
+	if (!ctx)
 		return;
 
 	/*
@@ -13761,28 +13761,28 @@ static void perf_event_exit_task_context(struct task_struct *child, bool exit)
 	 * without ctx::mutex (it cannot because of the move_group double mutex
 	 * lock thing). See the comments in perf_install_in_context().
 	 */
-	mutex_lock(&child_ctx->mutex);
+	mutex_lock(&ctx->mutex);
 
 	/*
 	 * In a single ctx::lock section, de-schedule the events and detach the
 	 * context from the task such that we cannot ever get it scheduled back
 	 * in.
 	 */
-	raw_spin_lock_irq(&child_ctx->lock);
+	raw_spin_lock_irq(&ctx->lock);
 	if (exit)
-		task_ctx_sched_out(child_ctx, NULL, EVENT_ALL);
+		task_ctx_sched_out(ctx, NULL, EVENT_ALL);
 
 	/*
 	 * Now that the context is inactive, destroy the task <-> ctx relation
 	 * and mark the context dead.
 	 */
-	RCU_INIT_POINTER(child->perf_event_ctxp, NULL);
-	put_ctx(child_ctx); /* cannot be last */
-	WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
-	put_task_struct(child); /* cannot be last */
+	RCU_INIT_POINTER(task->perf_event_ctxp, NULL);
+	put_ctx(ctx); /* cannot be last */
+	WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+	put_task_struct(task); /* cannot be last */
 
-	clone_ctx = unclone_ctx(child_ctx);
-	raw_spin_unlock_irq(&child_ctx->lock);
+	clone_ctx = unclone_ctx(ctx);
+	raw_spin_unlock_irq(&ctx->lock);
 
 	if (clone_ctx)
 		put_ctx(clone_ctx);
@@ -13793,12 +13793,12 @@ static void perf_event_exit_task_context(struct task_struct *child, bool exit)
 	 * get a few PERF_RECORD_READ events.
 	 */
 	if (exit)
-		perf_event_task(child, child_ctx, 0);
+		perf_event_task(task, ctx, 0);
 
-	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
-		perf_event_exit_event(child_event, child_ctx);
+	list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
+		perf_event_exit_event(child_event, ctx);
 
-	mutex_unlock(&child_ctx->mutex);
+	mutex_unlock(&ctx->mutex);
 
 	if (!exit) {
 		/*
@@ -13814,24 +13814,26 @@ static void perf_event_exit_task_context(struct task_struct *child, bool exit)
 		 *
 		 * Wait for all events to drop their context reference.
 		 */
-		wait_var_event(&child_ctx->refcount,
-			       refcount_read(&child_ctx->refcount) == 1);
+		wait_var_event(&ctx->refcount,
+			       refcount_read(&ctx->refcount) == 1);
 	}
-	put_ctx(child_ctx);
+	put_ctx(ctx);
 }
 
 /*
- * When a child task exits, feed back event values to parent events.
+ * When a task exits, feed back event values to parent events.
  *
  * Can be called with exec_update_lock held when called from
 * setup_new_exec().
 */
-void perf_event_exit_task(struct task_struct *child)
+void perf_event_exit_task(struct task_struct *task)
 {
 	struct perf_event *event, *tmp;
 
-	mutex_lock(&child->perf_event_mutex);
-	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+	WARN_ON_ONCE(task != current);
+
+	mutex_lock(&task->perf_event_mutex);
+	list_for_each_entry_safe(event, tmp, &task->perf_event_list,
				 owner_entry) {
 		list_del_init(&event->owner_entry);
 
@@ -13842,23 +13844,23 @@ void perf_event_exit_task(struct task_struct *child)
 		 */
 		smp_store_release(&event->owner, NULL);
 	}
-	mutex_unlock(&child->perf_event_mutex);
+	mutex_unlock(&task->perf_event_mutex);
 
-	perf_event_exit_task_context(child, true);
+	perf_event_exit_task_context(task, true);
 
 	/*
 	 * The perf_event_exit_task_context calls perf_event_task
-	 * with child's task_ctx, which generates EXIT events for
-	 * child contexts and sets child->perf_event_ctxp[] to NULL.
+	 * with task's task_ctx, which generates EXIT events for
+	 * task contexts and sets task->perf_event_ctxp[] to NULL.
 	 * At this point we need to send EXIT events to cpu contexts.
 	 */
-	perf_event_task(child, NULL, 0);
+	perf_event_task(task, NULL, 0);
 
 	/*
 	 * Detach the perf_ctx_data for the system-wide event.
 	 */
 	guard(percpu_read)(&global_ctx_data_rwsem);
-	detach_task_ctx_data(child);
+	detach_task_ctx_data(task);
 }
 
 /*