@@ -2483,6 +2483,48 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
24832483#endif
24842484}
24852485
/*
 * ttwu_do_activate - second half of a wakeup: enqueue @p on @rq and
 * mark it runnable via ttwu_do_wakeup().  Caller must hold rq->lock
 * (see ttwu_queue(), which takes it around this call).
 */
2486+ static void
2487+ ttwu_do_activate (struct rq * rq , struct task_struct * p , int wake_flags )
2488+ {
2489+ #ifdef CONFIG_SMP
 /* Waking task stops counting towards the uninterruptible load. */
2490+ if (p -> sched_contributes_to_load )
2491+ rq -> nr_uninterruptible -- ;
2492+ #endif
2493+
 /* Enqueue, then flip ->state to TASK_RUNNING. */
2494+ ttwu_activate (rq , p , ENQUEUE_WAKEUP | ENQUEUE_WAKING );
2495+ ttwu_do_wakeup (rq , p , wake_flags );
2496+ }
2497+
2498+ /*
2499+ * Called in case the task @p isn't fully descheduled from its runqueue,
2500+ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
2501+ * since all we need to do is flip p->state to TASK_RUNNING, since
2502+ * the task is still ->on_rq.
 *
 * Returns 1 if @p was still on a runqueue and has been woken up here,
 * 0 if it was already fully descheduled (the caller must then do a
 * full activation instead).
2503+ */
2504+ static int ttwu_remote (struct task_struct * p , int wake_flags )
2505+ {
2506+ struct rq * rq ;
2507+ int ret = 0 ;
2508+
 /* Pin @p's runqueue; ->on_rq must be re-checked under rq->lock. */
2509+ rq = __task_rq_lock (p );
2510+ if (p -> on_rq ) {
2511+ ttwu_do_wakeup (rq , p , wake_flags );
2512+ ret = 1 ;
2513+ }
2514+ __task_rq_unlock (rq );
2515+
2516+ return ret ;
2517+ }
2518+
/*
 * ttwu_queue - perform a full wakeup of @p onto @cpu's runqueue:
 * takes rq->lock and does the activation via ttwu_do_activate().
 */
2519+ static void ttwu_queue (struct task_struct * p , int cpu )
2520+ {
2521+ struct rq * rq = cpu_rq (cpu );
2522+
2523+ raw_spin_lock (& rq -> lock );
2524+ ttwu_do_activate (rq , p , 0 );
2525+ raw_spin_unlock (& rq -> lock );
2526+ }
2527+
24862528/**
24872529 * try_to_wake_up - wake up a thread
24882530 * @p: the thread to be awakened
@@ -2501,27 +2543,25 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
25012543static int
25022544try_to_wake_up (struct task_struct * p , unsigned int state , int wake_flags )
25032545{
2504- int cpu , this_cpu , success = 0 ;
25052546 unsigned long flags ;
2506- struct rq * rq ;
2507-
2508- this_cpu = get_cpu ();
2547+ int cpu , success = 0 ;
25092548
 /* NOTE(review): this write barrier presumably pairs with the sleeper's
  * ->state store so the state test below is ordered -- confirm against
  * memory-barriers.txt. */
25102549 smp_wmb ();
25112550 raw_spin_lock_irqsave (& p -> pi_lock , flags );
 /* @p is not in any of the requested sleep states: nothing to wake. */
25122551 if (!(p -> state & state ))
25132552 goto out ;
25142553
2554+ success = 1 ; /* we're going to change ->state */
25152555 cpu = task_cpu (p );
25162556
 /* If @p is still queued, the 'light' ttwu_remote() path suffices. */
2517- if (p -> on_rq ) {
2518- rq = __task_rq_lock (p );
2519- if (p -> on_rq )
2520- goto out_running ;
2521- __task_rq_unlock (rq );
2522- }
2557+ if (p -> on_rq && ttwu_remote (p , wake_flags ))
2558+ goto stat ;
25232559
25242560#ifdef CONFIG_SMP
2561+ /*
2562+ * If the owning (remote) cpu is still in the middle of schedule() with
2563+ * this task as prev, wait until it's done referencing the task.
2564+ */
25252565 while (p -> on_cpu ) {
25262566#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
25272567 /*
@@ -2530,8 +2570,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
25302570 * to spin on ->on_cpu if p is current, since that would
25312571 * deadlock.
25322572 */
2533- if (p == current )
2534- goto out_activate ;
2573+ if (p == current ) {
2574+ ttwu_queue (p , cpu );
2575+ goto stat ;
2576+ }
25352577#endif
25362578 cpu_relax ();
25372579 }
@@ -2547,32 +2589,15 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
25472589 p -> sched_class -> task_waking (p );
25482590
 /* Pick a target cpu for the wakeup and migrate @p if it moved. */
25492591 cpu = select_task_rq (p , SD_BALANCE_WAKE , wake_flags );
2550- #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2551- out_activate :
2552- #endif
2553- #endif /* CONFIG_SMP */
2554-
2555- rq = cpu_rq (cpu );
2556- raw_spin_lock (& rq -> lock );
2557-
2558- #ifdef CONFIG_SMP
2559- if (cpu != task_cpu (p ))
2592+ if (task_cpu (p ) != cpu )
25602593 set_task_cpu (p , cpu );
2594+ #endif /* CONFIG_SMP */
25612595
2562- if (p -> sched_contributes_to_load )
2563- rq -> nr_uninterruptible -- ;
2564- #endif
2565-
2566- ttwu_activate (rq , p , ENQUEUE_WAKEUP | ENQUEUE_WAKING );
2567- out_running :
2568- ttwu_do_wakeup (rq , p , wake_flags );
2569- success = 1 ;
2570- __task_rq_unlock (rq );
2571-
 /* Full wakeup: queue @p on the selected cpu's runqueue. */
2596+ ttwu_queue (p , cpu );
2597+ stat :
25722598 ttwu_stat (p , cpu , wake_flags );
25732599out :
25742600 raw_spin_unlock_irqrestore (& p -> pi_lock , flags );
2575- put_cpu ();
25762601
25772602 return success ;
25782603}
0 commit comments