@@ -604,37 +604,33 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
 	}
 }
 
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
+static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp)
 {
 	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
-	int ret;
 
-	rcu_nocb_lock_irqsave(rdp, flags);
-	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
-	    !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+	/*
+	 * Locking orders future de-offloaded callbacks enqueue against previous
+	 * handling of this rdp. I.e., make sure rcuog is done with this rdp before
+	 * de-offloaded callbacks can be enqueued.
+	 */
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
 		/*
 		 * Offloading. Set our flag and notify the offload worker.
 		 * We will handle this rdp until it ever gets de-offloaded.
 		 */
-		rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		ret = 1;
-	} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
-		   rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+		list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
+		rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED);
+	} else {
 		/*
 		 * De-offloading. Clear our flag and notify the de-offload worker.
 		 * We will ignore this rdp until it ever gets re-offloaded.
 		 */
-		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		ret = 0;
-	} else {
-		WARN_ON_ONCE(1);
-		ret = -1;
+		list_del(&rdp->nocb_entry_rdp);
+		rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED);
 	}
-
-	rcu_nocb_unlock_irqrestore(rdp, flags);
-
-	return ret;
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 }
 
 static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
@@ -841,14 +837,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
 	}
 
 	if (rdp_toggling) {
-		int ret;
-
-		ret = nocb_gp_toggle_rdp(rdp_toggling);
-		if (ret == 1)
-			list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
-		else if (ret == 0)
-			list_del(&rdp_toggling->nocb_entry_rdp);
-
+		nocb_gp_toggle_rdp(my_rdp, rdp_toggling);
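+		/* Wake the (de-)offload requester once the toggle is complete. */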
 		swake_up_one(&rdp_toggling->nocb_state_wq);
 	}
 
@@ -1018,16 +1007,11 @@ void rcu_nocb_flush_deferred_wakeup(void)
 }
 EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
 
-static int rdp_offload_toggle(struct rcu_data *rdp,
-			      bool offload, unsigned long flags)
-	__releases(rdp->nocb_lock)
+static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 	bool wake_gp = false;
-
-	rcu_segcblist_offload(cblist, offload);
-	rcu_nocb_unlock_irqrestore(rdp, flags);
+	unsigned long flags;
 
 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
 	// Queue this rdp for add/del to/from the list to iterate on rcuog
@@ -1041,9 +1025,25 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
 	return wake_gp;
 }
 
+static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
+{
+	unsigned long flags;
+	bool ret;
+
+	/*
+	 * Locking makes sure rcuog is done handling this rdp before de-offloaded
+	 * enqueue can happen. It also keeps the SEGCBLIST_OFFLOADED flag stable
+	 * while ->nocb_lock is held.
+	 */
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+	return ret;
+}
+
 static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
 	int wake_gp;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
@@ -1056,51 +1056,42 @@ static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 	/* Flush all callbacks from segcblist and bypass */
 	rcu_barrier();
 
+	/*
+	 * Make sure the rcuoc kthread isn't in the middle of a nocb locked
+	 * sequence while offloading (and, with it, nocb locking) is deactivated.
+	 */
+	if (rdp->nocb_cb_kthread)
+		kthread_park(rdp->nocb_cb_kthread);
+
 	rcu_nocb_lock_irqsave(rdp, flags);
 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
+	rcu_nocb_unlock_irqrestore(rdp, flags);
 
-	wake_gp = rdp_offload_toggle(rdp, false, flags);
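+	/* Queue this rdp on rcuog's toggle list; rcuog completes the de-offload. */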
+	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
 
 	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
+
 	if (rdp_gp->nocb_gp_kthread) {
 		if (wake_gp)
 			wake_up_process(rdp_gp->nocb_gp_kthread);
 
 		swait_event_exclusive(rdp->nocb_state_wq,
-				      !rcu_segcblist_test_flags(cblist,
-								SEGCBLIST_KTHREAD_GP));
-		if (rdp->nocb_cb_kthread)
-			kthread_park(rdp->nocb_cb_kthread);
+				      rcu_nocb_rdp_deoffload_wait_cond(rdp));
 	} else {
 		/*
 		 * No kthread to clear the flags for us or remove the rdp from the nocb list
 		 * to iterate. Do it here instead. Locking doesn't look strictly necessary
 		 * but we stick to paranoia in this rare path.
 		 */
-		rcu_nocb_lock_irqsave(rdp, flags);
-		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 
 		list_del(&rdp->nocb_entry_rdp);
 	}
-	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
-	/*
-	 * Lock one last time to acquire latest callback updates from kthreads
-	 * so we can later handle callbacks locally without locking.
-	 */
-	rcu_nocb_lock_irqsave(rdp, flags);
-	/*
-	 * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
-	 * lock is released but how about being paranoid for once?
-	 */
-	rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
-	/*
-	 * Without SEGCBLIST_LOCKING, we can't use
-	 * rcu_nocb_unlock_irqrestore() anymore.
-	 */
-	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
 	return 0;
 }
@@ -1129,10 +1120,20 @@ int rcu_nocb_cpu_deoffload(int cpu)
11291120}
11301121EXPORT_SYMBOL_GPL (rcu_nocb_cpu_deoffload );
11311122
1132- static int rcu_nocb_rdp_offload (struct rcu_data * rdp )
1123+ static bool rcu_nocb_rdp_offload_wait_cond (struct rcu_data * rdp )
11331124{
1134- struct rcu_segcblist * cblist = & rdp -> cblist ;
11351125 unsigned long flags ;
1126+ bool ret ;
1127+
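+	/*
+	 * Locking keeps the SEGCBLIST_OFFLOADED flag stable while ->nocb_lock
+	 * is held, mirroring rcu_nocb_rdp_deoffload_wait_cond().
+	 */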
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+	return ret;
+}
+
+static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
+{
 	int wake_gp;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 
@@ -1152,20 +1153,14 @@ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
 
-	/*
-	 * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
-	 * is set.
-	 */
-	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-
-	wake_gp = rdp_offload_toggle(rdp, true, flags);
+	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
 	if (wake_gp)
 		wake_up_process(rdp_gp->nocb_gp_kthread);
 
-	kthread_unpark(rdp->nocb_cb_kthread);
-
 	swait_event_exclusive(rdp->nocb_state_wq,
-			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+			      rcu_nocb_rdp_offload_wait_cond(rdp));
+
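+	/* Resume the rcuoc kthread only once rcuog has completed the offload. */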
+	kthread_unpark(rdp->nocb_cb_kthread);
 
 	return 0;
 }
@@ -1340,8 +1335,7 @@ void __init rcu_init_nohz(void)
 		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rcu_segcblist_empty(&rdp->cblist))
 			rcu_segcblist_init(&rdp->cblist);
-		rcu_segcblist_offload(&rdp->cblist, true);
-		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
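+		/* Mark the rdp offloaded before the nocb kthreads are created below. */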
+		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
 	}
 	rcu_organize_nocb_kthreads();
 }