@@ -816,6 +816,9 @@ EXPORT_SYMBOL(tcf_idr_cleanup);
  * its reference and bind counters, and return 1. Otherwise insert temporary
  * error pointer (to prevent concurrent users from inserting actions with same
  * index) and return 0.
+ *
+ * May return -EAGAIN for binding actions in case of a parallel add/delete on
+ * the requested index.
  */
 
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
@@ -824,43 +827,61 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 	struct tc_action *p;
 	int ret;
+	u32 max;
 
-again:
-	mutex_lock(&idrinfo->lock);
 	if (*index) {
+again:
+		rcu_read_lock();
 		p = idr_find(&idrinfo->action_idr, *index);
+
 		if (IS_ERR(p)) {
 			/* This means that another process allocated
 			 * index but did not assign the pointer yet.
 			 */
-			mutex_unlock(&idrinfo->lock);
+			rcu_read_unlock();
 			goto again;
 		}
 
-		if (p) {
-			refcount_inc(&p->tcfa_refcnt);
-			if (bind)
-				atomic_inc(&p->tcfa_bindcnt);
-			*a = p;
-			ret = 1;
-		} else {
-			*a = NULL;
-			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
-					    *index, GFP_KERNEL);
-			if (!ret)
-				idr_replace(&idrinfo->action_idr,
-					    ERR_PTR(-EBUSY), *index);
+		if (!p) {
+			/* Empty slot, try to allocate it */
+			max = *index;
+			rcu_read_unlock();
+			goto new;
 		}
+
+		if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
+			/* Action was deleted in parallel */
+			rcu_read_unlock();
+			return -EAGAIN;
+		}
+
+		if (bind)
+			atomic_inc(&p->tcfa_bindcnt);
+		*a = p;
+
+		rcu_read_unlock();
+
+		return 1;
 	} else {
+		/* Find a slot */
 		*index = 1;
-		*a = NULL;
-		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
-				    UINT_MAX, GFP_KERNEL);
-		if (!ret)
-			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
-				    *index);
+		max = UINT_MAX;
 	}
+
+new:
+	*a = NULL;
+
+	mutex_lock(&idrinfo->lock);
+	ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
+			    GFP_KERNEL);
 	mutex_unlock(&idrinfo->lock);
+
+	/* N binds raced for action allocation,
+	 * retry for all the ones that failed.
+	 */
+	if (ret == -ENOSPC && *index == max)
+		ret = -EAGAIN;
+
 	return ret;
 }
 EXPORT_SYMBOL(tcf_idr_check_alloc);
@@ -1283,19 +1304,20 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
 	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
 };
 
-void tcf_idr_insert_many(struct tc_action *actions[])
+void tcf_idr_insert_many(struct tc_action *actions[], int init_res[])
 {
 	struct tc_action *a;
 	int i;
 
 	tcf_act_for_each_action(i, a, actions) {
 		struct tcf_idrinfo *idrinfo;
 
+		if (init_res[i] == 0) /* Bound */
+			continue;
+
 		idrinfo = a->idrinfo;
 		mutex_lock(&idrinfo->lock);
-		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
-		 * it is just created, otherwise this is just a nop.
-		 */
+		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
 		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
 		mutex_unlock(&idrinfo->lock);
 	}
@@ -1495,7 +1517,7 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
 	/* We have to commit them all together, because if any error happened in
 	 * between, we could not handle the failure gracefully.
 	 */
-	tcf_idr_insert_many(actions);
+	tcf_idr_insert_many(actions, init_res);
 
 	*attr_size = tcf_action_full_attrs_size(sz);
 	err = i - 1;
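
For readers unfamiliar with the lookup pattern the reworked tcf_idr_check_alloc() switches to, here is a minimal, illustrative userspace sketch (not kernel code) of the idea: take a reference on a shared object only if its refcount has not already dropped to zero, so a reader racing with a deleter either gets a valid reference or backs off and retries (the -EAGAIN case above). C11 atomics stand in for the kernel's refcount_t and refcount_inc_not_zero(), the RCU read-side protection is omitted, and struct obj, obj_get_not_zero() and obj_put() are made-up names used only for this sketch.

/*
 * Userspace model of the "increment unless zero" reference pattern.
 * Not kernel code; names and types are illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint refcnt;	/* 0 means the object is being torn down */
	int payload;
};

/* Mirrors refcount_inc_not_zero() semantics: increment unless already zero. */
static bool obj_get_not_zero(struct obj *p)
{
	unsigned int old = atomic_load(&p->refcnt);

	while (old != 0) {
		/* On failure, 'old' is reloaded and the check repeats. */
		if (atomic_compare_exchange_weak(&p->refcnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object already dying; caller retries */
}

static void obj_put(struct obj *p)
{
	if (atomic_fetch_sub(&p->refcnt, 1) == 1)
		printf("last reference to %d dropped, safe to free\n",
		       p->payload);
}

int main(void)
{
	struct obj o = { .refcnt = 1, .payload = 42 };

	if (obj_get_not_zero(&o))	/* lookup side */
		obj_put(&o);		/* paired release */
	obj_put(&o);			/* original reference */
	return 0;
}

The payoff visible in the hunk above is that the common lookup path now runs entirely under rcu_read_lock(), and idrinfo->lock is only taken around idr_alloc_u32() when a new slot actually has to be reserved.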