@@ -53,12 +53,13 @@ struct clusterip_config {
5353#endif
5454 enum clusterip_hashmode hash_mode ; /* which hashing mode */
5555 u_int32_t hash_initval ; /* hash initialization */
56+ struct rcu_head rcu ;
5657};
5758
5859static LIST_HEAD (clusterip_configs );
5960
6061/* clusterip_lock protects the clusterip_configs list */
61- static DEFINE_RWLOCK (clusterip_lock );
62+ static DEFINE_SPINLOCK (clusterip_lock );
6263
6364#ifdef CONFIG_PROC_FS
6465static const struct file_operations clusterip_proc_fops ;
@@ -71,11 +72,17 @@ clusterip_config_get(struct clusterip_config *c)
7172 atomic_inc (& c -> refcount );
7273}
7374
75+
76+ static void clusterip_config_rcu_free (struct rcu_head * head )
77+ {
78+ kfree (container_of (head , struct clusterip_config , rcu ));
79+ }
80+
7481static inline void
7582clusterip_config_put (struct clusterip_config * c )
7683{
7784 if (atomic_dec_and_test (& c -> refcount ))
78- kfree ( c );
85+ call_rcu_bh ( & c -> rcu , clusterip_config_rcu_free );
7986}
8087
8188/* decrease the count of entries using/referencing this config. If last
@@ -84,10 +91,11 @@ clusterip_config_put(struct clusterip_config *c)
8491static inline void
8592clusterip_config_entry_put (struct clusterip_config * c )
8693{
87- write_lock_bh (& clusterip_lock );
88- if (atomic_dec_and_test (& c -> entries )) {
89- list_del (& c -> list );
90- write_unlock_bh (& clusterip_lock );
94+ local_bh_disable ();
95+ if (atomic_dec_and_lock (& c -> entries , & clusterip_lock )) {
96+ list_del_rcu (& c -> list );
97+ spin_unlock (& clusterip_lock );
98+ local_bh_enable ();
9199
92100 dev_mc_del (c -> dev , c -> clustermac );
93101 dev_put (c -> dev );
@@ -100,15 +108,15 @@ clusterip_config_entry_put(struct clusterip_config *c)
100108#endif
101109 return ;
102110 }
103- write_unlock_bh ( & clusterip_lock );
111+ local_bh_enable ( );
104112}
105113
106114static struct clusterip_config *
107115__clusterip_config_find (__be32 clusterip )
108116{
109117 struct clusterip_config * c ;
110118
111- list_for_each_entry (c , & clusterip_configs , list ) {
119+ list_for_each_entry_rcu (c , & clusterip_configs , list ) {
112120 if (c -> clusterip == clusterip )
113121 return c ;
114122 }
@@ -121,16 +129,15 @@ clusterip_config_find_get(__be32 clusterip, int entry)
121129{
122130 struct clusterip_config * c ;
123131
124- read_lock_bh ( & clusterip_lock );
132+ rcu_read_lock_bh ( );
125133 c = __clusterip_config_find (clusterip );
126- if (!c ) {
127- read_unlock_bh (& clusterip_lock );
128- return NULL ;
134+ if (c ) {
135+ if (unlikely (!atomic_inc_not_zero (& c -> refcount )))
136+ c = NULL ;
137+ else if (entry )
138+ atomic_inc (& c -> entries );
129139 }
130- atomic_inc (& c -> refcount );
131- if (entry )
132- atomic_inc (& c -> entries );
133- read_unlock_bh (& clusterip_lock );
140+ rcu_read_unlock_bh ();
134141
135142 return c ;
136143}
@@ -181,9 +188,9 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
181188 }
182189#endif
183190
184- write_lock_bh (& clusterip_lock );
185- list_add (& c -> list , & clusterip_configs );
186- write_unlock_bh (& clusterip_lock );
191+ spin_lock_bh (& clusterip_lock );
192+ list_add_rcu (& c -> list , & clusterip_configs );
193+ spin_unlock_bh (& clusterip_lock );
187194
188195 return c ;
189196}
@@ -733,6 +740,9 @@ static void __exit clusterip_tg_exit(void)
733740#endif
734741 nf_unregister_hook (& cip_arp_ops );
735742 xt_unregister_target (& clusterip_tg_reg );
743+
744+ /* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
745+ rcu_barrier_bh ();
736746}
737747
738748module_init (clusterip_tg_init );