Commit bed1be2

Eric Dumazet authored and kaber (Patrick McHardy) committed
netfilter: nfnetlink_log: RCU conversion
- instances_lock becomes a spinlock
- lockless lookups

While nfnetlink_log is probably not performance critical, using fewer rwlocks in our code is always welcome.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
1 parent: c463ac9
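The conversion follows the standard RCU pattern for a hash table: readers walk the buckets under rcu_read_lock_bh() without taking any lock, writers serialize against each other with a plain spinlock, and the final kfree() is deferred through call_rcu_bh() so a concurrent lockless reader can never touch freed memory. Below is a condensed sketch of that pattern using the same 2.6.35-era kernel primitives the patch uses; the names (struct item, table_lock, item_lookup_get, item_put) are hypothetical, and this is an illustration rather than the actual nfnetlink_log code.

/* Condensed sketch of the RCU pattern (hypothetical names) */
struct item {
	struct hlist_node hlist;
	u_int16_t id;
	atomic_t use;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(table_lock);	/* taken by writers only */
static struct hlist_head table[16];

/* Lockless read side: safe against concurrent add/remove */
static struct item *item_lookup_get(u_int16_t id)
{
	struct item *i;
	struct hlist_node *pos;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(i, pos, &table[id % 16], hlist) {
		if (i->id == id) {
			atomic_inc(&i->use);	/* pin before leaving the RCU section */
			rcu_read_unlock_bh();
			return i;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

/* Write side: the actual kfree() waits for a BH grace period */
static void item_put(struct item *i)
{
	if (atomic_dec_and_test(&i->use))
		call_rcu_bh(&i->rcu, item_free_rcu);
}

Note that the spinlock is still required on the write side (instance_create(), instance_destroy() in the patch): RCU only makes the read side lock-free, while concurrent writers must still be ordered against each other.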

File tree: 1 file changed (+27 −22)

net/netfilter/nfnetlink_log.c

@@ -66,9 +66,10 @@ struct nfulnl_instance {
 	u_int16_t group_num;	/* number of this queue */
 	u_int16_t flags;
 	u_int8_t copy_mode;
+	struct rcu_head rcu;
 };
 
-static DEFINE_RWLOCK(instances_lock);
+static DEFINE_SPINLOCK(instances_lock);
 static atomic_t global_seq;
 
 #define INSTANCE_BUCKETS	16
@@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num)
 	struct nfulnl_instance *inst;
 
 	head = &instance_table[instance_hashfn(group_num)];
-	hlist_for_each_entry(inst, pos, head, hlist) {
+	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
 		if (inst->group_num == group_num)
 			return inst;
 	}
@@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num)
 {
 	struct nfulnl_instance *inst;
 
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	inst = __instance_lookup(group_num);
 	if (inst)
 		instance_get(inst);
-	read_unlock_bh(&instances_lock);
+	rcu_read_unlock_bh();
 
 	return inst;
 }
 
+static void nfulnl_instance_free_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct nfulnl_instance, rcu));
+	module_put(THIS_MODULE);
+}
+
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-	if (inst && atomic_dec_and_test(&inst->use)) {
-		kfree(inst);
-		module_put(THIS_MODULE);
-	}
+	if (inst && atomic_dec_and_test(&inst->use))
+		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }
 
 static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid)
 	struct nfulnl_instance *inst;
 	int err;
 
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	if (__instance_lookup(group_num)) {
 		err = -EEXIST;
 		goto out_unlock;
@@ -169,12 +174,12 @@ instance_create(u_int16_t group_num, int pid)
 	hlist_add_head(&inst->hlist,
 		       &instance_table[instance_hashfn(group_num)]);
 
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 
 	return inst;
 
 out_unlock:
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 	return ERR_PTR(err);
 }
 
@@ -200,9 +205,9 @@ __instance_destroy(struct nfulnl_instance *inst)
 static inline void
 instance_destroy(struct nfulnl_instance *inst)
 {
-	write_lock_bh(&instances_lock);
+	spin_lock_bh(&instances_lock);
 	__instance_destroy(inst);
-	write_unlock_bh(&instances_lock);
+	spin_unlock_bh(&instances_lock);
 }
 
 static int
@@ -672,7 +677,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 		int i;
 
 		/* destroy all instances for this pid */
-		write_lock_bh(&instances_lock);
+		spin_lock_bh(&instances_lock);
 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
 			struct hlist_node *tmp, *t2;
 			struct nfulnl_instance *inst;
@@ -684,7 +689,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 				__instance_destroy(inst);
 			}
 		}
-		write_unlock_bh(&instances_lock);
+		spin_unlock_bh(&instances_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -861,19 +866,19 @@ static struct hlist_node *get_first(struct iter_state *st)
 
 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
 		if (!hlist_empty(&instance_table[st->bucket]))
-			return instance_table[st->bucket].first;
+			return rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return NULL;
 }
 
 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-	h = h->next;
+	h = rcu_dereference_bh(h->next);
 	while (!h) {
 		if (++st->bucket >= INSTANCE_BUCKETS)
 			return NULL;
 
-		h = instance_table[st->bucket].first;
+		h = rcu_dereference_bh(instance_table[st->bucket].first);
 	}
 	return h;
 }
@@ -890,9 +895,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
 }
 
 static void *seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(instances_lock)
+	__acquires(rcu_bh)
 {
-	read_lock_bh(&instances_lock);
+	rcu_read_lock_bh();
 	return get_idx(seq->private, *pos);
 }
 
@@ -903,9 +908,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void seq_stop(struct seq_file *s, void *v)
-	__releases(instances_lock)
+	__releases(rcu_bh)
 {
-	read_unlock_bh(&instances_lock);
+	rcu_read_unlock_bh();
 }
 
 static int seq_show(struct seq_file *s, void *v)
