Commit fff1f30

edumazet authored and davem330 committed
tcp: add a spinlock to protect struct request_sock_queue
struct request_sock_queue fields are currently protected by the listener 'lock' (not a real spinlock). We need to add a private spinlock instead, so that softirq handlers creating children do not have to worry about the backlog notion that the listener 'lock' carries.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent f6d3125 commit fff1f30
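As a rough illustration of the locking pattern the commit message describes (a sketch only; the toy_* names below are invented for this example and are not part of the patch), the softirq-side producer can take the new per-queue spinlock with a plain spin_lock(), since BH is already disabled there, while the process-context consumer must use the _bh variant so the softirq path cannot deadlock against it on the same CPU:

/* Sketch of the per-queue lock usage this commit introduces; toy_* types
 * and helpers are hypothetical stand-ins for the request_sock_queue helpers.
 */
#include <linux/spinlock.h>

struct toy_req {
	struct toy_req		*dl_next;
};

struct toy_accept_queue {
	spinlock_t		lock;	/* plays the role of rskq_lock */
	struct toy_req		*head;
	struct toy_req		*tail;
};

static inline void toy_queue_init(struct toy_accept_queue *q)
{
	spin_lock_init(&q->lock);	/* as reqsk_queue_alloc() now does */
	q->head = NULL;
	q->tail = NULL;
}

/* Producer side: called from softirq context when a child socket is
 * created, so a plain spin_lock() is sufficient (BH already disabled).
 */
static inline void toy_queue_add(struct toy_accept_queue *q,
				 struct toy_req *req)
{
	spin_lock(&q->lock);
	req->dl_next = NULL;
	if (q->tail)
		q->tail->dl_next = req;
	else
		q->head = req;
	q->tail = req;
	spin_unlock(&q->lock);
}

/* Consumer side: called from process context (accept()), so the _bh
 * variant is needed to keep the softirq producer from interrupting us
 * on this CPU while the lock is held.
 */
static inline struct toy_req *toy_queue_remove(struct toy_accept_queue *q)
{
	struct toy_req *req;

	spin_lock_bh(&q->lock);
	req = q->head;
	if (req) {
		q->head = req->dl_next;
		if (!q->head)
			q->tail = NULL;
	}
	spin_unlock_bh(&q->lock);
	return req;
}

With the queue protected by its own lock, the real reqsk_queue_remove() can also fold the empty check and the sk_acceptq_removed() accounting under the lock, which is why the callers in inet_csk_accept() and inet_csk_listen_stop() no longer call sk_acceptq_removed() themselves in the diff below.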

File tree

3 files changed: +26 -33 lines changed


include/net/request_sock.h

Lines changed: 18 additions & 19 deletions
@@ -176,9 +176,11 @@ struct fastopen_queue {
  *
  */
 struct request_sock_queue {
+	spinlock_t		rskq_lock;
+	u8			rskq_defer_accept;
+
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
-	u8			rskq_defer_accept;
 	struct listen_sock	*listen_opt;
 	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
 					     * if TFO is enabled.
@@ -196,16 +198,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue);
 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 			   bool reset);
 
-static inline struct request_sock *
-	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	queue->rskq_accept_head = NULL;
-	return req;
-}
-
-static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
 	return queue->rskq_accept_head == NULL;
 }
@@ -215,6 +208,7 @@ static inline void reqsk_queue_add(struct request_sock_queue *queue,
 					   struct sock *parent,
 					   struct sock *child)
 {
+	spin_lock(&queue->rskq_lock);
 	req->sk = child;
 	sk_acceptq_added(parent);
 
@@ -225,18 +219,23 @@ static inline void reqsk_queue_add(struct request_sock_queue *queue,
 
 	queue->rskq_accept_tail = req;
 	req->dl_next = NULL;
+	spin_unlock(&queue->rskq_lock);
 }
 
-static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+						       struct sock *parent)
 {
-	struct request_sock *req = queue->rskq_accept_head;
-
-	WARN_ON(req == NULL);
-
-	queue->rskq_accept_head = req->dl_next;
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_tail = NULL;
+	struct request_sock *req;
 
+	spin_lock_bh(&queue->rskq_lock);
+	req = queue->rskq_accept_head;
+	if (req) {
+		sk_acceptq_removed(parent);
+		queue->rskq_accept_head = req->dl_next;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_tail = NULL;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
 	return req;
 }
 

net/core/request_sock.c

Lines changed: 1 addition & 0 deletions
@@ -58,6 +58,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 		return -ENOMEM;
 
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
+	spin_lock_init(&queue->rskq_lock);
 	spin_lock_init(&queue->syn_wait_lock);
 
 	spin_lock_init(&queue->fastopenq.lock);

net/ipv4/inet_connection_sock.c

Lines changed: 7 additions & 14 deletions
@@ -330,10 +330,9 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 		if (error)
 			goto out_err;
 	}
-	req = reqsk_queue_remove(queue);
+	req = reqsk_queue_remove(queue, sk);
 	newsk = req->sk;
 
-	sk_acceptq_removed(sk);
 	if (sk->sk_protocol == IPPROTO_TCP &&
 	    tcp_rsk(req)->tfo_listener) {
 		spin_lock_bh(&queue->fastopenq.lock);
@@ -832,11 +831,7 @@ void inet_csk_listen_stop(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-	struct request_sock *acc_req;
-	struct request_sock *req;
-
-	/* make all the listen_opt local to us */
-	acc_req = reqsk_queue_yank_acceptq(queue);
+	struct request_sock *next, *req;
 
 	/* Following specs, it would be better either to send FIN
 	 * (and enter FIN-WAIT-1, it is normal close)
@@ -848,11 +843,9 @@ void inet_csk_listen_stop(struct sock *sk)
 	 */
 	reqsk_queue_destroy(queue);
 
-	while ((req = acc_req) != NULL) {
+	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
 		struct sock *child = req->sk;
 
-		acc_req = req->dl_next;
-
 		local_bh_disable();
 		bh_lock_sock(child);
 		WARN_ON(sock_owned_by_user(child));
@@ -882,18 +875,18 @@ void inet_csk_listen_stop(struct sock *sk)
 		local_bh_enable();
 		sock_put(child);
 
-		sk_acceptq_removed(sk);
 		reqsk_put(req);
 	}
 	if (queue->fastopenq.rskq_rst_head) {
 		/* Free all the reqs queued in rskq_rst_head. */
 		spin_lock_bh(&queue->fastopenq.lock);
-		acc_req = queue->fastopenq.rskq_rst_head;
+		req = queue->fastopenq.rskq_rst_head;
 		queue->fastopenq.rskq_rst_head = NULL;
 		spin_unlock_bh(&queue->fastopenq.lock);
-		while ((req = acc_req) != NULL) {
-			acc_req = req->dl_next;
+		while (req != NULL) {
+			next = req->dl_next;
 			reqsk_put(req);
+			req = next;
 		}
 	}
 	WARN_ON(sk->sk_ack_backlog);
