Skip to content

Commit aec7961

Browse files
kuba-moo authored and davem330 committed
tls: fix race between async notify and socket close
The submitting thread (one which called recvmsg/sendmsg) may exit as soon as the async crypto handler calls complete() so any code past that point risks touching already freed data. Try to avoid the locking and extra flags altogether. Have the main thread hold an extra reference, this way we can depend solely on the atomic ref counter for synchronization. Don't futz with reiniting the completion, either, we are now tightly controlling when completion fires.

Reported-by: valis <sec@valis.email>
Fixes: 0cada33 ("net/tls: fix race condition causing kernel panic")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent c57ca51 commit aec7961

File tree

2 files changed

+10
-38
lines changed

2 files changed

+10
-38
lines changed

include/net/tls.h

Lines changed: 0 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -97,9 +97,6 @@ struct tls_sw_context_tx {
9797
struct tls_rec *open_rec;
9898
struct list_head tx_list;
9999
atomic_t encrypt_pending;
100-
/* protect crypto_wait with encrypt_pending */
101-
spinlock_t encrypt_compl_lock;
102-
int async_notify;
103100
u8 async_capable:1;
104101

105102
#define BIT_TX_SCHEDULED 0
@@ -136,8 +133,6 @@ struct tls_sw_context_rx {
136133
struct tls_strparser strp;
137134

138135
atomic_t decrypt_pending;
139-
/* protect crypto_wait with decrypt_pending*/
140-
spinlock_t decrypt_compl_lock;
141136
struct sk_buff_head async_hold;
142137
struct wait_queue_head wq;
143138
};

net/tls/tls_sw.c

Lines changed: 10 additions & 33 deletions
Original file line number | Diff line number | Diff line change
@@ -224,22 +224,15 @@ static void tls_decrypt_done(void *data, int err)
224224

225225
kfree(aead_req);
226226

227-
spin_lock_bh(&ctx->decrypt_compl_lock);
228-
if (!atomic_dec_return(&ctx->decrypt_pending))
227+
if (atomic_dec_and_test(&ctx->decrypt_pending))
229228
complete(&ctx->async_wait.completion);
230-
spin_unlock_bh(&ctx->decrypt_compl_lock);
231229
}
232230

233231
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
234232
{
235-
int pending;
236-
237-
spin_lock_bh(&ctx->decrypt_compl_lock);
238-
reinit_completion(&ctx->async_wait.completion);
239-
pending = atomic_read(&ctx->decrypt_pending);
240-
spin_unlock_bh(&ctx->decrypt_compl_lock);
241-
if (pending)
233+
if (!atomic_dec_and_test(&ctx->decrypt_pending))
242234
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
235+
atomic_inc(&ctx->decrypt_pending);
243236

244237
return ctx->async_wait.err;
245238
}
@@ -267,6 +260,7 @@ static int tls_do_decryption(struct sock *sk,
267260
aead_request_set_callback(aead_req,
268261
CRYPTO_TFM_REQ_MAY_BACKLOG,
269262
tls_decrypt_done, aead_req);
263+
DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
270264
atomic_inc(&ctx->decrypt_pending);
271265
} else {
272266
aead_request_set_callback(aead_req,
@@ -455,7 +449,6 @@ static void tls_encrypt_done(void *data, int err)
455449
struct sk_msg *msg_en;
456450
bool ready = false;
457451
struct sock *sk;
458-
int pending;
459452

460453
msg_en = &rec->msg_encrypted;
461454

@@ -494,12 +487,8 @@ static void tls_encrypt_done(void *data, int err)
494487
ready = true;
495488
}
496489

497-
spin_lock_bh(&ctx->encrypt_compl_lock);
498-
pending = atomic_dec_return(&ctx->encrypt_pending);
499-
500-
if (!pending && ctx->async_notify)
490+
if (atomic_dec_and_test(&ctx->encrypt_pending))
501491
complete(&ctx->async_wait.completion);
502-
spin_unlock_bh(&ctx->encrypt_compl_lock);
503492

504493
if (!ready)
505494
return;
@@ -511,22 +500,9 @@ static void tls_encrypt_done(void *data, int err)
511500

512501
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
513502
{
514-
int pending;
515-
516-
spin_lock_bh(&ctx->encrypt_compl_lock);
517-
ctx->async_notify = true;
518-
519-
pending = atomic_read(&ctx->encrypt_pending);
520-
spin_unlock_bh(&ctx->encrypt_compl_lock);
521-
if (pending)
503+
if (!atomic_dec_and_test(&ctx->encrypt_pending))
522504
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
523-
else
524-
reinit_completion(&ctx->async_wait.completion);
525-
526-
/* There can be no concurrent accesses, since we have no
527-
* pending encrypt operations
528-
*/
529-
WRITE_ONCE(ctx->async_notify, false);
505+
atomic_inc(&ctx->encrypt_pending);
530506

531507
return ctx->async_wait.err;
532508
}
@@ -577,6 +553,7 @@ static int tls_do_encryption(struct sock *sk,
577553

578554
/* Add the record in tx_list */
579555
list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
556+
DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
580557
atomic_inc(&ctx->encrypt_pending);
581558

582559
rc = crypto_aead_encrypt(aead_req);
@@ -2601,7 +2578,7 @@ static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct soc
26012578
}
26022579

26032580
crypto_init_wait(&sw_ctx_tx->async_wait);
2604-
spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2581+
atomic_set(&sw_ctx_tx->encrypt_pending, 1);
26052582
INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
26062583
INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
26072584
sw_ctx_tx->tx_work.sk = sk;
@@ -2622,7 +2599,7 @@ static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
26222599
}
26232600

26242601
crypto_init_wait(&sw_ctx_rx->async_wait);
2625-
spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2602+
atomic_set(&sw_ctx_rx->decrypt_pending, 1);
26262603
init_waitqueue_head(&sw_ctx_rx->wq);
26272604
skb_queue_head_init(&sw_ctx_rx->rx_list);
26282605
skb_queue_head_init(&sw_ctx_rx->async_hold);

0 commit comments

Comments
 (0)