From bce037ab7e73b8d2eaee1e79eb32f4bec9533f75 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marcin=20Wcis=C5=82o?=
Date: Mon, 13 Oct 2025 23:30:48 +0200
Subject: [PATCH 1/2] tls: fix race between tx work scheduling and socket close
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

jira VULN-8187
cve CVE-2024-26585
commit-author Jakub Kicinski
commit e01e3934a1b2d122919f73bc6ddbe1cdafc4bbdb
upstream-diff No actual difference from the upstream patch, but manual
    conflict resolution was required due to differences in the
    neighbouring code

Similarly to the previous commit, the submitting thread (recvmsg/sendmsg)
may exit as soon as the async crypto handler calls complete(). Reorder
scheduling the work before calling complete(). This seems more logical in
the first place, as it's the inverse order of what the submitting thread
will do.

Reported-by: valis
Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
Signed-off-by: Jakub Kicinski
Reviewed-by: Simon Horman
Reviewed-by: Sabrina Dubroca
Signed-off-by: David S. Miller
(cherry picked from commit e01e3934a1b2d122919f73bc6ddbe1cdafc4bbdb)
Signed-off-by: Marcin Wcisło
---
 net/tls/tls_sw.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 35cd4f1124622..0374d86302bbd 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -427,7 +427,6 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 	struct scatterlist *sge;
 	struct sk_msg *msg_en;
 	struct tls_rec *rec;
-	bool ready = false;
 	int pending;
 
 	rec = container_of(aead_req, struct tls_rec, aead_req);
@@ -459,8 +458,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 		/* If received record is at head of tx_list, schedule tx */
 		first_rec = list_first_entry(&ctx->tx_list,
 					     struct tls_rec, list);
-		if (rec == first_rec)
-			ready = true;
+		if (rec == first_rec) {
+			/* Schedule the transmission */
+			if (!test_and_set_bit(BIT_TX_SCHEDULED,
+					      &ctx->tx_bitmask))
+				schedule_delayed_work(&ctx->tx_work.work, 1);
+		}
 	}
 
 	spin_lock_bh(&ctx->encrypt_compl_lock);
@@ -469,13 +472,6 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 	if (!pending && ctx->async_notify)
 		complete(&ctx->async_wait.completion);
 	spin_unlock_bh(&ctx->encrypt_compl_lock);
-
-	if (!ready)
-		return;
-
-	/* Schedule the transmission */
-	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
-		schedule_delayed_work(&ctx->tx_work.work, 1);
 }
 
 static int tls_do_encryption(struct sock *sk,
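For context on the ordering the patch above enforces: once complete() wakes
the submitting thread, that thread may tear down the socket state, so the
async handler must not touch its context afterwards. The following
standalone userspace sketch (pthreads; struct demo_ctx, handler() and the
other names are made up for the demo) illustrates the same rule under those
assumptions. It is not kernel code and not part of the patch.

/*
 * Standalone illustration: a completion handler must finish every access
 * to shared state before it signals the waiter, because the waiter may
 * release that state as soon as it is woken.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	pthread_mutex_t lock;
	pthread_cond_t done;
	bool complete;
	bool tx_scheduled;		/* stands in for BIT_TX_SCHEDULED */
};

/* Plays the role of tls_encrypt_done(): runs asynchronously. */
static void *handler(void *arg)
{
	struct demo_ctx *ctx = arg;

	/* Do the context-touching work first (the schedule_delayed_work() analogue). */
	ctx->tx_scheduled = true;

	/* Signal completion last; nothing below may dereference ctx again. */
	pthread_mutex_lock(&ctx->lock);
	ctx->complete = true;
	pthread_cond_signal(&ctx->done);
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

int main(void)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));
	pthread_t t;

	pthread_mutex_init(&ctx->lock, NULL);
	pthread_cond_init(&ctx->done, NULL);
	pthread_create(&t, NULL, handler, ctx);

	/* Plays the role of the submitting thread waiting on async_wait.completion. */
	pthread_mutex_lock(&ctx->lock);
	while (!ctx->complete)
		pthread_cond_wait(&ctx->done, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);

	printf("tx scheduled before completion was signalled: %s\n",
	       ctx->tx_scheduled ? "yes" : "no");

	pthread_join(t, NULL);		/* the demo can join; the kernel path cannot */
	pthread_mutex_destroy(&ctx->lock);
	pthread_cond_destroy(&ctx->done);
	free(ctx);
	return 0;
}

The demo joins and frees safely at the end; the kernel path has no such
join, since the submitting thread may free its context the moment
complete() returns, which is exactly why the patch moves the work
scheduling ahead of the completion.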
From 16e1adf2da60fbaff6dfb6b71b126401b7ed8486 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marcin=20Wcis=C5=82o?=
Date: Tue, 14 Oct 2025 01:14:13 +0200
Subject: [PATCH 2/2] netfilter: nft_limit: reject configurations that cause integer overflow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

jira VULN-8197
cve CVE-2024-26668
commit-author Florian Westphal
commit c9d9eb9c53d37cdebbad56b91e40baf42d5a97aa
upstream-diff Used the `limit' struct instead of `priv' because
    369b6cb5d391750fc01ce951c2500281d2975705 is missing. Also added casts
    to `u64' where appropriate, as inspired by the RHEL LTS 9.4 backport
    of this patch embedded in 270e20bbcd9bb76345ba0cb966a1a070960bffb9.

Reject bogus configs where the internal token counter wraps around. This
only occurs with very large requests, such as 17 GByte/s. It's better to
reject such a config than to apply an incorrect rate limit.

Fixes: d2168e849ebf ("netfilter: nft_limit: add per-byte limiting")
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
(cherry picked from commit c9d9eb9c53d37cdebbad56b91e40baf42d5a97aa)
Signed-off-by: Marcin Wcisło
---
 net/netfilter/nft_limit.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 82ec27bdf9412..c626dc10df78e 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -54,16 +54,18 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
 static int nft_limit_init(struct nft_limit *limit,
 			  const struct nlattr * const tb[], bool pkts)
 {
-	u64 unit, tokens;
+	u64 unit, tokens, rate_with_burst;
 
 	if (tb[NFTA_LIMIT_RATE] == NULL ||
 	    tb[NFTA_LIMIT_UNIT] == NULL)
 		return -EINVAL;
 
 	limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+	if (limit->rate == 0)
+		return -EINVAL;
+
 	unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-	limit->nsecs = unit * NSEC_PER_SEC;
-	if (limit->rate == 0 || limit->nsecs < unit)
+	if (check_mul_overflow(unit, (u64)NSEC_PER_SEC, &limit->nsecs))
 		return -EOVERFLOW;
 
 	if (tb[NFTA_LIMIT_BURST])
@@ -72,18 +74,25 @@ static int nft_limit_init(struct nft_limit *limit,
 	if (pkts && limit->burst == 0)
 		limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
 
-	if (limit->rate + limit->burst < limit->rate)
+	if (check_add_overflow(limit->rate, (u64)limit->burst, &rate_with_burst))
 		return -EOVERFLOW;
 
 	if (pkts) {
-		tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
+		u64 tmp = div64_u64(limit->nsecs, limit->rate);
+
+		if (check_mul_overflow(tmp, (u64)limit->burst, &tokens))
+			return -EOVERFLOW;
 	} else {
+		u64 tmp;
+
 		/* The token bucket size limits the number of tokens can be
 		 * accumulated. tokens_max specifies the bucket size.
 		 * tokens_max = unit * (rate + burst) / rate.
 		 */
-		tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
-				   limit->rate);
+		if (check_mul_overflow(limit->nsecs, rate_with_burst, &tmp))
+			return -EOVERFLOW;
+
+		tokens = div64_u64(tmp, limit->rate);
	}
 
 	limit->tokens = tokens;
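As a rough illustration of the arithmetic being guarded, here is a
standalone userspace sketch (not kernel code, not part of the patch): it
uses the GCC/Clang builtins that the kernel's check_mul_overflow() and
check_add_overflow() helpers wrap, and limit_tokens() plus the sample
rate/unit/burst values are made up for the demo.

/*
 * Byte-limit case: tokens_max = unit * NSEC_PER_SEC * (rate + burst) / rate.
 * Returns 0 on success, -1 if the rate is zero (EINVAL in the kernel) or
 * any intermediate value would wrap (EOVERFLOW in the kernel).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static int limit_tokens(uint64_t rate, uint64_t unit, uint64_t burst,
			uint64_t *tokens)
{
	uint64_t nsecs, rate_with_burst, tmp;

	if (rate == 0)
		return -1;
	if (__builtin_mul_overflow(unit, NSEC_PER_SEC, &nsecs))
		return -1;
	if (__builtin_add_overflow(rate, burst, &rate_with_burst))
		return -1;
	if (__builtin_mul_overflow(nsecs, rate_with_burst, &tmp))
		return -1;

	*tokens = tmp / rate;
	return 0;
}

int main(void)
{
	uint64_t tokens;

	/* Sane config: 1 MByte per second with a 100-byte burst. */
	if (limit_tokens(1000000, 1, 100, &tokens) == 0)
		printf("accepted, tokens=%" PRIu64 "\n", tokens);

	/* Bogus config: ~20 GiByte per second; the product wraps, so reject. */
	if (limit_tokens(20ULL << 30, 1, 0, &tokens) != 0)
		printf("rejected: token counter would wrap\n");

	return 0;
}

With unit = 1 second, the product nsecs * (rate + burst) exceeds 2^64 once
rate + burst passes roughly 2^64 / 10^9, about 1.8e10 bytes (~17 GiByte)
per second, so such configurations are now rejected with -EOVERFLOW instead
of silently wrapping into a bogus token count.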