
Commit 427fff9

ebiggers authored and kuba-moo committed
nvme-tcp: use crc32c() and skb_copy_and_crc32c_datagram_iter()
Now that the crc32c() library function directly takes advantage of
architecture-specific optimizations, and there now also exists
skb_copy_and_crc32c_datagram_iter(), it is unnecessary to go through the
crypto_ahash API. Just use those functions. This is much simpler, and it
also improves performance by eliminating the crypto API overhead.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://patch.msgid.link/20250519175012.36581-10-ebiggers@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent ea6342d · commit 427fff9
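For context, the digest in question is a plain CRC-32C with a fixed convention: seed the running CRC with all-ones and bit-invert the result, which is exactly what the new NVME_TCP_CRC_SEED and nvme_tcp_hdgst()/nvme_tcp_ddgst_final() helpers in the diff below encode. Here is a minimal, self-contained userspace sketch of that convention; the bitwise loop is only a software stand-in for the kernel's architecture-optimized crc32c(), and the test input is the standard CRC check vector:

/*
 * Hedged sketch, not kernel code: software CRC-32C with the same seed and
 * finalization convention as the new nvme_tcp_hdgst() helper, i.e.
 * digest = ~crc32c(~0, buf, len).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c_sw(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)	/* reflected polynomial 0x82F63B78 */
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char pdu[] = "123456789";	/* standard CRC check input */
	uint32_t digest = ~crc32c_sw(~0u, pdu, strlen(pdu));

	printf("0x%08x\n", digest);	/* prints 0xe3069283, the published CRC-32C check value */
	return 0;
}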

File tree: 2 files changed, +42 −86 lines

drivers/nvme/host/Kconfig

Lines changed: 2 additions & 2 deletions
@@ -84,9 +84,9 @@ config NVME_TCP
 	tristate "NVM Express over Fabrics TCP host driver"
 	depends on INET
 	depends on BLOCK
+	select CRC32
+	select NET_CRC32C
 	select NVME_FABRICS
-	select CRYPTO
-	select CRYPTO_CRC32C
 	help
 	  This provides support for the NVMe over Fabrics protocol using
 	  the TCP transport. This allows you to use remote block devices

drivers/nvme/host/tcp.c

Lines changed: 40 additions & 84 deletions
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/crc32.h>
 #include <linux/nvme-tcp.h>
 #include <linux/nvme-keyring.h>
 #include <net/sock.h>
@@ -16,7 +17,6 @@
 #include <net/tls_prot.h>
 #include <net/handshake.h>
 #include <linux/blk-mq.h>
-#include <crypto/hash.h>
 #include <net/busy_poll.h>
 #include <trace/events/sock.h>

@@ -168,8 +168,8 @@ struct nvme_tcp_queue {
 	bool			hdr_digest;
 	bool			data_digest;
 	bool			tls_enabled;
-	struct ahash_request	*rcv_hash;
-	struct ahash_request	*snd_hash;
+	u32			rcv_crc;
+	u32			snd_crc;
 	__le32			exp_ddgst;
 	__le32			recv_ddgst;
 	struct completion       tls_complete;
@@ -456,32 +456,38 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
 	return req;
 }
 
-static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
-		__le32 *dgst)
+#define NVME_TCP_CRC_SEED (~0)
+
+static inline void nvme_tcp_ddgst_update(u32 *crcp,
+		struct page *page, size_t off, size_t len)
 {
-	ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
-	crypto_ahash_final(hash);
+	page += off / PAGE_SIZE;
+	off %= PAGE_SIZE;
+	while (len) {
+		const void *vaddr = kmap_local_page(page);
+		size_t n = min(len, (size_t)PAGE_SIZE - off);
+
+		*crcp = crc32c(*crcp, vaddr + off, n);
+		kunmap_local(vaddr);
+		page++;
+		off = 0;
+		len -= n;
+	}
 }
 
-static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
-		struct page *page, off_t off, size_t len)
+static inline __le32 nvme_tcp_ddgst_final(u32 crc)
 {
-	struct scatterlist sg;
-
-	sg_init_table(&sg, 1);
-	sg_set_page(&sg, page, len, off);
-	ahash_request_set_crypt(hash, &sg, NULL, len);
-	crypto_ahash_update(hash);
+	return cpu_to_le32(~crc);
 }
 
-static inline void nvme_tcp_hdgst(struct ahash_request *hash,
-		void *pdu, size_t len)
+static inline __le32 nvme_tcp_hdgst(const void *pdu, size_t len)
 {
-	struct scatterlist sg;
+	return cpu_to_le32(~crc32c(NVME_TCP_CRC_SEED, pdu, len));
+}
 
-	sg_init_one(&sg, pdu, len);
-	ahash_request_set_crypt(hash, &sg, pdu + len, len);
-	crypto_ahash_digest(hash);
+static inline void nvme_tcp_set_hdgst(void *pdu, size_t len)
+{
+	*(__le32 *)(pdu + len) = nvme_tcp_hdgst(pdu, len);
 }
 
 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
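The update/final split above works because CRC-32C composes across chunks: folding a payload into the running u32 piece by piece, page by page in nvme_tcp_ddgst_update(), yields the same digest as one pass over the whole buffer. A hedged sketch of that property, reusing the crc32c_sw() stand-in from the earlier example:

/* Demonstrates that chunked CRC updates match the one-shot digest.
 * Same shape as the new helpers: seed once (NVME_TCP_CRC_SEED), update
 * per chunk, invert in the final step.  Reuses crc32c_sw() from the
 * sketch above. */
static uint32_t chunked_ddgst(const void *buf, size_t len, size_t chunk)
{
	const uint8_t *p = buf;
	uint32_t crc = ~0u;			/* seed, as in nvme_tcp_check_ddgst() */

	while (len) {
		size_t n = len < chunk ? len : chunk;	/* like one "page" */

		crc = crc32c_sw(crc, p, n);
		p += n;
		len -= n;
	}
	return ~crc;				/* finalize, as in nvme_tcp_ddgst_final() */
}

/* chunked_ddgst("123456789", 9, 3) == chunked_ddgst("123456789", 9, 9)
 *                                  == 0xe3069283 */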
@@ -499,8 +505,7 @@ static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
 	}
 
 	recv_digest = *(__le32 *)(pdu + hdr->hlen);
-	nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
-	exp_digest = *(__le32 *)(pdu + hdr->hlen);
+	exp_digest = nvme_tcp_hdgst(pdu, pdu_len);
 	if (recv_digest != exp_digest) {
 		dev_err(queue->ctrl->ctrl.device,
 			"header digest error: recv %#x expected %#x\n",
@@ -526,7 +531,7 @@ static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
 			nvme_tcp_queue_id(queue));
 		return -EPROTO;
 	}
-	crypto_ahash_init(queue->rcv_hash);
+	queue->rcv_crc = NVME_TCP_CRC_SEED;
 
 	return 0;
 }
@@ -926,8 +931,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 			iov_iter_count(&req->iter));
 
 		if (queue->data_digest)
-			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
-				&req->iter, recv_len, queue->rcv_hash);
+			ret = skb_copy_and_crc32c_datagram_iter(skb, *offset,
+				&req->iter, recv_len, &queue->rcv_crc);
 		else
 			ret = skb_copy_datagram_iter(skb, *offset,
 				&req->iter, recv_len);
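On the receive side, skb_copy_and_crc32c_datagram_iter() folds the payload into queue->rcv_crc while it is being copied out of the socket buffer, replacing skb_copy_and_hash_datagram_iter() and its ahash request. A rough userspace analogue of that copy-and-checksum step, again assuming the crc32c_sw() helper from the first sketch (the kernel helper checksums as it copies rather than making two passes):

/* Copy len bytes from src to dst and fold them into the running CRC,
 * mirroring the interface shape of skb_copy_and_crc32c_datagram_iter().
 * Two passes here for brevity; the real helper interleaves them. */
static void copy_and_crc32c(void *dst, const void *src, size_t len,
			    uint32_t *crcp)
{
	memcpy(dst, src, len);
	*crcp = crc32c_sw(*crcp, dst, len);
}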
@@ -945,7 +950,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 
 	if (!queue->data_remaining) {
 		if (queue->data_digest) {
-			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
+			queue->exp_ddgst = nvme_tcp_ddgst_final(queue->rcv_crc);
 			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
 		} else {
 			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
@@ -1147,7 +1152,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 			return ret;
 
 		if (queue->data_digest)
-			nvme_tcp_ddgst_update(queue->snd_hash, page,
+			nvme_tcp_ddgst_update(&queue->snd_crc, page,
 					offset, ret);
 
 		/*
@@ -1161,8 +1166,8 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 		/* fully successful last send in current PDU */
 		if (last && ret == len) {
 			if (queue->data_digest) {
-				nvme_tcp_ddgst_final(queue->snd_hash,
-					&req->ddgst);
+				req->ddgst =
+					nvme_tcp_ddgst_final(queue->snd_crc);
 				req->state = NVME_TCP_SEND_DDGST;
 				req->offset = 0;
 			} else {
@@ -1194,7 +1199,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 		msg.msg_flags |= MSG_EOR;
 
 	if (queue->hdr_digest && !req->offset)
-		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+		nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
 
 	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
 	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
@@ -1207,7 +1212,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 	if (inline_data) {
 		req->state = NVME_TCP_SEND_DATA;
 		if (queue->data_digest)
-			crypto_ahash_init(queue->snd_hash);
+			queue->snd_crc = NVME_TCP_CRC_SEED;
 	} else {
 		nvme_tcp_done_send_req(queue);
 	}
@@ -1229,7 +1234,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 	int ret;
 
 	if (queue->hdr_digest && !req->offset)
-		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+		nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
 
 	if (!req->h2cdata_left)
 		msg.msg_flags |= MSG_SPLICE_PAGES;
@@ -1244,7 +1249,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 	if (!len) {
 		req->state = NVME_TCP_SEND_DATA;
 		if (queue->data_digest)
-			crypto_ahash_init(queue->snd_hash);
+			queue->snd_crc = NVME_TCP_CRC_SEED;
 		return 1;
 	}
 	req->offset += ret;
@@ -1384,41 +1389,6 @@ static void nvme_tcp_io_work(struct work_struct *w)
 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
-static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
-
-	ahash_request_free(queue->rcv_hash);
-	ahash_request_free(queue->snd_hash);
-	crypto_free_ahash(tfm);
-}
-
-static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
-{
-	struct crypto_ahash *tfm;
-
-	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!queue->snd_hash)
-		goto free_tfm;
-	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
-
-	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!queue->rcv_hash)
-		goto free_snd_hash;
-	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
-
-	return 0;
-free_snd_hash:
-	ahash_request_free(queue->snd_hash);
-free_tfm:
-	crypto_free_ahash(tfm);
-	return -ENOMEM;
-}
-
 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
 {
 	struct nvme_tcp_request *async = &ctrl->async_req;
@@ -1451,9 +1421,6 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
 		return;
 
-	if (queue->hdr_digest || queue->data_digest)
-		nvme_tcp_free_crypto(queue);
-
 	page_frag_cache_drain(&queue->pf_cache);
 
 	noreclaim_flag = memalloc_noreclaim_save();
@@ -1867,21 +1834,13 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
 
 	queue->hdr_digest = nctrl->opts->hdr_digest;
 	queue->data_digest = nctrl->opts->data_digest;
-	if (queue->hdr_digest || queue->data_digest) {
-		ret = nvme_tcp_alloc_crypto(queue);
-		if (ret) {
-			dev_err(nctrl->device,
-				"failed to allocate queue %d crypto\n", qid);
-			goto err_sock;
-		}
-	}
 
 	rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
 			nvme_tcp_hdgst_len(queue);
 	queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
 	if (!queue->pdu) {
 		ret = -ENOMEM;
-		goto err_crypto;
+		goto err_sock;
 	}
 
 	dev_dbg(nctrl->device, "connecting queue %d\n",
@@ -1914,9 +1873,6 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 err_rcv_pdu:
 	kfree(queue->pdu);
-err_crypto:
-	if (queue->hdr_digest || queue->data_digest)
-		nvme_tcp_free_crypto(queue);
 err_sock:
 	/* ->sock will be released by fput() */
 	fput(queue->sock->file);
