Skip to content

Commit b14a260

Browse files
committed
Merge branch 'nfp-tls-fixes-for-initial-TLS-support'
Jakub Kicinski says: ==================== nfp: tls: fixes for initial TLS support This series brings various fixes to nfp tls offload recently added to net-next. The first 4 patches revolve around device mailbox communication, trying to make it more reliable. The next patch fixes a statistical counter. Patch 6 improves the TX resync if device communication failed. Patch 7 makes sure we remove keys from memory after talking to FW. Patch 8 adds missing tls context initialization; we fill in the context information from various places based on the configuration, and it looks like we missed the init in the case where TX is offloaded, but RX wasn't initialized yet. Patches 9 and 10 make the nfp driver undo TLS state changes if we need to drop the frame (e.g. due to DMA mapping error). Last but not least, TLS fallback should not adjust socket memory after skb_orphan_partial(). This code will go away once we forbid orphaning of skbs in need of crypto, but that's "real" -next material, so let's do a quick fix. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents 3cab2af + 5c4b460 commit b14a260

File tree

10 files changed

+143
-50
lines changed

10 files changed

+143
-50
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -160,9 +160,9 @@ static void mlx5e_tls_del(struct net_device *netdev,
160160
direction == TLS_OFFLOAD_CTX_DIR_TX);
161161
}
162162

163-
static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
164-
u32 seq, u8 *rcd_sn_data,
165-
enum tls_offload_ctx_dir direction)
163+
static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
164+
u32 seq, u8 *rcd_sn_data,
165+
enum tls_offload_ctx_dir direction)
166166
{
167167
struct tls_context *tls_ctx = tls_get_ctx(sk);
168168
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -177,6 +177,8 @@ static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
177177
be64_to_cpu(rcd_sn));
178178
mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
179179
atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
180+
181+
return 0;
180182
}
181183

182184
static const struct tlsdev_ops mlx5e_tls_ops = {

drivers/net/ethernet/netronome/nfp/ccm.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,10 @@ bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size);
118118
struct sk_buff *
119119
nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
120120
unsigned int reply_size, gfp_t flags);
121+
int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
122+
enum nfp_ccm_type type,
123+
unsigned int reply_size,
124+
unsigned int max_reply_size, bool critical);
121125
int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
122126
enum nfp_ccm_type type,
123127
unsigned int reply_size,

drivers/net/ethernet/netronome/nfp/ccm_mbox.c

Lines changed: 20 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
* form a batch. Threads come in with CMSG formed in an skb, then
1414
* enqueue that skb onto the request queue. If a thread's skb is first
1515
* in queue this thread will handle the mailbox operation. It copies
16-
* up to 16 messages into the mailbox (making sure that both requests
16+
* up to 64 messages into the mailbox (making sure that both requests
1717
* and replies will fit). After FW is done processing the batch it
1818
* copies the data out and wakes waiting threads.
1919
* If a thread is waiting it either gets its message completed
@@ -23,9 +23,9 @@
2323
* to limit potential cache line bounces.
2424
*/
2525

26-
#define NFP_CCM_MBOX_BATCH_LIMIT 16
26+
#define NFP_CCM_MBOX_BATCH_LIMIT 64
2727
#define NFP_CCM_TIMEOUT (NFP_NET_POLL_TIMEOUT * 1000)
28-
#define NFP_CCM_MAX_QLEN 256
28+
#define NFP_CCM_MAX_QLEN 1024
2929

3030
enum nfp_net_mbox_cmsg_state {
3131
NFP_NET_MBOX_CMSG_STATE_QUEUED,
@@ -515,13 +515,13 @@ nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
515515

516516
static int
517517
nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
518-
enum nfp_ccm_type type)
518+
enum nfp_ccm_type type, bool critical)
519519
{
520520
struct nfp_ccm_hdr *hdr;
521521

522522
assert_spin_locked(&nn->mbox_cmsg.queue.lock);
523523

524-
if (nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
524+
if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
525525
nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
526526
return -EBUSY;
527527
}
@@ -536,10 +536,10 @@ nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
536536
return 0;
537537
}
538538

539-
int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
540-
enum nfp_ccm_type type,
541-
unsigned int reply_size,
542-
unsigned int max_reply_size)
539+
int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
540+
enum nfp_ccm_type type,
541+
unsigned int reply_size,
542+
unsigned int max_reply_size, bool critical)
543543
{
544544
int err;
545545

@@ -550,7 +550,7 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
550550

551551
spin_lock_bh(&nn->mbox_cmsg.queue.lock);
552552

553-
err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
553+
err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
554554
if (err)
555555
goto err_unlock;
556556

@@ -594,6 +594,15 @@ int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
594594
return err;
595595
}
596596

597+
int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
598+
enum nfp_ccm_type type,
599+
unsigned int reply_size,
600+
unsigned int max_reply_size)
601+
{
602+
return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
603+
max_reply_size, false);
604+
}
605+
597606
static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
598607
{
599608
struct sk_buff *skb;
@@ -650,7 +659,7 @@ int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
650659

651660
spin_lock_bh(&nn->mbox_cmsg.queue.lock);
652661

653-
err = nfp_ccm_mbox_msg_enqueue(nn, skb, type);
662+
err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
654663
if (err)
655664
goto err_unlock;
656665

drivers/net/ethernet/netronome/nfp/crypto/fw.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,8 @@ struct nfp_crypto_req_add_front {
3131
u8 key_len;
3232
__be16 ipver_vlan __packed;
3333
u8 l4_proto;
34+
#define NFP_NET_TLS_NON_ADDR_KEY_LEN 8
35+
u8 l3_addrs[0];
3436
};
3537

3638
struct nfp_crypto_req_add_back {

drivers/net/ethernet/netronome/nfp/crypto/tls.c

Lines changed: 66 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
#include <linux/bitfield.h>
55
#include <linux/ipv6.h>
66
#include <linux/skbuff.h>
7+
#include <linux/string.h>
78
#include <net/tls.h>
89

910
#include "../ccm.h"
@@ -112,8 +113,9 @@ nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
112113
struct nfp_crypto_reply_simple *reply;
113114
int err;
114115

115-
err = nfp_ccm_mbox_communicate(nn, skb, type,
116-
sizeof(*reply), sizeof(*reply));
116+
err = __nfp_ccm_mbox_communicate(nn, skb, type,
117+
sizeof(*reply), sizeof(*reply),
118+
type == NFP_CCM_TYPE_CRYPTO_DEL);
117119
if (err) {
118120
nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
119121
return err;
@@ -146,20 +148,38 @@ static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
146148
NFP_CCM_TYPE_CRYPTO_DEL);
147149
}
148150

151+
static void
152+
nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
153+
{
154+
front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
155+
FIELD_PREP(NFP_NET_TLS_VLAN,
156+
NFP_NET_TLS_VLAN_UNUSED));
157+
}
158+
159+
static void
160+
nfp_net_tls_assign_conn_id(struct nfp_net *nn,
161+
struct nfp_crypto_req_add_front *front)
162+
{
163+
u32 len;
164+
u64 id;
165+
166+
id = atomic64_inc_return(&nn->ktls_conn_id_gen);
167+
len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;
168+
169+
memcpy(front->l3_addrs, &id, sizeof(id));
170+
memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
171+
}
172+
149173
static struct nfp_crypto_req_add_back *
150-
nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
151-
int direction)
174+
nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
175+
struct sock *sk, int direction)
152176
{
153177
struct inet_sock *inet = inet_sk(sk);
154178

155179
req->front.key_len += sizeof(__be32) * 2;
156-
req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 4) |
157-
FIELD_PREP(NFP_NET_TLS_VLAN,
158-
NFP_NET_TLS_VLAN_UNUSED));
159180

160181
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
161-
req->src_ip = inet->inet_saddr;
162-
req->dst_ip = inet->inet_daddr;
182+
nfp_net_tls_assign_conn_id(nn, &req->front);
163183
} else {
164184
req->src_ip = inet->inet_daddr;
165185
req->dst_ip = inet->inet_saddr;
@@ -169,20 +189,16 @@ nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
169189
}
170190

171191
static struct nfp_crypto_req_add_back *
172-
nfp_net_tls_set_ipv6(struct nfp_crypto_req_add_v6 *req, struct sock *sk,
173-
int direction)
192+
nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
193+
struct sock *sk, int direction)
174194
{
175195
#if IS_ENABLED(CONFIG_IPV6)
176196
struct ipv6_pinfo *np = inet6_sk(sk);
177197

178198
req->front.key_len += sizeof(struct in6_addr) * 2;
179-
req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 6) |
180-
FIELD_PREP(NFP_NET_TLS_VLAN,
181-
NFP_NET_TLS_VLAN_UNUSED));
182199

183200
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
184-
memcpy(req->src_ip, &np->saddr, sizeof(req->src_ip));
185-
memcpy(req->dst_ip, &sk->sk_v6_daddr, sizeof(req->dst_ip));
201+
nfp_net_tls_assign_conn_id(nn, &req->front);
186202
} else {
187203
memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
188204
memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
@@ -202,8 +218,8 @@ nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
202218
front->l4_proto = IPPROTO_TCP;
203219

204220
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
205-
back->src_port = inet->inet_sport;
206-
back->dst_port = inet->inet_dport;
221+
back->src_port = 0;
222+
back->dst_port = 0;
207223
} else {
208224
back->src_port = inet->inet_dport;
209225
back->dst_port = inet->inet_sport;
@@ -257,6 +273,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
257273
struct nfp_crypto_reply_add *reply;
258274
struct sk_buff *skb;
259275
size_t req_sz;
276+
void *req;
260277
bool ipv6;
261278
int err;
262279

@@ -299,14 +316,17 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
299316

300317
front = (void *)skb->data;
301318
front->ep_id = 0;
302-
front->key_len = 8;
319+
front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
303320
front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
304321
memset(front->resv, 0, sizeof(front->resv));
305322

323+
nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);
324+
325+
req = (void *)skb->data;
306326
if (ipv6)
307-
back = nfp_net_tls_set_ipv6((void *)skb->data, sk, direction);
327+
back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
308328
else
309-
back = nfp_net_tls_set_ipv4((void *)skb->data, sk, direction);
329+
back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
310330

311331
nfp_net_tls_set_l4(front, back, sk, direction);
312332

@@ -321,15 +341,29 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
321341
memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
322342
memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
323343

344+
/* Get an extra ref on the skb so we can wipe the key after */
345+
skb_get(skb);
346+
324347
err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
325348
sizeof(*reply), sizeof(*reply));
349+
reply = (void *)skb->data;
350+
351+
/* We depend on CCM MBOX code not reallocating skb we sent
352+
* so we can clear the key material out of the memory.
353+
*/
354+
if (!WARN_ON_ONCE((u8 *)back < skb->head ||
355+
(u8 *)back > skb_end_pointer(skb)) &&
356+
!WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
357+
memzero_explicit(back, sizeof(*back));
358+
dev_consume_skb_any(skb); /* the extra ref from skb_get() above */
359+
326360
if (err) {
327-
nn_dp_warn(&nn->dp, "failed to add TLS: %d\n", err);
361+
nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
362+
err, direction == TLS_OFFLOAD_CTX_DIR_TX);
328363
/* communicate frees skb on error */
329364
goto err_conn_remove;
330365
}
331366

332-
reply = (void *)skb->data;
333367
err = -be32_to_cpu(reply->error);
334368
if (err) {
335369
if (err == -ENOSPC) {
@@ -383,7 +417,7 @@ nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
383417
nfp_net_tls_del_fw(nn, ntls->fw_handle);
384418
}
385419

386-
static void
420+
static int
387421
nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
388422
u8 *rcd_sn, enum tls_offload_ctx_dir direction)
389423
{
@@ -392,11 +426,12 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
392426
struct nfp_crypto_req_update *req;
393427
struct sk_buff *skb;
394428
gfp_t flags;
429+
int err;
395430

396431
flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
397432
skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
398433
if (!skb)
399-
return;
434+
return -ENOMEM;
400435

401436
ntls = tls_driver_ctx(sk, direction);
402437
req = (void *)skb->data;
@@ -408,13 +443,17 @@ nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
408443
memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
409444

410445
if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
411-
nfp_net_tls_communicate_simple(nn, skb, "sync",
412-
NFP_CCM_TYPE_CRYPTO_UPDATE);
446+
err = nfp_net_tls_communicate_simple(nn, skb, "sync",
447+
NFP_CCM_TYPE_CRYPTO_UPDATE);
448+
if (err)
449+
return err;
413450
ntls->next_seq = seq;
414451
} else {
415452
nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
416453
sizeof(struct nfp_crypto_reply_simple));
417454
}
455+
456+
return 0;
418457
}
419458

420459
static const struct tlsdev_ops nfp_net_tls_ops = {

drivers/net/ethernet/netronome/nfp/nfp_net.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -583,6 +583,7 @@ struct nfp_net_dp {
583583
* @tlv_caps: Parsed TLV capabilities
584584
* @ktls_tx_conn_cnt: Number of offloaded kTLS TX connections
585585
* @ktls_rx_conn_cnt: Number of offloaded kTLS RX connections
586+
* @ktls_conn_id_gen: Trivial generator for kTLS connection ids (for TX)
586587
* @ktls_no_space: Counter of firmware rejecting kTLS connection due to
587588
* lack of space
588589
* @mbox_cmsg: Common Control Message via vNIC mailbox state
@@ -670,6 +671,8 @@ struct nfp_net {
670671
unsigned int ktls_tx_conn_cnt;
671672
unsigned int ktls_rx_conn_cnt;
672673

674+
atomic64_t ktls_conn_id_gen;
675+
673676
atomic_t ktls_no_space;
674677

675678
struct {

0 commit comments

Comments
 (0)