
Commit f53c723

net: Add asynchronous callbacks for xfrm on layer 2

This patch implements asynchronous crypto callbacks and a backlog
handler that can be used when IPsec is done at layer 2 in the TX path.
It also extends the skb validate functions so that we can update the
driver transmit return codes based on the async crypto operation, or
indicate that we queued the packet in a backlog queue.

Joint work with: Aviv Heller <avivh@mellanox.com>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>

1 parent 3dca3f3, commit f53c723

8 files changed, 175 insertions(+), 36 deletions(-)

include/linux/netdevice.h

Lines changed: 4 additions & 2 deletions
@@ -2793,7 +2793,9 @@ struct softnet_data {
 	struct Qdisc		*output_queue;
 	struct Qdisc		**output_queue_tailp;
 	struct sk_buff		*completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+	struct sk_buff_head	xfrm_backlog;
+#endif
 #ifdef CONFIG_RPS
 	/* input_queue_head should be written by cpu owning this struct,
 	 * and only read by other cpus. Worth using a cache line.
@@ -3325,7 +3327,7 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_get_phys_port_name(struct net_device *dev,
 			   char *name, size_t len);
 int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
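
Taken together, the two hunks above define the new TX-path contract: validate_xmit_skb_list() grows a bool *again out-parameter, and each per-CPU softnet_data grows a queue on which xfrm can park packets. Below is a minimal caller-side sketch of that contract, condensed from the sch_generic.c hunk later on this page; the helper name and exact error handling are illustrative, not the commit's code.

/* Illustrative helper (not commit code): consuming the new "again"
 * out-parameter. If the xfrm layer cannot finish the packet now, it
 * sets *again and the caller must requeue instead of transmitting. */
static bool try_validate_and_xmit(struct sk_buff *skb, struct net_device *dev,
				  struct netdev_queue *txq, struct Qdisc *q)
{
	bool again = false;
	int ret = NETDEV_TX_BUSY;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (unlikely(again)) {
		dev_requeue_skb(skb, q);	/* async crypto still in flight */
		return false;			/* stop this transmit cycle */
	}

	if (unlikely(!skb))
		return true;			/* dropped during validation */

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	return dev_xmit_complete(ret);
}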

include/net/xfrm.h

Lines changed: 19 additions & 3 deletions
@@ -1051,6 +1051,7 @@ struct xfrm_offload {
 #define	XFRM_GSO_SEGMENT	16
 #define	XFRM_GRO		32
 #define	XFRM_ESP_NO_TRAILER	64
+#define	XFRM_DEV_RESUME		128

 	__u32			status;
 #define CRYPTO_SUCCESS				1
@@ -1874,21 +1875,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
 {
 	return skb->sp->xvec[skb->sp->len - 1];
 }
+#endif
+
 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
 	struct sec_path *sp = skb->sp;

 	if (!sp || !sp->olen || sp->len != sp->olen)
 		return NULL;

 	return &sp->ovec[sp->olen - 1];
-}
+#else
+	return NULL;
 #endif
+}

 void __net_init xfrm_dev_init(void);

 #ifdef CONFIG_XFRM_OFFLOAD
-struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 		       struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1929,7 +1937,15 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
 	}
 }
 #else
-static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
+{
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
 	return skb;
 }
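
Two details here keep call sites clean. First, xfrm_offload() is now defined in every configuration: with CONFIG_XFRM it inspects the secpath, without it the function simply returns NULL. Second, xfrm_dev_resume() and xfrm_dev_backlog() get empty inline stubs when CONFIG_XFRM_OFFLOAD is off. A hedged illustration of why this matters, from a hypothetical caller's point of view (the helper below is not part of the commit):

/* Hypothetical call site (not commit code): compiles identically
 * whether or not XFRM support is built in, thanks to the stubs. */
static void tx_tail_example(struct softnet_data *sd, struct sk_buff *skb)
{
	/* Empty inline stub unless CONFIG_XFRM_OFFLOAD is enabled. */
	xfrm_dev_backlog(sd);

	/* Always safe: returns NULL without CONFIG_XFRM, or when the
	 * skb carries no offload state in its secpath. */
	if (xfrm_offload(skb))
		pr_debug("skb %p carries xfrm offload state\n", skb);
}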

net/core/dev.c

Lines changed: 11 additions & 5 deletions
@@ -3059,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);

-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	netdev_features_t features;
@@ -3099,7 +3099,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		}
 	}

-	skb = validate_xmit_xfrm(skb, features);
+	skb = validate_xmit_xfrm(skb, features, again);

 	return skb;
@@ -3110,7 +3110,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	return NULL;
 }

-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	struct sk_buff *next, *head = NULL, *tail;
@@ -3121,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 		/* in case skb wont be segmented, point to itself */
 		skb->prev = skb;

-		skb = validate_xmit_skb(skb, dev);
+		skb = validate_xmit_skb(skb, dev, again);
 		if (!skb)
 			continue;
@@ -3448,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
+	bool again = false;

 	skb_reset_mac_header(skb);
@@ -3509,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 					     XMIT_RECURSION_LIMIT))
 			goto recursion_alert;

-		skb = validate_xmit_skb(skb, dev);
+		skb = validate_xmit_skb(skb, dev, &again);
 		if (!skb)
 			goto out;
@@ -4193,6 +4194,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 			spin_unlock(root_lock);
 		}
 	}
+
+	xfrm_dev_backlog(sd);
 }

 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -8874,6 +8877,9 @@ static int __init net_dev_init(void)

 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+		skb_queue_head_init(&sd->xfrm_backlog);
+#endif
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
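
net_tx_action() now drains the per-CPU backlog at the tail of every TX softirq, and net_dev_init() initializes the queue for each CPU. The body of xfrm_dev_backlog() lives in net/xfrm/xfrm_device.c, the one file of the eight that this page does not reproduce; the sketch below shows a plausible shape for the drain loop, splicing the queue onto a private list under its own lock and resuming each packet. Read it as an approximation, not the commit's verbatim hunk.

/* Plausible sketch of the backlog drain (the real body is in
 * net/xfrm/xfrm_device.c, which is not shown on this page). */
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	/* Take the whole queue at once; producers use the same lock. */
	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);	/* retry the device transmit */
	}
}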

net/ipv4/esp4.c

Lines changed: 21 additions & 3 deletions
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	void *tmp;
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_state *x;
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME))
+		x = skb->sp->xvec[skb->sp->len - 1];
+	else
+		x = skb_dst(skb)->xfrm;

 	tmp = ESP_SKB_CB(skb)->tmp;
 	esp_ssg_unref(x, tmp);
 	kfree(tmp);
-	xfrm_output_resume(skb, err);
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+		if (err) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, skb->data - skb_mac_header(skb));
+		secpath_reset(skb);
+		xfrm_dev_resume(skb);
+	} else {
+		xfrm_output_resume(skb, err);
+	}
 }

 /* Move ESP header back into place. */
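
On the XFRM_DEV_RESUME branch, the callback restores the MAC header (the packet was already fully built for layer 2), drops the secpath, and hands the skb to xfrm_dev_resume(), i.e. straight back to the device TX path instead of up through xfrm_output_resume(). The identical esp_output_done() change lands in net/ipv6/esp6.c below. Like xfrm_dev_backlog(), the resume function itself is in the xfrm_device.c hunk not shown on this page; a hedged sketch of what resuming plausibly involves, using only TX-path APIs that the diffs above already rely on:

/* Plausible sketch of xfrm_dev_resume() (not the commit's verbatim
 * code): retry the transmit directly; if the device queue is stopped
 * or the driver is busy, park the skb on the per-CPU backlog and let
 * net_tx_action() try again from softirq context. */
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	int ret = NETDEV_TX_BUSY;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}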

net/ipv6/esp6.c

Lines changed: 21 additions & 3 deletions
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	void *tmp;
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_state *x;
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME))
+		x = skb->sp->xvec[skb->sp->len - 1];
+	else
+		x = skb_dst(skb)->xfrm;

 	tmp = ESP_SKB_CB(skb)->tmp;
 	esp_ssg_unref(x, tmp);
 	kfree(tmp);
-	xfrm_output_resume(skb, err);
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+		if (err) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, skb->data - skb_mac_header(skb));
+		secpath_reset(skb);
+		xfrm_dev_resume(skb);
+	} else {
+		xfrm_output_resume(skb, err);
+	}
 }

 /* Move ESP header back into place. */

net/packet/af_packet.c

Lines changed: 2 additions & 1 deletion
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	struct sk_buff *orig_skb = skb;
 	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
+	bool again = false;

 	if (unlikely(!netif_running(dev) ||
 		     !netif_carrier_ok(dev)))
 		goto drop;

-	skb = validate_xmit_skb_list(skb, dev);
+	skb = validate_xmit_skb_list(skb, dev, &again);
 	if (skb != orig_skb)
 		goto drop;

net/sched/sch_generic.c

Lines changed: 15 additions & 1 deletion
@@ -32,6 +32,7 @@
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <net/xfrm.h>

 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -230,6 +231,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,

 		/* skb in gso_skb were already validated */
 		*validate = false;
+		if (xfrm_offload(skb))
+			*validate = true;
 		/* check the reason of requeuing without tx lock first */
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -285,14 +288,25 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		     spinlock_t *root_lock, bool validate)
 {
 	int ret = NETDEV_TX_BUSY;
+	bool again = false;

 	/* And release qdisc */
 	if (root_lock)
 		spin_unlock(root_lock);

 	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 	if (validate)
-		skb = validate_xmit_skb_list(skb, dev);
+		skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+	if (unlikely(again)) {
+		if (root_lock)
+			spin_lock(root_lock);
+
+		dev_requeue_skb(skb, q);
+		return false;
+	}
+#endif

 	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
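
Two details above close the loop: dequeue_skb() re-validates requeued packets that carry xfrm offload state (requeued gso_skb packets normally skip validation), and sch_direct_xmit() retakes root_lock before dev_requeue_skb(), since the qdisc requeue list is protected by the root lock. An informal, comment-only summary of the retry cycle the patch creates, as read from the hunks on this page:

/*
 * Retry cycle introduced by this patch (informal reading, not commit
 * text):
 *
 *   qdisc_restart()
 *     -> dequeue_skb()            xfrm skbs are re-validated even when
 *                                 requeued (first sch_generic.c hunk)
 *     -> sch_direct_xmit()
 *          validate_xmit_skb_list(skb, dev, &again)
 *            -> validate_xmit_xfrm() may set again (e.g. to preserve
 *               ordering while the per-CPU backlog still holds packets)
 *          if (again): retake root_lock, dev_requeue_skb(), return false
 *
 *   crypto completion (esp_output_done)
 *     -> xfrm_dev_resume()        retry the transmit, else queue on
 *                                 sd->xfrm_backlog and raise NET_TX
 *
 *   net_tx_action()
 *     -> xfrm_dev_backlog(sd)     drain the backlog and resume each skb
 */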
