Skip to content

Commit 3dca3f3

Browse files
committed
xfrm: Separate ESP handling from segmentation for GRO packets.
We change the ESP GSO handlers to only segment the packets. The ESP handling and encryption is deferred to validate_xmit_xfrm() where this is done for non GRO packets too. This makes the code more robust and prepares for asynchronous crypto handling. Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
1 parent f39a5c0 commit 3dca3f3

File tree

7 files changed

+129
-132
lines changed

7 files changed

+129
-132
lines changed

include/net/xfrm.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1888,7 +1888,7 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
18881888
void __net_init xfrm_dev_init(void);
18891889

18901890
#ifdef CONFIG_XFRM_OFFLOAD
1891-
int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
1891+
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
18921892
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
18931893
struct xfrm_user_offload *xuo);
18941894
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1929,9 +1929,9 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
19291929
}
19301930
}
19311931
#else
1932-
static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
1932+
static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
19331933
{
1934-
return 0;
1934+
return skb;
19351935
}
19361936

19371937
static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)

net/core/dev.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3083,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
30833083
__skb_linearize(skb))
30843084
goto out_kfree_skb;
30853085

3086-
if (validate_xmit_xfrm(skb, features))
3087-
goto out_kfree_skb;
3088-
30893086
/* If packet is not checksummed and device does not
30903087
* support checksumming for this protocol, complete
30913088
* checksumming here.
@@ -3102,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
31023099
}
31033100
}
31043101

3102+
skb = validate_xmit_xfrm(skb, features);
3103+
31053104
return skb;
31063105

31073106
out_kfree_skb:

net/ipv4/esp4_offload.c

Lines changed: 21 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
108108
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
109109
netdev_features_t features)
110110
{
111-
__u32 seq;
112-
int err = 0;
113-
struct sk_buff *skb2;
114111
struct xfrm_state *x;
115112
struct ip_esp_hdr *esph;
116113
struct crypto_aead *aead;
117-
struct sk_buff *segs = ERR_PTR(-EINVAL);
118114
netdev_features_t esp_features = features;
119115
struct xfrm_offload *xo = xfrm_offload(skb);
120116

121117
if (!xo)
122-
goto out;
123-
124-
seq = xo->seq.low;
118+
return ERR_PTR(-EINVAL);
125119

126120
x = skb->sp->xvec[skb->sp->len - 1];
127121
aead = x->data;
128122
esph = ip_esp_hdr(skb);
129123

130124
if (esph->spi != x->id.spi)
131-
goto out;
125+
return ERR_PTR(-EINVAL);
132126

133127
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
134-
goto out;
128+
return ERR_PTR(-EINVAL);
135129

136130
__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
137131

138132
skb->encap_hdr_csum = 1;
139133

140-
if (!(features & NETIF_F_HW_ESP))
134+
if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
135+
(x->xso.dev != skb->dev))
141136
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
142137

143-
segs = x->outer_mode->gso_segment(x, skb, esp_features);
144-
if (IS_ERR_OR_NULL(segs))
145-
goto out;
146-
147-
__skb_pull(skb, skb->data - skb_mac_header(skb));
148-
149-
skb2 = segs;
150-
do {
151-
struct sk_buff *nskb = skb2->next;
152-
153-
xo = xfrm_offload(skb2);
154-
xo->flags |= XFRM_GSO_SEGMENT;
155-
xo->seq.low = seq;
156-
xo->seq.hi = xfrm_replay_seqhi(x, seq);
138+
xo->flags |= XFRM_GSO_SEGMENT;
157139

158-
if(!(features & NETIF_F_HW_ESP))
159-
xo->flags |= CRYPTO_FALLBACK;
160-
161-
x->outer_mode->xmit(x, skb2);
162-
163-
err = x->type_offload->xmit(x, skb2, esp_features);
164-
if (err) {
165-
kfree_skb_list(segs);
166-
return ERR_PTR(err);
167-
}
168-
169-
if (!skb_is_gso(skb2))
170-
seq++;
171-
else
172-
seq += skb_shinfo(skb2)->gso_segs;
173-
174-
skb_push(skb2, skb2->mac_len);
175-
skb2 = nskb;
176-
} while (skb2);
177-
178-
out:
179-
return segs;
140+
return x->outer_mode->gso_segment(x, skb, esp_features);
180141
}
181142

182143
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
203164
struct crypto_aead *aead;
204165
struct esp_info esp;
205166
bool hw_offload = true;
167+
__u32 seq;
206168

207169
esp.inplace = true;
208170

@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
241203
return esp.nfrags;
242204
}
243205

206+
seq = xo->seq.low;
207+
244208
esph = esp.esph;
245209
esph->spi = x->id.spi;
246210

247211
skb_push(skb, -skb_network_offset(skb));
248212

249213
if (xo->flags & XFRM_GSO_SEGMENT) {
250-
esph->seq_no = htonl(xo->seq.low);
251-
} else {
252-
ip_hdr(skb)->tot_len = htons(skb->len);
253-
ip_send_check(ip_hdr(skb));
214+
esph->seq_no = htonl(seq);
215+
216+
if (!skb_is_gso(skb))
217+
xo->seq.low++;
218+
else
219+
xo->seq.low += skb_shinfo(skb)->gso_segs;
254220
}
255221

222+
esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
223+
224+
ip_hdr(skb)->tot_len = htons(skb->len);
225+
ip_send_check(ip_hdr(skb));
226+
256227
if (hw_offload)
257228
return 0;
258229

259-
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
260-
261230
err = esp_output_tail(x, skb, &esp);
262231
if (err)
263232
return err;

net/ipv4/xfrm4_mode_tunnel.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
105105
{
106106
__skb_push(skb, skb->mac_len);
107107
return skb_mac_gso_segment(skb, features);
108-
109108
}
110109

111110
static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
112111
{
113112
struct xfrm_offload *xo = xfrm_offload(skb);
114113

115-
if (xo->flags & XFRM_GSO_SEGMENT) {
116-
skb->network_header = skb->network_header - x->props.header_len;
114+
if (xo->flags & XFRM_GSO_SEGMENT)
117115
skb->transport_header = skb->network_header +
118116
sizeof(struct iphdr);
119-
}
120117

121118
skb_reset_mac_len(skb);
122119
pskb_pull(skb, skb->mac_len + x->props.header_len);

net/ipv6/esp6_offload.c

Lines changed: 24 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
135135
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
136136
netdev_features_t features)
137137
{
138-
__u32 seq;
139-
int err = 0;
140-
struct sk_buff *skb2;
141138
struct xfrm_state *x;
142139
struct ip_esp_hdr *esph;
143140
struct crypto_aead *aead;
144-
struct sk_buff *segs = ERR_PTR(-EINVAL);
145141
netdev_features_t esp_features = features;
146142
struct xfrm_offload *xo = xfrm_offload(skb);
147143

148144
if (!xo)
149-
goto out;
150-
151-
seq = xo->seq.low;
145+
return ERR_PTR(-EINVAL);
152146

153147
x = skb->sp->xvec[skb->sp->len - 1];
154148
aead = x->data;
155149
esph = ip_esp_hdr(skb);
156150

157151
if (esph->spi != x->id.spi)
158-
goto out;
152+
return ERR_PTR(-EINVAL);
159153

160154
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
161-
goto out;
155+
return ERR_PTR(-EINVAL);
162156

163157
__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
164158

165159
skb->encap_hdr_csum = 1;
166160

167-
if (!(features & NETIF_F_HW_ESP))
161+
if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
162+
(x->xso.dev != skb->dev))
168163
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
169164

170-
segs = x->outer_mode->gso_segment(x, skb, esp_features);
171-
if (IS_ERR_OR_NULL(segs))
172-
goto out;
173-
174-
__skb_pull(skb, skb->data - skb_mac_header(skb));
175-
176-
skb2 = segs;
177-
do {
178-
struct sk_buff *nskb = skb2->next;
179-
180-
xo = xfrm_offload(skb2);
181-
xo->flags |= XFRM_GSO_SEGMENT;
182-
xo->seq.low = seq;
183-
xo->seq.hi = xfrm_replay_seqhi(x, seq);
184-
185-
if(!(features & NETIF_F_HW_ESP))
186-
xo->flags |= CRYPTO_FALLBACK;
187-
188-
x->outer_mode->xmit(x, skb2);
189-
190-
err = x->type_offload->xmit(x, skb2, esp_features);
191-
if (err) {
192-
kfree_skb_list(segs);
193-
return ERR_PTR(err);
194-
}
195-
196-
if (!skb_is_gso(skb2))
197-
seq++;
198-
else
199-
seq += skb_shinfo(skb2)->gso_segs;
200-
201-
skb_push(skb2, skb2->mac_len);
202-
skb2 = nskb;
203-
} while (skb2);
165+
xo->flags |= XFRM_GSO_SEGMENT;
204166

205-
out:
206-
return segs;
167+
return x->outer_mode->gso_segment(x, skb, esp_features);
207168
}
208169

209170
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
222183

223184
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
224185
{
186+
int len;
225187
int err;
226188
int alen;
227189
int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
230192
struct crypto_aead *aead;
231193
struct esp_info esp;
232194
bool hw_offload = true;
195+
__u32 seq;
233196

234197
esp.inplace = true;
235198

@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
265228
return esp.nfrags;
266229
}
267230

231+
seq = xo->seq.low;
232+
268233
esph = ip_esp_hdr(skb);
269234
esph->spi = x->id.spi;
270235

271236
skb_push(skb, -skb_network_offset(skb));
272237

273238
if (xo->flags & XFRM_GSO_SEGMENT) {
274-
esph->seq_no = htonl(xo->seq.low);
275-
} else {
276-
int len;
277-
278-
len = skb->len - sizeof(struct ipv6hdr);
279-
if (len > IPV6_MAXPLEN)
280-
len = 0;
239+
esph->seq_no = htonl(seq);
281240

282-
ipv6_hdr(skb)->payload_len = htons(len);
241+
if (!skb_is_gso(skb))
242+
xo->seq.low++;
243+
else
244+
xo->seq.low += skb_shinfo(skb)->gso_segs;
283245
}
284246

247+
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
248+
249+
len = skb->len - sizeof(struct ipv6hdr);
250+
if (len > IPV6_MAXPLEN)
251+
len = 0;
252+
253+
ipv6_hdr(skb)->payload_len = htons(len);
254+
285255
if (hw_offload)
286256
return 0;
287257

288-
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
289-
290258
err = esp6_output_tail(x, skb, &esp);
291259
if (err)
292260
return err;

net/ipv6/xfrm6_mode_tunnel.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
105105
{
106106
__skb_push(skb, skb->mac_len);
107107
return skb_mac_gso_segment(skb, features);
108-
109108
}
110109

111110
static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
112111
{
113112
struct xfrm_offload *xo = xfrm_offload(skb);
114113

115-
if (xo->flags & XFRM_GSO_SEGMENT) {
116-
skb->network_header = skb->network_header - x->props.header_len;
114+
if (xo->flags & XFRM_GSO_SEGMENT)
117115
skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
118-
}
119116

120117
skb_reset_mac_len(skb);
121118
pskb_pull(skb, skb->mac_len + x->props.header_len);

0 commit comments

Comments
 (0)