@@ -63,6 +63,7 @@
 #include <linux/indirect_call_wrapper.h>
 #include <linux/atomic.h>
 #include <linux/refcount.h>
+#include <linux/llist.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 #include <net/tcp_states.h>
@@ -284,9 +285,7 @@ struct bpf_local_storage;
  * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
  * @sk_no_check_rx: allow zero checksum in RX packets
  * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
- * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
- * @sk_route_forced_caps: static, forced route capabilities
- *	(set in tcp_init_sock())
+ * @sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden.
  * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  * @sk_gso_max_size: Maximum GSO segment size to build
  * @sk_gso_max_segs: Maximum number of GSO segments
@@ -391,6 +390,11 @@ struct sock {
 #define sk_flags		__sk_common.skc_flags
 #define sk_rxhash		__sk_common.skc_rxhash
 
+	/* early demux fields */
+	struct dst_entry	*sk_rx_dst;
+	int			sk_rx_dst_ifindex;
+	u32			sk_rx_dst_cookie;
+
 	socket_lock_t		sk_lock;
 	atomic_t		sk_drops;
 	int			sk_rcvlowat;
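
Note: moving sk_rx_dst, sk_rx_dst_ifindex and sk_rx_dst_cookie up here groups the early-demux state with the other fields read first in the receive path. A minimal sketch of how these fields are consumed, modeled on the tcp_v4_early_demux()/tcp_v6_early_demux() style checks (the helper name is hypothetical):

	/* Reuse the cached input route only if it is still valid for the
	 * incoming device; dst_check() revalidates against the saved cookie.
	 */
	static struct dst_entry *sk_rx_dst_get(struct sock *sk, int ifindex)
	{
		struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

		if (dst && sk->sk_rx_dst_ifindex == ifindex)
			dst = dst_check(dst, sk->sk_rx_dst_cookie);
		return dst;
	}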
@@ -410,6 +414,8 @@ struct sock {
 		struct sk_buff	*head;
 		struct sk_buff	*tail;
 	} sk_backlog;
+	struct llist_head	defer_list;
+
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 
 	int			sk_forward_alloc;
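
Note: defer_list is a lock-free llist of skbs whose freeing is deferred until the socket lock is released, which is why <linux/llist.h> is now included above. A sketch of the expected flush helper, assuming sk_buff gains an ll_node member for this list:

	static void sk_defer_free_flush(struct sock *sk)
	{
		struct llist_node *head;
		struct sk_buff *skb, *n;

		if (llist_empty(&sk->defer_list))
			return;

		head = llist_del_all(&sk->defer_list);
		llist_for_each_entry_safe(skb, n, head, ll_node)
			__kfree_skb(skb);
	}

llist_del_all() detaches the whole list in one atomic exchange, so producers can keep calling llist_add() concurrently without taking the socket lock.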
@@ -431,9 +437,6 @@ struct sock {
 #ifdef CONFIG_XFRM
 	struct xfrm_policy __rcu *sk_policy[2];
 #endif
-	struct dst_entry	*sk_rx_dst;
-	int			sk_rx_dst_ifindex;
-	u32			sk_rx_dst_cookie;
 
 	struct dst_entry __rcu	*sk_dst_cache;
 	atomic_t		sk_omem_alloc;
@@ -460,8 +463,6 @@ struct sock {
 	unsigned long		sk_max_pacing_rate;
 	struct page_frag	sk_frag;
 	netdev_features_t	sk_route_caps;
-	netdev_features_t	sk_route_nocaps;
-	netdev_features_t	sk_route_forced_caps;
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
 	gfp_t			sk_allocation;
@@ -471,7 +472,7 @@ struct sock {
 	 * Because of non atomicity rules, all
 	 * changes are protected by socket lock.
 	 */
-	u8			sk_padding : 1,
+	u8			sk_gso_disabled : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
@@ -493,6 +494,7 @@ struct sock {
 	u16			sk_busy_poll_budget;
 #endif
 	spinlock_t		sk_peer_lock;
+	int			sk_bind_phc;
 	struct pid		*sk_peer_pid;
 	const struct cred	*sk_peer_cred;
 
@@ -502,7 +504,6 @@ struct sock {
 	seqlock_t		sk_stamp_seq;
 #endif
 	u16			sk_tsflags;
-	int			sk_bind_phc;
 	u8			sk_shutdown;
 	u32			sk_tskey;
 	atomic_t		sk_zckey;
@@ -1022,12 +1023,18 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
+INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
 		return __sk_backlog_rcv(sk, skb);
 
-	return sk->sk_backlog_rcv(sk, skb);
+	return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+				  tcp_v6_do_rcv,
+				  tcp_v4_do_rcv,
+				  sk, skb);
 }
 
 static inline void sk_incoming_cpu_update(struct sock *sk)
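
Note: INDIRECT_CALL_INET() comes from <linux/indirect_call_wrapper.h>, included at the top of this file. Under CONFIG_RETPOLINE it replaces the indirect call with compare-and-direct-call tests, roughly (simplified from the existing wrapper macros):

	#define INDIRECT_CALL_1(f, f1, ...)				\
		(likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))
	#define INDIRECT_CALL_2(f, f2, f1, ...)				\
		(likely(f == f2) ? f2(__VA_ARGS__) :			\
				   INDIRECT_CALL_1(f, f1, __VA_ARGS__))

so sk_backlog_rcv() first tests for tcp_v6_do_rcv, then tcp_v4_do_rcv, and only falls back to a true indirect call for other protocols. Without retpolines the wrapper collapses to a plain f(sk, skb).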
@@ -1210,7 +1217,9 @@ struct proto {
 	unsigned int		inuse_idx;
 #endif
 
+#if IS_ENABLED(CONFIG_MPTCP)
 	int			(*forward_alloc_get)(const struct sock *sk);
+#endif
 
 	bool			(*stream_memory_free)(const struct sock *sk, int wake);
 	bool			(*sock_is_readable)(struct sock *sk);
@@ -1299,10 +1308,11 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
 
 static inline int sk_forward_alloc_get(const struct sock *sk)
 {
-	if (!sk->sk_prot->forward_alloc_get)
-		return sk->sk_forward_alloc;
-
-	return sk->sk_prot->forward_alloc_get(sk);
+#if IS_ENABLED(CONFIG_MPTCP)
+	if (sk->sk_prot->forward_alloc_get)
+		return sk->sk_prot->forward_alloc_get(sk);
+#endif
+	return sk->sk_forward_alloc;
 }
 
 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
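
Note: only MPTCP implements forward_alloc_get, so wrapping both the struct proto member and the test in IS_ENABLED(CONFIG_MPTCP) lets non-MPTCP builds return sk_forward_alloc directly without even loading the method pointer. A sketch of the lone expected user (the mptcp_forward_alloc_get name is an assumption; all other proto members omitted):

	static struct proto mptcp_prot = {
		.name			= "MPTCP",
		.forward_alloc_get	= mptcp_forward_alloc_get,
	};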
@@ -2124,10 +2134,10 @@ static inline bool sk_can_gso(const struct sock *sk)
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
-static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
+static inline void sk_gso_disable(struct sock *sk)
 {
-	sk->sk_route_nocaps |= flags;
-	sk->sk_route_caps &= ~flags;
+	sk->sk_gso_disabled = 1;
+	sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
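
Note: with sk_route_nocaps and sk_route_forced_caps gone, callers that did sk_nocaps_add(sk, NETIF_F_GSO_MASK) now call sk_gso_disable(sk), and sk_setup_caps() is expected to honor the bit whenever it recomputes sk_route_caps; a sketch of that check (assumed, the real logic lives in net/core/sock.c):

	if (unlikely(sk->sk_gso_disabled))
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;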
@@ -2638,6 +2648,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
 					   &skb_shinfo(skb)->tskey);
 }
 
+static inline bool sk_is_tcp(const struct sock *sk)
+{
+	return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+}
+
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
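
Note: sk_is_tcp() gives call sites a single helper for the open-coded type/protocol test; the typical conversion looks like:

	-	if (sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP)
	+	if (sk_is_tcp(sk))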