@@ -37,6 +37,8 @@ struct mptcp_skb_cb {
 
 #define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))
 
+static struct percpu_counter mptcp_sockets_allocated;
+
 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
  * completed yet or has failed, return the subflow socket.
  * Otherwise return NULL.
@@ -333,9 +335,17 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
 		df->data_seq + df->data_len == msk->write_seq;
 }
 
-static void dfrag_clear(struct mptcp_data_frag *dfrag)
+static void dfrag_uncharge(struct sock *sk, int len)
+{
+	sk_mem_uncharge(sk, len);
+}
+
+static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
 {
+	int len = dfrag->data_len + dfrag->overhead;
+
 	list_del(&dfrag->list);
+	dfrag_uncharge(sk, len);
 	put_page(dfrag->page);
 }
 
@@ -344,12 +354,18 @@ static void mptcp_clean_una(struct sock *sk)
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_data_frag *dtmp, *dfrag;
 	u64 snd_una = atomic64_read(&msk->snd_una);
+	bool cleaned = false;
 
 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
 		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
 			break;
 
-		dfrag_clear(dfrag);
+		dfrag_clear(sk, dfrag);
+		cleaned = true;
+	}
+
+	if (cleaned) {
+		sk_mem_reclaim_partial(sk);
 	}
 }
 
@@ -461,6 +477,9 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 	if (!psize)
 		return -EINVAL;
 
+	if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
+		return -ENOMEM;
+
 	/* tell the TCP stack to delay the push so that we can safely
 	 * access the skb after the sendpages call
 	 */
@@ -482,6 +501,11 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 		list_add_tail(&dfrag->list, &msk->rtx_queue);
 	}
 
+	/* charge data on mptcp rtx queue to the master socket
+	 * Note: we charge such data both to sk and ssk
+	 */
+	sk->sk_forward_alloc -= frag_truesize;
+
 	/* if the tail skb extension is still the cached one, collapsing
 	 * really happened. Note: we can't check for 'same skb' as the sk_buff
 	 * hdr on tail can be transmitted, freed and re-allocated by the
@@ -933,6 +957,8 @@ static int mptcp_init_sock(struct sock *sk)
 	if (ret)
 		return ret;
 
+	sk_sockets_allocated_inc(sk);
+
 	if (!mptcp_is_enabled(sock_net(sk)))
 		return -ENOPROTOOPT;
 
@@ -947,7 +973,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
 	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
 
 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
-		dfrag_clear(dfrag);
+		dfrag_clear(sk, dfrag);
 }
 
 static void mptcp_cancel_work(struct sock *sk)
@@ -1182,6 +1208,8 @@ static void mptcp_destroy(struct sock *sk)
 
 	if (msk->cached_ext)
 		__skb_ext_put(msk->cached_ext);
+
+	sk_sockets_allocated_dec(sk);
 }
 
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
@@ -1391,7 +1419,12 @@ static struct proto mptcp_prot = {
 	.hash		= inet_hash,
 	.unhash		= inet_unhash,
 	.get_port	= mptcp_get_port,
+	.sockets_allocated	= &mptcp_sockets_allocated,
+	.memory_allocated	= &tcp_memory_allocated,
+	.memory_pressure	= &tcp_memory_pressure,
 	.stream_memory_free	= mptcp_memory_free,
+	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
+	.sysctl_mem	= sysctl_tcp_mem,
 	.obj_size	= sizeof(struct mptcp_sock),
 	.no_autobind	= true,
 };
@@ -1680,6 +1713,9 @@ void mptcp_proto_init(void)
 {
 	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
 
+	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
+		panic("Failed to allocate MPTCP pcpu counter\n");
+
 	mptcp_subflow_init();
 	mptcp_pm_init();
 
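
Read together, the hunks tie MPTCP transmit-queue data into the generic socket memory accounting: mptcp_sendmsg_frag() reserves memory with sk_wmem_schedule() and charges it to sk->sk_forward_alloc when a fragment is queued, while mptcp_clean_una() uncharges acked fragments and partially reclaims the excess. Below is a minimal sketch of that charge/release pairing only, assuming the context of net/mptcp/protocol.c; queue_frag() and ack_frags() are hypothetical names standing in for the relevant parts of mptcp_sendmsg_frag() and mptcp_clean_una(), not functions from the patch.

/* Sketch only, not part of the patch: condenses the forward-alloc
 * accounting pattern introduced above, assuming the declarations of
 * net/mptcp/protocol.c. queue_frag()/ack_frags() are hypothetical.
 */
static int queue_frag(struct sock *sk, struct mptcp_data_frag *dfrag, int psize)
{
	/* reserve write memory before the data lands on msk->rtx_queue */
	if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
		return -ENOMEM;

	/* ... append dfrag to msk->rtx_queue, push the data via a subflow ... */

	/* charge the queued bytes (plus dfrag metadata) to the master socket */
	sk->sk_forward_alloc -= psize + dfrag->overhead;
	return 0;
}

static void ack_frags(struct sock *sk, struct mptcp_sock *msk, u64 snd_una)
{
	struct mptcp_data_frag *dfrag, *dtmp;
	bool cleaned = false;

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		/* uncharge each fully acked fragment as it is dropped */
		list_del(&dfrag->list);
		sk_mem_uncharge(sk, dfrag->data_len + dfrag->overhead);
		put_page(dfrag->page);
		cleaned = true;
	}

	/* return any excess forward allocation to the shared TCP pools */
	if (cleaned)
		sk_mem_reclaim_partial(sk);
}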