@@ -2253,6 +2253,10 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
 	return true;
 }
 
+/* flags for __mptcp_close_ssk() */
+#define MPTCP_CF_PUSH		BIT(1)
+#define MPTCP_CF_FASTCLOSE	BIT(2)
+
 /* subflow sockets can be either outgoing (connect) or incoming
  * (accept).
  *
@@ -2262,22 +2266,37 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
  * parent socket.
  */
 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
-			      struct mptcp_subflow_context *subflow)
+			      struct mptcp_subflow_context *subflow,
+			      unsigned int flags)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	bool need_push;
+	bool need_push, dispose_it;
 
-	list_del(&subflow->node);
+	dispose_it = !msk->subflow || ssk != msk->subflow->sk;
+	if (dispose_it)
+		list_del(&subflow->node);
 
 	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
 
+	if (flags & MPTCP_CF_FASTCLOSE)
+		subflow->send_fastclose = 1;
+
+	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+	if (!dispose_it) {
+		tcp_disconnect(ssk, 0);
+		msk->subflow->state = SS_UNCONNECTED;
+		mptcp_subflow_ctx_reset(subflow);
+		release_sock(ssk);
+
+		goto out;
+	}
+
 	/* if we are invoked by the msk cleanup code, the subflow is
 	 * already orphaned
 	 */
 	if (ssk->sk_socket)
 		sock_orphan(ssk);
 
-	need_push = __mptcp_retransmit_pending_data(sk);
 	subflow->disposable = 1;
 
 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
@@ -2297,14 +2316,12 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 
 	sock_put(ssk);
 
-	if (ssk == msk->last_snd)
-		msk->last_snd = NULL;
-
 	if (ssk == msk->first)
 		msk->first = NULL;
 
-	if (msk->subflow && ssk == msk->subflow->sk)
-		mptcp_dispose_initial_subflow(msk);
+out:
+	if (ssk == msk->last_snd)
+		msk->last_snd = NULL;
 
 	if (need_push)
 		__mptcp_push_pending(sk, 0);
@@ -2315,7 +2332,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
23152332{
23162333 if (sk -> sk_state == TCP_ESTABLISHED )
23172334 mptcp_event (MPTCP_EVENT_SUB_CLOSED , mptcp_sk (sk ), ssk , GFP_KERNEL );
2318- __mptcp_close_ssk (sk , ssk , subflow );
2335+ __mptcp_close_ssk (sk , ssk , subflow , MPTCP_CF_PUSH );
23192336}
23202337
23212338static unsigned int mptcp_sync_mss (struct sock * sk , u32 pmtu )
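
For context, a minimal sketch (not part of the patch) of how callers are expected to pick the new flags: the regular subflow close path requests a push of pending data, disconnect requests a fastclose, and the msk teardown loop passes no flags. demo_pick_close_flags() is a hypothetical name used only for illustration.

static unsigned int demo_pick_close_flags(bool disconnecting, bool destroying)
{
	/* disconnect path: ask the subflow to send MP_FASTCLOSE before closing */
	if (disconnecting)
		return MPTCP_CF_FASTCLOSE;

	/* msk destroy path: the whole socket is going away, nothing to push */
	if (destroying)
		return 0;

	/* regular subflow close: retransmit pending data over other subflows */
	return MPTCP_CF_PUSH;
}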
@@ -2533,9 +2550,20 @@ static int __mptcp_init_sock(struct sock *sk)
 	return 0;
 }
 
-static int mptcp_init_sock(struct sock *sk)
+static void mptcp_ca_reset(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	tcp_assign_congestion_control(sk);
+	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+
+	/* no need to keep a reference to the ops, the name will suffice */
+	tcp_cleanup_congestion_control(sk);
+	icsk->icsk_ca_ops = NULL;
+}
+
+static int mptcp_init_sock(struct sock *sk)
+{
 	struct net *net = sock_net(sk);
 	int ret;
 
@@ -2556,12 +2584,7 @@ static int mptcp_init_sock(struct sock *sk)
 	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
 	 * propagate the correct value
 	 */
-	tcp_assign_congestion_control(sk);
-	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
-
-	/* no need to keep a reference to the ops, the name will suffice */
-	tcp_cleanup_congestion_control(sk);
-	icsk->icsk_ca_ops = NULL;
+	mptcp_ca_reset(sk);
 
 	sk_sockets_allocated_inc(sk);
 	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
@@ -2720,9 +2743,13 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	sk_stop_timer(sk, &sk->sk_timer);
 	msk->pm.status = 0;
 
+	/* clears msk->subflow, allowing the following loop to close
+	 * even the initial subflow
+	 */
+	mptcp_dispose_initial_subflow(msk);
 	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-		__mptcp_close_ssk(sk, ssk, subflow);
+		__mptcp_close_ssk(sk, ssk, subflow, 0);
 	}
 
 	sk->sk_prot->destroy(sk);
@@ -2733,7 +2760,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	xfrm_sk_free_policy(sk);
 
 	sk_refcnt_debug_release(sk);
-	mptcp_dispose_initial_subflow(msk);
 	sock_put(sk);
 }
 
@@ -2769,6 +2795,9 @@ static void mptcp_close(struct sock *sk, long timeout)
 
 	sock_hold(sk);
 	pr_debug("msk=%p state=%d", sk, sk->sk_state);
+	if (mptcp_sk(sk)->token)
+		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+
 	if (sk->sk_state == TCP_CLOSE) {
 		__mptcp_destroy_sock(sk);
 		do_cancel_work = true;
@@ -2779,9 +2808,6 @@ static void mptcp_close(struct sock *sk, long timeout)
 	if (do_cancel_work)
 		mptcp_cancel_work(sk);
 
-	if (mptcp_sk(sk)->token)
-		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
-
 	sock_put(sk);
 }
 
@@ -2815,13 +2841,36 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 
 	mptcp_do_flush_join_list(msk);
 
+	inet_sk_state_store(sk, TCP_CLOSE);
+
 	mptcp_for_each_subflow(msk, subflow) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
-		lock_sock(ssk);
-		tcp_disconnect(ssk, flags);
-		release_sock(ssk);
+		__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
 	}
+
+	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
+	sk_stop_timer(sk, &sk->sk_timer);
+
+	if (mptcp_sk(sk)->token)
+		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+
+	mptcp_destroy_common(msk);
+	msk->last_snd = NULL;
+	msk->flags = 0;
+	msk->recovery = false;
+	msk->can_ack = false;
+	msk->fully_established = false;
+	msk->rcv_data_fin = false;
+	msk->snd_data_fin_enable = false;
+	msk->rcv_fastclose = false;
+	msk->use_64bit_ack = false;
+	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
+	mptcp_pm_data_reset(msk);
+	mptcp_ca_reset(sk);
+
+	sk->sk_shutdown = 0;
+	sk_error_report(sk);
 	return 0;
 }
 
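To illustrate what the reworked mptcp_disconnect() above enables, here is a hedged user-space sketch: once the msk is fully reset on disconnect, the same MPTCP socket can be detached with connect(AF_UNSPEC) and then connected again without being recreated. The snippet is illustrative only; it assumes an fd created with socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP), and the helper name is hypothetical.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP	262	/* value from linux/in.h, for older headers */
#endif

/* Disconnect an MPTCP socket and connect it to a new destination.
 * connect(AF_UNSPEC) reaches the kernel's mptcp_disconnect() path,
 * which now resets the full msk state before the second connect().
 */
static int demo_reconnect(int fd, const struct sockaddr_in *dst)
{
	struct sockaddr unspec;

	memset(&unspec, 0, sizeof(unspec));
	unspec.sa_family = AF_UNSPEC;

	if (connect(fd, &unspec, sizeof(unspec)) < 0)
		return -1;

	return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
}
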
@@ -2961,9 +3010,11 @@ void mptcp_destroy_common(struct mptcp_sock *msk)
 	__mptcp_clear_xmit(sk);
 
 	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
+	mptcp_data_lock(sk);
 	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
 	__skb_queue_purge(&sk->sk_receive_queue);
 	skb_rbtree_purge(&msk->out_of_order_queue);
+	mptcp_data_unlock(sk);
 
 	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
 	 * inet_sock_destruct() will dispose it
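
The mptcp_data_lock() pair added above serializes the splice and purge against the subflow receive path, which queues data for the msk while holding the same lock. The sketch below assumes that usage; demo_enqueue_rx() is hypothetical and only illustrates the pairing.

static void demo_enqueue_rx(struct sock *sk, struct sk_buff *skb)
{
	/* same lock now taken by mptcp_destroy_common() above: a late skb
	 * from the softirq receive path cannot slip in between the splice
	 * and the purge during teardown
	 */
	mptcp_data_lock(sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	mptcp_data_unlock(sk);
}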