@@ -767,14 +767,15 @@ static void tcp_tasklet_func(unsigned long data)
767767 list_for_each_safe (q , n , & list ) {
768768 tp = list_entry (q , struct tcp_sock , tsq_node );
769769 list_del (& tp -> tsq_node );
770- clear_bit (TSQ_QUEUED , & tp -> tsq_flags );
771770
772771 sk = (struct sock * )tp ;
772+ clear_bit (TSQ_QUEUED , & sk -> sk_tsq_flags );
773+
773774 if (!sk -> sk_lock .owned &&
774- test_bit (TCP_TSQ_DEFERRED , & tp -> tsq_flags )) {
775+ test_bit (TCP_TSQ_DEFERRED , & sk -> sk_tsq_flags )) {
775776 bh_lock_sock (sk );
776777 if (!sock_owned_by_user (sk )) {
777- clear_bit (TCP_TSQ_DEFERRED , & tp -> tsq_flags );
778+ clear_bit (TCP_TSQ_DEFERRED , & sk -> sk_tsq_flags );
778779 tcp_tsq_handler (sk );
779780 }
780781 bh_unlock_sock (sk );
@@ -797,16 +798,15 @@ static void tcp_tasklet_func(unsigned long data)
797798 */
798799void tcp_release_cb (struct sock * sk )
799800{
800- struct tcp_sock * tp = tcp_sk (sk );
801801 unsigned long flags , nflags ;
802802
803803 /* perform an atomic operation only if at least one flag is set */
804804 do {
805- flags = tp -> tsq_flags ;
805+ flags = sk -> sk_tsq_flags ;
806806 if (!(flags & TCP_DEFERRED_ALL ))
807807 return ;
808808 nflags = flags & ~TCP_DEFERRED_ALL ;
809- } while (cmpxchg (& tp -> tsq_flags , flags , nflags ) != flags );
809+ } while (cmpxchg (& sk -> sk_tsq_flags , flags , nflags ) != flags );
810810
811811 if (flags & TCPF_TSQ_DEFERRED )
812812 tcp_tsq_handler (sk );
@@ -878,15 +878,15 @@ void tcp_wfree(struct sk_buff *skb)
878878 if (wmem >= SKB_TRUESIZE (1 ) && this_cpu_ksoftirqd () == current )
879879 goto out ;
880880
881- for (oval = READ_ONCE (tp -> tsq_flags );; oval = nval ) {
881+ for (oval = READ_ONCE (sk -> sk_tsq_flags );; oval = nval ) {
882882 struct tsq_tasklet * tsq ;
883883 bool empty ;
884884
885885 if (!(oval & TSQF_THROTTLED ) || (oval & TSQF_QUEUED ))
886886 goto out ;
887887
888888 nval = (oval & ~TSQF_THROTTLED ) | TSQF_QUEUED | TCPF_TSQ_DEFERRED ;
889- nval = cmpxchg (& tp -> tsq_flags , oval , nval );
889+ nval = cmpxchg (& sk -> sk_tsq_flags , oval , nval );
890890 if (nval != oval )
891891 continue ;
892892
@@ -2100,7 +2100,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
21002100 skb -> prev == sk -> sk_write_queue .next )
21012101 return false;
21022102
2103- set_bit (TSQ_THROTTLED , & tcp_sk ( sk ) -> tsq_flags );
2103+ set_bit (TSQ_THROTTLED , & sk -> sk_tsq_flags );
21042104 /* It is possible TX completion already happened
21052105 * before we set TSQ_THROTTLED, so we must
21062106 * test again the condition.
@@ -2241,8 +2241,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
22412241 unlikely (tso_fragment (sk , skb , limit , mss_now , gfp )))
22422242 break ;
22432243
2244- if (test_bit (TCP_TSQ_DEFERRED , & tp -> tsq_flags ))
2245- clear_bit (TCP_TSQ_DEFERRED , & tp -> tsq_flags );
2244+ if (test_bit (TCP_TSQ_DEFERRED , & sk -> sk_tsq_flags ))
2245+ clear_bit (TCP_TSQ_DEFERRED , & sk -> sk_tsq_flags );
22462246 if (tcp_small_queue_check (sk , skb , 0 ))
22472247 break ;
22482248
@@ -3545,8 +3545,6 @@ void tcp_send_ack(struct sock *sk)
35453545 /* We do not want pure acks influencing TCP Small Queues or fq/pacing
35463546 * too much.
35473547 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
3548- * We also avoid tcp_wfree() overhead (cache line miss accessing
3549- * tp->tsq_flags) by using regular sock_wfree()
35503548 */
35513549 skb_set_tcp_pure_ack (buff );
35523550