Commit 491f14d

Merge branch 'tcp-improve-delivered-counts-in-SCM_TSTAMP_ACK'
Yousuk Seung says:

====================
tcp: improve delivered counts in SCM_TSTAMP_ACK

Currently the delivered and delivered_ce counts in the OPT_STATS of
SCM_TSTAMP_ACK do not fully reflect the ACK being timestamped. They are
also out of sync: the delivered count includes packets being sacked and
some of those being cumulatively acked, while delivered_ce includes
neither. This patch series updates tp->delivered and tp->delivered_ce
together to keep them in sync. It also moves the generation of
SCM_TSTAMP_ACK to later in tcp_clean_rtx_queue() so that, for
sack-enabled connections, it reflects the packets cumulatively acked up
to the current skb.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents b08866f + 082d4fa commit 491f14d
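
For context on where these counters surface: OPT_STATS ride on SCM_TSTAMP_ACK
timestamps read from the socket error queue. The sketch below is illustrative
only; it is not part of this series, and the helper name read_ack_opt_stats
and the control-buffer size are made up for the example. It shows one way
userspace might pull TCP_NLA_DELIVERED and TCP_NLA_DELIVERED_CE out of such a
timestamp, assuming a connected TCP socket whose SO_TIMESTAMPING options
already include SOF_TIMESTAMPING_TX_ACK | SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_OPT_STATS.

/*
 * Illustrative sketch (not from this series): read one timestamp message
 * off the error queue and walk the SCM_TIMESTAMPING_OPT_STATS cmsg for
 * the delivered / delivered_ce attributes this series keeps in sync.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

#ifndef SCM_TIMESTAMPING_OPT_STATS
#define SCM_TIMESTAMPING_OPT_STATS 54	/* value from asm-generic/socket.h */
#endif

static void read_ack_opt_stats(int fd)
{
	char control[1024];	/* size is arbitrary for this example */
	struct msghdr msg = {
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cm;

	/* TX timestamps are queued on the socket error queue */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct nlattr *nla;
		int len;

		if (cm->cmsg_level != SOL_SOCKET ||
		    cm->cmsg_type != SCM_TIMESTAMPING_OPT_STATS)
			continue;

		/* The cmsg payload is a run of TCP_NLA_* netlink attributes */
		nla = (struct nlattr *)CMSG_DATA(cm);
		len = cm->cmsg_len - CMSG_LEN(0);
		while (len >= NLA_HDRLEN && nla->nla_len >= NLA_HDRLEN &&
		       nla->nla_len <= len) {
			if (nla->nla_type == TCP_NLA_DELIVERED)
				printf("delivered:    %u\n",
				       *(__u32 *)((char *)nla + NLA_HDRLEN));
			else if (nla->nla_type == TCP_NLA_DELIVERED_CE)
				printf("delivered_ce: %u\n",
				       *(__u32 *)((char *)nla + NLA_HDRLEN));
			len -= NLA_ALIGN(nla->nla_len);
			nla = (struct nlattr *)((char *)nla +
						NLA_ALIGN(nla->nla_len));
		}
	}
}

With this series applied, the two attributes parsed above move together on
every timestamped ACK, and on sack-enabled connections they account for the
segments cumulatively acked up to the skb being timestamped.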

File tree

1 file changed: +39 −20 lines

net/ipv4/tcp_input.c

Lines changed: 39 additions & 20 deletions
@@ -962,6 +962,15 @@ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
 	}
 }
 
+/* Updates the delivered and delivered_ce counts */
+static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
+				bool ece_ack)
+{
+	tp->delivered += delivered;
+	if (ece_ack)
+		tp->delivered_ce += delivered;
+}
+
 /* This procedure tags the retransmission queue when SACKs arrive.
  *
  * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
@@ -1138,6 +1147,7 @@ struct tcp_sacktag_state {
 	struct rate_sample *rate;
 	int	flag;
 	unsigned int mss_now;
+	u32	sack_delivered;
 };
 
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1258,7 +1268,8 @@ static u8 tcp_sacktag_one(struct sock *sk,
 		sacked |= TCPCB_SACKED_ACKED;
 		state->flag |= FLAG_DATA_SACKED;
 		tp->sacked_out += pcount;
-		tp->delivered += pcount; /* Out-of-order packets delivered */
+		/* Out-of-order packets delivered */
+		state->sack_delivered += pcount;
 
 		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
 		if (tp->lost_skb_hint &&
@@ -1684,7 +1695,8 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 					 num_sacks, prior_snd_una);
 	if (found_dup_sack) {
 		state->flag |= FLAG_DSACKING_ACK;
-		tp->delivered++; /* A spurious retransmission is delivered */
+		/* A spurious retransmission is delivered */
+		state->sack_delivered++;
 	}
 
 	/* Eliminate too old ACKs, but take into
@@ -1893,7 +1905,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
 {
 	if (num_dupack) {
 		struct tcp_sock *tp = tcp_sk(sk);
@@ -1904,20 +1916,21 @@ static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
 		tcp_check_reno_reordering(sk, 0);
 		delivered = tp->sacked_out - prior_sacked;
 		if (delivered > 0)
-			tp->delivered += delivered;
+			tcp_count_delivered(tp, delivered, ece_ack);
 		tcp_verify_left_out(tp);
 	}
 }
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (acked > 0) {
 		/* One ACK acked hole. The rest eat duplicate ACKs. */
-		tp->delivered += max_t(int, acked - tp->sacked_out, 1);
+		tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1),
+				    ece_ack);
 		if (acked - 1 >= tp->sacked_out)
 			tp->sacked_out = 0;
 		else
@@ -2697,7 +2710,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
 	 * delivered. Lower inflight to clock out (re)tranmissions.
 	 */
 	if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
-		tcp_add_reno_sack(sk, num_dupack);
+		tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
 	else if (flag & FLAG_SND_UNA_ADVANCED)
 		tcp_reset_reno_sack(tp);
 }
@@ -2779,6 +2792,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fast_rexmit = 0, flag = *ack_flag;
+	bool ece_ack = flag & FLAG_ECE;
 	bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
 				      tcp_force_fast_retransmit(sk));
 
@@ -2787,7 +2801,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 	/* Now state machine starts.
 	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
-	if (flag & FLAG_ECE)
+	if (ece_ack)
 		tp->prior_ssthresh = 0;
 
 	/* B. In all the states check for reneging SACKs. */
@@ -2828,7 +2842,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (tcp_is_reno(tp))
-				tcp_add_reno_sack(sk, num_dupack);
+				tcp_add_reno_sack(sk, num_dupack, ece_ack);
 		} else {
 			if (tcp_try_undo_partial(sk, prior_snd_una))
 				return;
@@ -2853,7 +2867,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
-			tcp_add_reno_sack(sk, num_dupack);
+			tcp_add_reno_sack(sk, num_dupack, ece_ack);
 		}
 
 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -2877,7 +2891,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		}
 
 		/* Otherwise enter Recovery state */
-		tcp_enter_recovery(sk, (flag & FLAG_ECE));
+		tcp_enter_recovery(sk, ece_ack);
 		fast_rexmit = 1;
 	}
 
@@ -3053,7 +3067,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			       u32 prior_snd_una,
-			       struct tcp_sacktag_state *sack)
+			       struct tcp_sacktag_state *sack, bool ece_ack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u64 first_ackt, last_ackt;
@@ -3078,8 +3092,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 		u8 sacked = scb->sacked;
 		u32 acked_pcount;
 
-		tcp_ack_tstamp(sk, skb, prior_snd_una);
-
 		/* Determine how many packets and what bytes were acked, tso and else */
 		if (after(scb->end_seq, tp->snd_una)) {
 			if (tcp_skb_pcount(skb) == 1 ||
@@ -3114,7 +3126,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 		if (sacked & TCPCB_SACKED_ACKED) {
 			tp->sacked_out -= acked_pcount;
 		} else if (tcp_is_sack(tp)) {
-			tp->delivered += acked_pcount;
+			tcp_count_delivered(tp, acked_pcount, ece_ack);
 			if (!tcp_skb_spurious_retrans(tp, skb))
 				tcp_rack_advance(tp, sacked, scb->end_seq,
 						 tcp_skb_timestamp_us(skb));
@@ -3143,6 +3155,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 		if (!fully_acked)
 			break;
 
+		tcp_ack_tstamp(sk, skb, prior_snd_una);
+
 		next = skb_rb_next(skb);
 		if (unlikely(skb == tp->retransmit_skb_hint))
 			tp->retransmit_skb_hint = NULL;
@@ -3191,7 +3205,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 	}
 
 	if (tcp_is_reno(tp)) {
-		tcp_remove_reno_sacks(sk, pkts_acked);
+		tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
 
 		/* If any of the cumulatively ACKed segments was
 		 * retransmitted, non-SACK case cannot confirm that
@@ -3558,10 +3572,9 @@ static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
 
 	delivered = tp->delivered - prior_delivered;
 	NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
-	if (flag & FLAG_ECE) {
-		tp->delivered_ce += delivered;
+	if (flag & FLAG_ECE)
 		NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered);
-	}
+
 	return delivered;
 }
 
@@ -3585,6 +3598,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	sack_state.first_sackt = 0;
 	sack_state.rate = &rs;
+	sack_state.sack_delivered = 0;
 
 	/* We very likely will need to access rtx queue. */
 	prefetch(sk->tcp_rtx_queue.rb_node);
@@ -3660,6 +3674,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 			ack_ev_flags |= CA_ACK_ECE;
 		}
 
+		if (sack_state.sack_delivered)
+			tcp_count_delivered(tp, sack_state.sack_delivered,
+					    flag & FLAG_ECE);
+
 		if (flag & FLAG_WIN_UPDATE)
 			ack_ev_flags |= CA_ACK_WIN_UPDATE;
 
@@ -3685,7 +3703,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		goto no_queue;
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
+	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state,
+				    flag & FLAG_ECE);
 
 	tcp_rack_update_reo_wnd(sk, &rs);
