Skip to content

Commit 40b215e

Browse files
xemul authored and davem330 committed
tcp: de-bloat a bit with factoring NET_INC_STATS_BH out
There are some places in TCP that select one MIB index to bump snmp statistics like this: if (<something>) NET_INC_STATS_BH(<some_id>); else if (<something_else>) NET_INC_STATS_BH(<some_other_id>); ... else NET_INC_STATS_BH(<default_id>); or in a more tricky but still similar way. On the other hand, this NET_INC_STATS_BH is a camouflaged increment of percpu variable, which is not that small. Factoring those cases out de-bloats 235 bytes on non-preemptible i386 config and drives parts of the code into 80 columns. add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-235 (-235) function old new delta tcp_fastretrans_alert 1437 1424 -13 tcp_dsack_set 137 124 -13 tcp_xmit_retransmit_queue 690 676 -14 tcp_try_undo_recovery 283 265 -18 tcp_sacktag_write_queue 1550 1515 -35 tcp_update_reordering 162 106 -56 tcp_retransmit_timer 990 904 -86 Signed-off-by: Pavel Emelyanov <xemul@openvz.org> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent b4653e9 commit 40b215e

File tree

3 files changed

+46
-22
lines changed

3 files changed

+46
-22
lines changed

net/ipv4/tcp_input.c

Lines changed: 32 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -947,17 +947,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
947947
{
948948
struct tcp_sock *tp = tcp_sk(sk);
949949
if (metric > tp->reordering) {
950+
int mib_idx;
951+
950952
tp->reordering = min(TCP_MAX_REORDERING, metric);
951953

952954
/* This exciting event is worth to be remembered. 8) */
953955
if (ts)
954-
NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
956+
mib_idx = LINUX_MIB_TCPTSREORDER;
955957
else if (tcp_is_reno(tp))
956-
NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
958+
mib_idx = LINUX_MIB_TCPRENOREORDER;
957959
else if (tcp_is_fack(tp))
958-
NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
960+
mib_idx = LINUX_MIB_TCPFACKREORDER;
959961
else
960-
NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
962+
mib_idx = LINUX_MIB_TCPSACKREORDER;
963+
964+
NET_INC_STATS_BH(mib_idx);
961965
#if FASTRETRANS_DEBUG > 1
962966
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
963967
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1456,18 +1460,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
14561460
if (!tcp_is_sackblock_valid(tp, dup_sack,
14571461
sp[used_sacks].start_seq,
14581462
sp[used_sacks].end_seq)) {
1463+
int mib_idx;
1464+
14591465
if (dup_sack) {
14601466
if (!tp->undo_marker)
1461-
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
1467+
mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
14621468
else
1463-
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
1469+
mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
14641470
} else {
14651471
/* Don't count olds caused by ACK reordering */
14661472
if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
14671473
!after(sp[used_sacks].end_seq, tp->snd_una))
14681474
continue;
1469-
NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
1475+
mib_idx = LINUX_MIB_TCPSACKDISCARD;
14701476
}
1477+
1478+
NET_INC_STATS_BH(mib_idx);
14711479
if (i == 0)
14721480
first_sack_index = -1;
14731481
continue;
@@ -2380,15 +2388,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
23802388
struct tcp_sock *tp = tcp_sk(sk);
23812389

23822390
if (tcp_may_undo(tp)) {
2391+
int mib_idx;
2392+
23832393
/* Happy end! We did not retransmit anything
23842394
* or our original transmission succeeded.
23852395
*/
23862396
DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
23872397
tcp_undo_cwr(sk, 1);
23882398
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2389-
NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
2399+
mib_idx = LINUX_MIB_TCPLOSSUNDO;
23902400
else
2391-
NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
2401+
mib_idx = LINUX_MIB_TCPFULLUNDO;
2402+
2403+
NET_INC_STATS_BH(mib_idx);
23922404
tp->undo_marker = 0;
23932405
}
23942406
if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2560,7 +2572,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
25602572
int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
25612573
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
25622574
(tcp_fackets_out(tp) > tp->reordering));
2563-
int fast_rexmit = 0;
2575+
int fast_rexmit = 0, mib_idx;
25642576

25652577
if (WARN_ON(!tp->packets_out && tp->sacked_out))
25662578
tp->sacked_out = 0;
@@ -2683,9 +2695,11 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
26832695
/* Otherwise enter Recovery state */
26842696

26852697
if (tcp_is_reno(tp))
2686-
NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
2698+
mib_idx = LINUX_MIB_TCPRENORECOVERY;
26872699
else
2688-
NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
2700+
mib_idx = LINUX_MIB_TCPSACKRECOVERY;
2701+
2702+
NET_INC_STATS_BH(mib_idx);
26892703

26902704
tp->high_seq = tp->snd_nxt;
26912705
tp->prior_ssthresh = 0;
@@ -3700,10 +3714,14 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
37003714
static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
37013715
{
37023716
if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
3717+
int mib_idx;
3718+
37033719
if (before(seq, tp->rcv_nxt))
3704-
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
3720+
mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
37053721
else
3706-
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
3722+
mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
3723+
3724+
NET_INC_STATS_BH(mib_idx);
37073725

37083726
tp->rx_opt.dsack = 1;
37093727
tp->duplicate_sack[0].start_seq = seq;

net/ipv4/tcp_output.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1985,14 +1985,17 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
19851985

19861986
if (sacked & TCPCB_LOST) {
19871987
if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1988+
int mib_idx;
1989+
19881990
if (tcp_retransmit_skb(sk, skb)) {
19891991
tp->retransmit_skb_hint = NULL;
19901992
return;
19911993
}
19921994
if (icsk->icsk_ca_state != TCP_CA_Loss)
1993-
NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1995+
mib_idx = LINUX_MIB_TCPFASTRETRANS;
19941996
else
1995-
NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
1997+
mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
1998+
NET_INC_STATS_BH(mib_idx);
19961999

19972000
if (skb == tcp_write_queue_head(sk))
19982001
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,

net/ipv4/tcp_timer.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -326,24 +326,27 @@ static void tcp_retransmit_timer(struct sock *sk)
326326
goto out;
327327

328328
if (icsk->icsk_retransmits == 0) {
329+
int mib_idx;
330+
329331
if (icsk->icsk_ca_state == TCP_CA_Disorder ||
330332
icsk->icsk_ca_state == TCP_CA_Recovery) {
331333
if (tcp_is_sack(tp)) {
332334
if (icsk->icsk_ca_state == TCP_CA_Recovery)
333-
NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
335+
mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
334336
else
335-
NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
337+
mib_idx = LINUX_MIB_TCPSACKFAILURES;
336338
} else {
337339
if (icsk->icsk_ca_state == TCP_CA_Recovery)
338-
NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
340+
mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
339341
else
340-
NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
342+
mib_idx = LINUX_MIB_TCPRENOFAILURES;
341343
}
342344
} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
343-
NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
345+
mib_idx = LINUX_MIB_TCPLOSSFAILURES;
344346
} else {
345-
NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
347+
mib_idx = LINUX_MIB_TCPTIMEOUTS;
346348
}
349+
NET_INC_STATS_BH(mib_idx);
347350
}
348351

349352
if (tcp_use_frto(sk)) {

0 commit comments

Comments
 (0)