Skip to content

Commit 50dc9a8

Browse files
a-darwish authored and davem330 committed
net: sched: Merge Qdisc::bstats and Qdisc::cpu_bstats data types
The only factor differentiating per-CPU bstats data type (struct gnet_stats_basic_cpu) from the packed non-per-CPU one (struct gnet_stats_basic_packed) was a u64_stats sync point inside the former. The two data types are now equivalent: earlier commits added a u64_stats sync point to the latter. Combine both data types into "struct gnet_stats_basic_sync". This eliminates redundancy and simplifies the bstats read/write APIs. Use u64_stats_t for bstats "packets" and "bytes" data types. On 64-bit architectures, u64_stats sync points do not use sequence counter protection. Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent f56940d commit 50dc9a8

30 files changed

+155
-160
lines changed

drivers/net/ethernet/netronome/nfp/abm/qdisc.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
458458
static void
459459
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
460460
struct nfp_alink_stats *old,
461-
struct gnet_stats_basic_packed *bstats,
461+
struct gnet_stats_basic_sync *bstats,
462462
struct gnet_stats_queue *qstats)
463463
{
464464
_bstats_update(bstats, new->tx_bytes - old->tx_bytes,

include/net/act_api.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,13 +30,13 @@ struct tc_action {
3030
atomic_t tcfa_bindcnt;
3131
int tcfa_action;
3232
struct tcf_t tcfa_tm;
33-
struct gnet_stats_basic_packed tcfa_bstats;
34-
struct gnet_stats_basic_packed tcfa_bstats_hw;
33+
struct gnet_stats_basic_sync tcfa_bstats;
34+
struct gnet_stats_basic_sync tcfa_bstats_hw;
3535
struct gnet_stats_queue tcfa_qstats;
3636
struct net_rate_estimator __rcu *tcfa_rate_est;
3737
spinlock_t tcfa_lock;
38-
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
39-
struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
38+
struct gnet_stats_basic_sync __percpu *cpu_bstats;
39+
struct gnet_stats_basic_sync __percpu *cpu_bstats_hw;
4040
struct gnet_stats_queue __percpu *cpu_qstats;
4141
struct tc_cookie __rcu *act_cookie;
4242
struct tcf_chain __rcu *goto_chain;
@@ -206,7 +206,7 @@ static inline void tcf_action_update_bstats(struct tc_action *a,
206206
struct sk_buff *skb)
207207
{
208208
if (likely(a->cpu_bstats)) {
209-
bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
209+
bstats_update(this_cpu_ptr(a->cpu_bstats), skb);
210210
return;
211211
}
212212
spin_lock(&a->tcfa_lock);

include/net/gen_stats.h

Lines changed: 23 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -7,15 +7,17 @@
77
#include <linux/rtnetlink.h>
88
#include <linux/pkt_sched.h>
99

10-
/* Note: this used to be in include/uapi/linux/gen_stats.h */
11-
struct gnet_stats_basic_packed {
12-
__u64 bytes;
13-
__u64 packets;
14-
struct u64_stats_sync syncp;
15-
};
16-
17-
struct gnet_stats_basic_cpu {
18-
struct gnet_stats_basic_packed bstats;
10+
/* Throughput stats.
11+
* Must be initialized beforehand with gnet_stats_basic_sync_init().
12+
*
13+
* If no reads can ever occur parallel to writes (e.g. stack-allocated
14+
* bstats), then the internal stat values can be written to and read
15+
* from directly. Otherwise, use _bstats_set/update() for writes and
16+
* gnet_stats_add_basic() for reads.
17+
*/
18+
struct gnet_stats_basic_sync {
19+
u64_stats_t bytes;
20+
u64_stats_t packets;
1921
struct u64_stats_sync syncp;
2022
} __aligned(2 * sizeof(u64));
2123

@@ -35,7 +37,7 @@ struct gnet_dump {
3537
struct tc_stats tc_stats;
3638
};
3739

38-
void gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b);
40+
void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
3941
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
4042
struct gnet_dump *d, int padattr);
4143

@@ -46,16 +48,16 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
4648

4749
int gnet_stats_copy_basic(const seqcount_t *running,
4850
struct gnet_dump *d,
49-
struct gnet_stats_basic_cpu __percpu *cpu,
50-
struct gnet_stats_basic_packed *b);
51+
struct gnet_stats_basic_sync __percpu *cpu,
52+
struct gnet_stats_basic_sync *b);
5153
void gnet_stats_add_basic(const seqcount_t *running,
52-
struct gnet_stats_basic_packed *bstats,
53-
struct gnet_stats_basic_cpu __percpu *cpu,
54-
struct gnet_stats_basic_packed *b);
54+
struct gnet_stats_basic_sync *bstats,
55+
struct gnet_stats_basic_sync __percpu *cpu,
56+
struct gnet_stats_basic_sync *b);
5557
int gnet_stats_copy_basic_hw(const seqcount_t *running,
5658
struct gnet_dump *d,
57-
struct gnet_stats_basic_cpu __percpu *cpu,
58-
struct gnet_stats_basic_packed *b);
59+
struct gnet_stats_basic_sync __percpu *cpu,
60+
struct gnet_stats_basic_sync *b);
5961
int gnet_stats_copy_rate_est(struct gnet_dump *d,
6062
struct net_rate_estimator __rcu **ptr);
6163
int gnet_stats_copy_queue(struct gnet_dump *d,
@@ -68,14 +70,14 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
6870

6971
int gnet_stats_finish_copy(struct gnet_dump *d);
7072

71-
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
72-
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
73+
int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
74+
struct gnet_stats_basic_sync __percpu *cpu_bstats,
7375
struct net_rate_estimator __rcu **rate_est,
7476
spinlock_t *lock,
7577
seqcount_t *running, struct nlattr *opt);
7678
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
77-
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
78-
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
79+
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
80+
struct gnet_stats_basic_sync __percpu *cpu_bstats,
7981
struct net_rate_estimator __rcu **ptr,
8082
spinlock_t *lock,
8183
seqcount_t *running, struct nlattr *opt);

include/net/netfilter/xt_rateest.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66

77
struct xt_rateest {
88
/* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
9-
struct gnet_stats_basic_packed bstats;
9+
struct gnet_stats_basic_sync bstats;
1010
spinlock_t lock;
1111

1212

include/net/pkt_cls.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -765,7 +765,7 @@ struct tc_cookie {
765765
};
766766

767767
struct tc_qopt_offload_stats {
768-
struct gnet_stats_basic_packed *bstats;
768+
struct gnet_stats_basic_sync *bstats;
769769
struct gnet_stats_queue *qstats;
770770
};
771771

@@ -885,7 +885,7 @@ struct tc_gred_qopt_offload_params {
885885
};
886886

887887
struct tc_gred_qopt_offload_stats {
888-
struct gnet_stats_basic_packed bstats[MAX_DPs];
888+
struct gnet_stats_basic_sync bstats[MAX_DPs];
889889
struct gnet_stats_queue qstats[MAX_DPs];
890890
struct red_stats *xstats[MAX_DPs];
891891
};

include/net/sch_generic.h

Lines changed: 9 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ struct Qdisc {
9797
struct netdev_queue *dev_queue;
9898

9999
struct net_rate_estimator __rcu *rate_est;
100-
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
100+
struct gnet_stats_basic_sync __percpu *cpu_bstats;
101101
struct gnet_stats_queue __percpu *cpu_qstats;
102102
int pad;
103103
refcount_t refcnt;
@@ -107,7 +107,7 @@ struct Qdisc {
107107
*/
108108
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
109109
struct qdisc_skb_head q;
110-
struct gnet_stats_basic_packed bstats;
110+
struct gnet_stats_basic_sync bstats;
111111
seqcount_t running;
112112
struct gnet_stats_queue qstats;
113113
unsigned long state;
@@ -849,43 +849,27 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
849849
return sch->enqueue(skb, sch, to_free);
850850
}
851851

852-
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
852+
static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
853853
__u64 bytes, __u32 packets)
854854
{
855855
u64_stats_update_begin(&bstats->syncp);
856-
bstats->bytes += bytes;
857-
bstats->packets += packets;
856+
u64_stats_add(&bstats->bytes, bytes);
857+
u64_stats_add(&bstats->packets, packets);
858858
u64_stats_update_end(&bstats->syncp);
859859
}
860860

861-
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
861+
static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
862862
const struct sk_buff *skb)
863863
{
864864
_bstats_update(bstats,
865865
qdisc_pkt_len(skb),
866866
skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
867867
}
868868

869-
static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
870-
__u64 bytes, __u32 packets)
871-
{
872-
u64_stats_update_begin(&bstats->syncp);
873-
_bstats_update(&bstats->bstats, bytes, packets);
874-
u64_stats_update_end(&bstats->syncp);
875-
}
876-
877-
static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
878-
const struct sk_buff *skb)
879-
{
880-
u64_stats_update_begin(&bstats->syncp);
881-
bstats_update(&bstats->bstats, skb);
882-
u64_stats_update_end(&bstats->syncp);
883-
}
884-
885869
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
886870
const struct sk_buff *skb)
887871
{
888-
bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
872+
bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
889873
}
890874

891875
static inline void qdisc_bstats_update(struct Qdisc *sch,
@@ -1317,15 +1301,15 @@ void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
13171301
struct mini_Qdisc {
13181302
struct tcf_proto *filter_list;
13191303
struct tcf_block *block;
1320-
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
1304+
struct gnet_stats_basic_sync __percpu *cpu_bstats;
13211305
struct gnet_stats_queue __percpu *cpu_qstats;
13221306
struct rcu_head rcu;
13231307
};
13241308

13251309
static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
13261310
const struct sk_buff *skb)
13271311
{
1328-
bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
1312+
bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
13291313
}
13301314

13311315
static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)

net/core/gen_estimator.c

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -40,10 +40,10 @@
4040
*/
4141

4242
struct net_rate_estimator {
43-
struct gnet_stats_basic_packed *bstats;
43+
struct gnet_stats_basic_sync *bstats;
4444
spinlock_t *stats_lock;
4545
seqcount_t *running;
46-
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
46+
struct gnet_stats_basic_sync __percpu *cpu_bstats;
4747
u8 ewma_log;
4848
u8 intvl_log; /* period : (250ms << intvl_log) */
4949

@@ -60,9 +60,9 @@ struct net_rate_estimator {
6060
};
6161

6262
static void est_fetch_counters(struct net_rate_estimator *e,
63-
struct gnet_stats_basic_packed *b)
63+
struct gnet_stats_basic_sync *b)
6464
{
65-
gnet_stats_basic_packed_init(b);
65+
gnet_stats_basic_sync_init(b);
6666
if (e->stats_lock)
6767
spin_lock(e->stats_lock);
6868

@@ -76,23 +76,27 @@ static void est_fetch_counters(struct net_rate_estimator *e,
7676
static void est_timer(struct timer_list *t)
7777
{
7878
struct net_rate_estimator *est = from_timer(est, t, timer);
79-
struct gnet_stats_basic_packed b;
79+
struct gnet_stats_basic_sync b;
80+
u64 b_bytes, b_packets;
8081
u64 rate, brate;
8182

8283
est_fetch_counters(est, &b);
83-
brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
84+
b_bytes = u64_stats_read(&b.bytes);
85+
b_packets = u64_stats_read(&b.packets);
86+
87+
brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
8488
brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
8589

86-
rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
90+
rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
8791
rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
8892

8993
write_seqcount_begin(&est->seq);
9094
est->avbps += brate;
9195
est->avpps += rate;
9296
write_seqcount_end(&est->seq);
9397

94-
est->last_bytes = b.bytes;
95-
est->last_packets = b.packets;
98+
est->last_bytes = b_bytes;
99+
est->last_packets = b_packets;
96100

97101
est->next_jiffies += ((HZ/4) << est->intvl_log);
98102

@@ -121,16 +125,16 @@ static void est_timer(struct timer_list *t)
121125
* Returns 0 on success or a negative error code.
122126
*
123127
*/
124-
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
125-
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
128+
int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
129+
struct gnet_stats_basic_sync __percpu *cpu_bstats,
126130
struct net_rate_estimator __rcu **rate_est,
127131
spinlock_t *lock,
128132
seqcount_t *running,
129133
struct nlattr *opt)
130134
{
131135
struct gnet_estimator *parm = nla_data(opt);
132136
struct net_rate_estimator *old, *est;
133-
struct gnet_stats_basic_packed b;
137+
struct gnet_stats_basic_sync b;
134138
int intvl_log;
135139

136140
if (nla_len(opt) < sizeof(*parm))
@@ -164,8 +168,8 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
164168
est_fetch_counters(est, &b);
165169
if (lock)
166170
local_bh_enable();
167-
est->last_bytes = b.bytes;
168-
est->last_packets = b.packets;
171+
est->last_bytes = u64_stats_read(&b.bytes);
172+
est->last_packets = u64_stats_read(&b.packets);
169173

170174
if (lock)
171175
spin_lock_bh(lock);
@@ -222,8 +226,8 @@ EXPORT_SYMBOL(gen_kill_estimator);
222226
*
223227
* Returns 0 on success or a negative error code.
224228
*/
225-
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
226-
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
229+
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
230+
struct gnet_stats_basic_sync __percpu *cpu_bstats,
227231
struct net_rate_estimator __rcu **rate_est,
228232
spinlock_t *lock,
229233
seqcount_t *running, struct nlattr *opt)

0 commit comments

Comments (0)