net: Update the seqcount_t removal from Qdisc.
This is an all-in-one patch updating the seqcount_t removal from Qdisc
to the version recently posted on the list:
   Try to simplify the gnet_stats and remove qdisc->running sequence counter.
   https://lore.kernel.org/all/20211016084910.4029084-1-bigeasy@linutronix.de/

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Sebastian Andrzej Siewior committed Oct 18, 2021
1 parent 3cd6a94 commit 3ed7763
Showing 8 changed files with 61 additions and 107 deletions.
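In short, the recurring pattern in this patch is a move from copy-style stats
helpers, which overwrite their output and depend on the qdisc->running
seqcount, to add-style helpers that accumulate into caller-initialized
storage. A minimal sketch of the new calling convention, distilled from the
sch_mq/sch_mqprio hunks below ("sch" and "child" are illustrative qdiscs, not
names from the patch):

        #include <linux/string.h>
        #include <net/gen_stats.h>
        #include <net/sch_generic.h>

        static void example_aggregate(struct Qdisc *sch, struct Qdisc *child)
        {
                /* Start from zeroed totals; the add-style helpers accumulate. */
                sch->q.qlen = 0;
                gnet_stats_basic_sync_init(&sch->bstats);
                memset(&sch->qstats, 0, sizeof(sch->qstats));

                /* Fold one child's counters into the parent's totals. */
                gnet_stats_add_basic(&sch->bstats, child->cpu_bstats,
                                     &child->bstats, false);
                gnet_stats_add_queue(&sch->qstats, child->cpu_qstats,
                                     &child->qstats);
                sch->q.qlen += qdisc_qlen(child);
        }

The diffs below apply this pattern in mq_dump(), mqprio_dump() and
mqprio_dump_class_stats().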
35 changes: 10 additions & 25 deletions include/net/gen_stats.h

@@ -13,27 +13,14 @@
  * If no reads can ever occur parallel to writes (e.g. stack-allocated
  * bstats), then the internal stat values can be written to and read
  * from directly. Otherwise, use _bstats_set/update() for writes and
- * __gnet_stats_copy_basic() for reads.
+ * gnet_stats_add_basic() for reads.
  */
 struct gnet_stats_basic_sync {
         u64_stats_t bytes;
         u64_stats_t packets;
         struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
 
-#ifdef CONFIG_LOCKDEP
-void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
-
-#else
-
-static inline void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
-{
-        u64_stats_set(&b->bytes, 0);
-        u64_stats_set(&b->packets, 0);
-        u64_stats_init(&b->syncp);
-}
-#endif
-
 struct net_rate_estimator;
 
 struct gnet_dump {
@@ -50,6 +37,7 @@ struct gnet_dump {
         struct tc_stats tc_stats;
 };
 
+void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
                           struct gnet_dump *d, int padattr);
 
@@ -60,24 +48,21 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
 
 int gnet_stats_copy_basic(struct gnet_dump *d,
                           struct gnet_stats_basic_sync __percpu *cpu,
-                          struct gnet_stats_basic_sync *b,
-                          bool running);
-void __gnet_stats_copy_basic(struct gnet_stats_basic_sync *bstats,
-                             struct gnet_stats_basic_sync __percpu *cpu,
-                             struct gnet_stats_basic_sync *b,
-                             bool running);
+                          struct gnet_stats_basic_sync *b, bool running);
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+                          struct gnet_stats_basic_sync __percpu *cpu,
+                          struct gnet_stats_basic_sync *b, bool running);
 int gnet_stats_copy_basic_hw(struct gnet_dump *d,
                              struct gnet_stats_basic_sync __percpu *cpu,
-                             struct gnet_stats_basic_sync *b,
-                             bool unning);
+                             struct gnet_stats_basic_sync *b, bool running);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
                              struct net_rate_estimator __rcu **ptr);
 int gnet_stats_copy_queue(struct gnet_dump *d,
                           struct gnet_stats_queue __percpu *cpu_q,
                           struct gnet_stats_queue *q, __u32 qlen);
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
                             const struct gnet_stats_queue __percpu *cpu_q,
-                             const struct gnet_stats_queue *q, __u32 qlen);
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+                          const struct gnet_stats_queue __percpu *cpu_q,
+                          const struct gnet_stats_queue *q);
 int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 
 int gnet_stats_finish_copy(struct gnet_dump *d);
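The comment at the top of this header splits usage into a writer side and a
reader side. A short sketch of both halves, assuming a private
gnet_stats_basic_sync counter as the comment describes (function names are
illustrative, not part of the patch):

        #include <net/gen_stats.h>
        #include <net/sch_generic.h>

        /* Writer side: _bstats_update() brackets the adds with
         * u64_stats_update_begin()/end() on the counter's syncp.
         */
        static void example_account(struct gnet_stats_basic_sync *b,
                                    unsigned int len)
        {
                _bstats_update(b, len, 1);
        }

        /* Reader side: accumulate into a freshly initialized counter,
         * then pull the totals out with u64_stats_read().
         */
        static u64 example_read_bytes(struct gnet_stats_basic_sync *src,
                                      bool running)
        {
                struct gnet_stats_basic_sync tot;

                gnet_stats_basic_sync_init(&tot);
                gnet_stats_add_basic(&tot, NULL, src, running);
                return u64_stats_read(&tot.bytes);
        }

This is the same shape ___gnet_stats_copy_basic() uses in net/core/gen_stats.c
below: init a stack counter, accumulate, then read.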
21 changes: 2 additions & 19 deletions include/net/sch_generic.h

@@ -849,22 +849,6 @@ static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
         u64_stats_update_end(&bstats->syncp);
 }
 
-static inline void bstats_read_add(struct gnet_stats_basic_sync *bstats,
-                                   __u64 *bytes, __u64 *packets)
-{
-        u64 t_bytes, t_packets;
-        unsigned int start;
-
-        do {
-                start = u64_stats_fetch_begin_irq(&bstats->syncp);
-                t_bytes = u64_stats_read(&bstats->bytes);
-                t_packets = u64_stats_read(&bstats->packets);
-        } while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
-
-        *bytes = t_bytes;
-        *packets = t_packets;
-}
-
 static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
                                  const struct sk_buff *skb)
 {
@@ -965,10 +949,9 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
                                              __u32 *backlog)
 {
         struct gnet_stats_queue qstats = { 0 };
-        __u32 len = qdisc_qlen_sum(sch);
 
-        __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
-        *qlen = qstats.qlen;
+        gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
+        *qlen = qstats.qlen + qdisc_qlen(sch);
         *backlog = qstats.backlog;
 }
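For reference, the retry loop that the removed bstats_read_add() used, and
that gnet_stats_add_basic() in net/core/gen_stats.c now centralizes, follows
the stock u64_stats pattern. A generic reader sketch mirroring the removed
helper, not code from this patch:

        #include <linux/u64_stats_sync.h>
        #include <net/gen_stats.h>

        /* Retry until no writer raced with the reads. On 64-bit kernels
         * the fetch/retry pair compiles away; on 32-bit it prevents torn
         * reads of the 64-bit counters.
         */
        static void example_read_pair(struct gnet_stats_basic_sync *b,
                                      u64 *bytes, u64 *packets)
        {
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&b->syncp);
                        *bytes = u64_stats_read(&b->bytes);
                        *packets = u64_stats_read(&b->packets);
                } while (u64_stats_fetch_retry_irq(&b->syncp, start));
        }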
2 changes: 1 addition & 1 deletion net/core/gen_estimator.c

@@ -66,7 +66,7 @@ static void est_fetch_counters(struct net_rate_estimator *e,
         if (e->stats_lock)
                 spin_lock(e->stats_lock);
 
-        __gnet_stats_copy_basic(b, e->cpu_bstats, e->bstats, e->running);
+        gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running);
 
         if (e->stats_lock)
                 spin_unlock(e->stats_lock);
48 changes: 20 additions & 28 deletions net/core/gen_stats.c

@@ -114,7 +114,6 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
 }
 EXPORT_SYMBOL(gnet_stats_start_copy);
 
-#ifdef CONFIG_LOCKDEP
 /* Must not be inlined, due to u64_stats seqcount_t lockdep key */
 void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
 {
@@ -123,11 +122,9 @@ void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
         u64_stats_init(&b->syncp);
 }
 EXPORT_SYMBOL(gnet_stats_basic_sync_init);
-#endif
 
-static void
-__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_sync *bstats,
-                            struct gnet_stats_basic_sync __percpu *cpu)
+static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
+                                     struct gnet_stats_basic_sync __percpu *cpu)
 {
         u64 t_bytes = 0, t_packets = 0;
         int i;
@@ -149,20 +146,18 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_sync *bstats,
         _bstats_update(bstats, t_bytes, t_packets);
 }
 
-void
-__gnet_stats_copy_basic(struct gnet_stats_basic_sync *bstats,
-                        struct gnet_stats_basic_sync __percpu *cpu,
-                        struct gnet_stats_basic_sync *b,
-                        bool running)
+void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
+                          struct gnet_stats_basic_sync __percpu *cpu,
+                          struct gnet_stats_basic_sync *b, bool running)
 {
         unsigned int start;
-        __u64 bytes = 0;
-        __u64 packets = 0;
+        u64 bytes = 0;
+        u64 packets = 0;
 
         WARN_ON_ONCE((cpu || running) && !in_task());
 
         if (cpu) {
-                __gnet_stats_copy_basic_cpu(bstats, cpu);
+                gnet_stats_add_basic_cpu(bstats, cpu);
                 return;
         }
         do {
@@ -174,7 +169,7 @@ __gnet_stats_copy_basic(struct gnet_stats_basic_sync *bstats,
 
         _bstats_update(bstats, bytes, packets);
 }
-EXPORT_SYMBOL(__gnet_stats_copy_basic);
+EXPORT_SYMBOL(gnet_stats_add_basic);
 
 static int
 ___gnet_stats_copy_basic(struct gnet_dump *d,
@@ -186,7 +181,7 @@ ___gnet_stats_copy_basic(struct gnet_dump *d,
         u64 bstats_bytes, bstats_packets;
 
         gnet_stats_basic_sync_init(&bstats);
-        __gnet_stats_copy_basic(&bstats, cpu, b, running);
+        gnet_stats_add_basic(&bstats, cpu, b, running);
 
         bstats_bytes = u64_stats_read(&bstats.bytes);
         bstats_packets = u64_stats_read(&bstats.packets);
@@ -311,41 +306,37 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
 }
 EXPORT_SYMBOL(gnet_stats_copy_rate_est);
 
-static void
-__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
-                            const struct gnet_stats_queue __percpu *q)
+static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
+                                     const struct gnet_stats_queue __percpu *q)
 {
         int i;
 
         for_each_possible_cpu(i) {
                 const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
 
-                qstats->qlen = 0;
+                qstats->qlen += qcpu->backlog;
                 qstats->backlog += qcpu->backlog;
                 qstats->drops += qcpu->drops;
                 qstats->requeues += qcpu->requeues;
                 qstats->overlimits += qcpu->overlimits;
         }
 }
 
-void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
-                             const struct gnet_stats_queue __percpu *cpu,
-                             const struct gnet_stats_queue *q,
-                             __u32 qlen)
+void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
+                          const struct gnet_stats_queue __percpu *cpu,
+                          const struct gnet_stats_queue *q)
 {
         if (cpu) {
-                __gnet_stats_copy_queue_cpu(qstats, cpu);
+                gnet_stats_add_queue_cpu(qstats, cpu);
         } else {
                 qstats->qlen += q->qlen;
                 qstats->backlog += q->backlog;
                 qstats->drops += q->drops;
                 qstats->requeues += q->requeues;
                 qstats->overlimits += q->overlimits;
         }
-
-        qstats->qlen += qlen;
 }
-EXPORT_SYMBOL(__gnet_stats_copy_queue);
+EXPORT_SYMBOL(gnet_stats_add_queue);
 
 /**
  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
@@ -368,7 +359,8 @@ gnet_stats_copy_queue(struct gnet_dump *d,
 {
         struct gnet_stats_queue qstats = {0};
 
-        __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
+        gnet_stats_add_queue(&qstats, cpu_q, q);
+        qstats.qlen = qlen;
 
         if (d->compat_tc_stats) {
                 d->tc_stats.drops = qstats.drops;
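One consequence worth spelling out: gnet_stats_add_queue() no longer takes a
qlen argument, so each caller now supplies the queue length itself.
gnet_stats_copy_queue() above assigns the caller-provided qlen after
accumulating, and qdisc_qstats_qlen_backlog() in sch_generic.h adds
qdisc_qlen(sch). A sketch of the caller-side pattern ("sch" is an illustrative
qdisc pointer):

        #include <net/sch_generic.h>

        static void example_queue_totals(struct Qdisc *sch,
                                         __u32 *qlen, __u32 *backlog)
        {
                struct gnet_stats_queue qstats = {0};

                /* Accumulate per-CPU (or plain) queue counters... */
                gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
                /* ...then fold in the qdisc's own length separately. */
                *qlen = qstats.qlen + qdisc_qlen(sch);
                *backlog = qstats.backlog;
        }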
2 changes: 1 addition & 1 deletion net/sched/act_api.c

@@ -1174,7 +1174,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
                 goto errout;
 
         if (gnet_stats_copy_basic(&d, p->cpu_bstats,
-                                  &p->tcfa_bstats, false ) < 0 ||
+                                  &p->tcfa_bstats, false) < 0 ||
             gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
                                      &p->tcfa_bstats_hw, false) < 0 ||
             gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
18 changes: 9 additions & 9 deletions net/sched/sch_htb.c

@@ -1324,10 +1324,12 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
                         if (p != cl)
                                 continue;
 
-                        bstats_read_add(&c->bstats_bias, &bytes, &packets);
-                        if (c->level == 0)
-                                bstats_read_add(&c->leaf.q->bstats,
-                                                &bytes, &packets);
+                        bytes += u64_stats_read(&c->bstats_bias.bytes);
+                        packets += u64_stats_read(&c->bstats_bias.packets);
+                        if (c->level == 0) {
+                                bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
+                                packets += u64_stats_read(&c->leaf.q->bstats.packets);
+                        }
                 }
         }
         _bstats_update(&cl->bstats, bytes, packets);
@@ -1354,15 +1356,13 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
         if (q->offload) {
                 if (!cl->level) {
-                        u64 bytes = 0, packets = 0;
-
                         if (cl->leaf.q)
                                 cl->bstats = cl->leaf.q->bstats;
                         else
                                 gnet_stats_basic_sync_init(&cl->bstats);
-
-                        bstats_read_add(&cl->bstats_bias, &bytes, &packets);
-                        _bstats_update(&cl->bstats, bytes, packets);
+                        _bstats_update(&cl->bstats,
                                        u64_stats_read(&cl->bstats_bias.bytes),
+                                       u64_stats_read(&cl->bstats_bias.packets));
                 } else {
                         htb_offload_aggregate_stats(q, cl);
                 }
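With bstats_read_add() gone, the HTB offload paths read the u64_stats_t fields
directly. If one wanted the old helper shape back, a hypothetical wrapper over
what the hunks above now open-code would look like this (note there is no
seqcount retry loop around the reads, matching the new code):

        #include <net/gen_stats.h>

        /* Hypothetical fold helper mirroring the inline reads above. */
        static inline void example_bstats_fold(const struct gnet_stats_basic_sync *b,
                                               u64 *bytes, u64 *packets)
        {
                *bytes += u64_stats_read(&b->bytes);
                *packets += u64_stats_read(&b->packets);
        }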
13 changes: 5 additions & 8 deletions net/sched/sch_mq.c

@@ -130,7 +130,6 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
         struct net_device *dev = qdisc_dev(sch);
         struct Qdisc *qdisc;
         unsigned int ntx;
-        __u32 qlen = 0;
 
         sch->q.qlen = 0;
         gnet_stats_basic_sync_init(&sch->bstats);
@@ -145,14 +144,12 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
                 qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
                 spin_lock_bh(qdisc_lock(qdisc));
 
-                qlen = qdisc_qlen_sum(qdisc);
-                __gnet_stats_copy_basic(&sch->bstats, qdisc->cpu_bstats,
-                                        &qdisc->bstats, false);
-                __gnet_stats_copy_queue(&sch->qstats,
-                                        qdisc->cpu_qstats,
-                                        &qdisc->qstats, qlen);
-                sch->q.qlen += qlen;
+                gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+                                     &qdisc->bstats, false);
+                gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
+                                     &qdisc->qstats);
+                sch->q.qlen += qdisc_qlen(qdisc);
 
                 spin_unlock_bh(qdisc_lock(qdisc));
         }
29 changes: 13 additions & 16 deletions net/sched/sch_mqprio.c

@@ -399,17 +399,14 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
          * qdisc totals are added at end.
          */
         for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-                u32 qlen = qdisc_qlen_sum(qdisc);
-
                 qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
                 spin_lock_bh(qdisc_lock(qdisc));
 
-                __gnet_stats_copy_basic(&sch->bstats, qdisc->cpu_bstats,
-                                        &qdisc->bstats, false);
-                __gnet_stats_copy_queue(&sch->qstats,
-                                        qdisc->cpu_qstats,
-                                        &qdisc->qstats, qlen);
-                sch->q.qlen += qlen;
+                gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
+                                     &qdisc->bstats, false);
+                gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
+                                     &qdisc->qstats);
+                sch->q.qlen += qdisc_qlen(qdisc);
 
                 spin_unlock_bh(qdisc_lock(qdisc));
         }
@@ -501,7 +498,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 {
         if (cl >= TC_H_MIN_PRIORITY) {
                 int i;
-                __u32 qlen = 0;
+                __u32 qlen;
                 struct gnet_stats_queue qstats = {0};
                 struct gnet_stats_basic_sync bstats;
                 struct net_device *dev = qdisc_dev(sch);
@@ -522,15 +519,15 @@
 
                         spin_lock_bh(qdisc_lock(qdisc));
 
-                        qlen = qdisc_qlen_sum(qdisc);
-                        __gnet_stats_copy_basic(&bstats, qdisc->cpu_bstats,
-                                                &qdisc->bstats, false);
-                        __gnet_stats_copy_queue(&qstats,
-                                                qdisc->cpu_qstats,
-                                                &qdisc->qstats,
-                                                qlen);
+                        gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
+                                             &qdisc->bstats, false);
+                        gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
+                                             &qdisc->qstats);
+                        sch->q.qlen += qdisc_qlen(qdisc);
 
                         spin_unlock_bh(qdisc_lock(qdisc));
                 }
+                qlen = qdisc_qlen(sch) + qstats.qlen;
+
                 /* Reclaim root sleeping lock before completing stats */
                 if (d->lock)
