@@ -388,32 +388,50 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
 	struct tc_mqprio_qopt opt = { 0 };
 	struct Qdisc *qdisc;
-	unsigned int i;
+	unsigned int ntx, tc;
 
 	sch->q.qlen = 0;
 	memset(&sch->bstats, 0, sizeof(sch->bstats));
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+	/* MQ supports lockless qdiscs. However, statistics accounting needs
+	 * to account for all, none, or a mix of locked and unlocked child
+	 * qdiscs. Percpu stats are added to counters in-band and locking
+	 * qdisc totals are added at end.
+	 */
+	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 		spin_lock_bh(qdisc_lock(qdisc));
-		sch->q.qlen += qdisc->q.qlen;
-		sch->bstats.bytes += qdisc->bstats.bytes;
-		sch->bstats.packets += qdisc->bstats.packets;
-		sch->qstats.backlog += qdisc->qstats.backlog;
-		sch->qstats.drops += qdisc->qstats.drops;
-		sch->qstats.requeues += qdisc->qstats.requeues;
-		sch->qstats.overlimits += qdisc->qstats.overlimits;
+
+		if (qdisc_is_percpu_stats(qdisc)) {
+			__u32 qlen = qdisc_qlen_sum(qdisc);
+
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						qdisc->cpu_bstats,
+						&qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						qdisc->cpu_qstats,
+						&qdisc->qstats, qlen);
+		} else {
+			sch->q.qlen += qdisc->q.qlen;
+			sch->bstats.bytes += qdisc->bstats.bytes;
+			sch->bstats.packets += qdisc->bstats.packets;
+			sch->qstats.backlog += qdisc->qstats.backlog;
+			sch->qstats.drops += qdisc->qstats.drops;
+			sch->qstats.requeues += qdisc->qstats.requeues;
+			sch->qstats.overlimits += qdisc->qstats.overlimits;
+		}
+
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
 
 	opt.num_tc = netdev_get_num_tc(dev);
 	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
 	opt.hw = priv->hw_offload;
 
-	for (i = 0; i < netdev_get_num_tc(dev); i++) {
-		opt.count[i] = dev->tc_to_txq[i].count;
-		opt.offset[i] = dev->tc_to_txq[i].offset;
+	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
+		opt.count[tc] = dev->tc_to_txq[tc].count;
+		opt.offset[tc] = dev->tc_to_txq[tc].offset;
 	}
 
 	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
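The dump path above branches on whether a child qdisc keeps per-CPU statistics (the lockless TCQ_F_CPUSTATS case) or plain counters that are read under qdisc_lock(). As a rough illustration of what the per-CPU branch boils down to, the sketch below folds per-CPU counters into a single total; the struct and function names here are hypothetical stand-ins, not the in-tree helpers (qdisc_qlen_sum(), __gnet_stats_copy_basic(), __gnet_stats_copy_queue()):

```c
/* Simplified sketch only: hypothetical type and function names, not the
 * gen_stats API. Lockless qdiscs set TCQ_F_CPUSTATS and keep per-CPU
 * counters, which get summed into one aggregate; locked qdiscs are
 * instead read field by field under qdisc_lock(), as in the else branch
 * of the hunk above.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct example_stats {
	u64 bytes;
	u64 packets;
	u32 qlen;
};

static void example_sum_percpu(struct example_stats __percpu *cpu_stats,
			       struct example_stats *total)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_stats *s = per_cpu_ptr(cpu_stats, cpu);

		total->bytes   += s->bytes;
		total->packets += s->packets;
		total->qlen    += s->qlen;
	}
}
```

The real gen_stats helpers additionally take care of reading 64-bit counters consistently on 32-bit systems, so the sketch is only meant to convey the aggregation pattern the patch relies on.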
@@ -495,7 +513,6 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	if (cl >= TC_H_MIN_PRIORITY) {
 		int i;
 		__u32 qlen = 0;
-		struct Qdisc *qdisc;
 		struct gnet_stats_queue qstats = {0};
 		struct gnet_stats_basic_packed bstats = {0};
 		struct net_device *dev = qdisc_dev(sch);
@@ -511,18 +528,26 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
 		for (i = tc.offset; i < tc.offset + tc.count; i++) {
 			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
+			struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+			struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 
-			qdisc = rtnl_dereference(q->qdisc);
 			spin_lock_bh(qdisc_lock(qdisc));
-			qlen += qdisc->q.qlen;
-			bstats.bytes += qdisc->bstats.bytes;
-			bstats.packets += qdisc->bstats.packets;
-			qstats.backlog += qdisc->qstats.backlog;
-			qstats.drops += qdisc->qstats.drops;
-			qstats.requeues += qdisc->qstats.requeues;
-			qstats.overlimits += qdisc->qstats.overlimits;
+			if (qdisc_is_percpu_stats(qdisc)) {
+				cpu_bstats = qdisc->cpu_bstats;
+				cpu_qstats = qdisc->cpu_qstats;
+			}
+
+			qlen = qdisc_qlen_sum(qdisc);
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						cpu_bstats, &qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						cpu_qstats,
+						&qdisc->qstats,
+						qlen);
 			spin_unlock_bh(qdisc_lock(qdisc));
 		}
+
 		/* Reclaim root sleeping lock before completing stats */
 		if (d->lock)
 			spin_lock_bh(d->lock);
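As a usage note, the aggregated counters surface through the usual netlink dumps: the root totals computed in mqprio_dump() show up in output such as `tc -s qdisc show dev eth0`, and the per-traffic-class totals from mqprio_dump_class_stats() in `tc -s class show dev eth0` (the device name is just an example).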