
Commit ab6dddd

Subbaraya Sundeep authored and davem330 committed
octeontx2-pf: qos send queues management
Currently the number of Send queues (SQs) is decided at device probe and is equal to the number of online CPUs. These SQs are allocated and deallocated in the interface open and close calls respectively. This patch defines new APIs for initializing and deinitializing Send queues dynamically and allocates additional transmit queues for the QOS feature.

Signed-off-by: Subbaraya Sundeep <[email protected]>
Signed-off-by: Hariprasad Kelam <[email protected]>
Signed-off-by: Sunil Kovvuri Goutham <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Reviewed-by: Jacob Keller <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 508c58f commit ab6dddd
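Editor's note: the new qos_sq.c file itself is not included in this excerpt. Below is a minimal, hedged sketch of how the helpers this patch exports from otx2_common.c (otx2_aura_init(), otx2_pool_init(), otx2_sq_init()) could be combined to bring up one extra send queue at runtime. The example function name, the pool sizes, and the use of otx2_get_pool_idx()/hw->sqb_size are assumptions for illustration, not code from this series.

/* Illustrative sketch only -- not the qos_sq.c added by this series.
 * It strings together the helpers this patch exports from otx2_common.c.
 * num_sqbs/stack_pages below are made-up example sizes; real code would
 * size them from qset->sqe_cnt, fill the pool with SQB pointers, and
 * unwind the aura/pool on error.
 */
static int example_qos_sq_setup(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_hw *hw = &pfvf->hw;
	int pool_id, stack_pages, num_sqbs, err;
	u16 sqb_aura;

	/* otx2_get_pool_idx()/AURA_NIX_SQ come from otx2_common.h (not shown here) */
	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
	sqb_aura = pool_id;

	num_sqbs = 128;		/* example: number of send queue buffers */
	stack_pages = 4;	/* example: NPA stack pages holding the buffer pointers */

	err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
	if (err)
		return err;

	err = otx2_pool_init(pfvf, pool_id, stack_pages, num_sqbs, hw->sqb_size);
	if (err)
		return err;

	/* Create the SQ context and attach it to its SQB aura */
	return otx2_sq_init(pfvf, qidx, sqb_aura);
}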

File tree: 10 files changed, +426 -42 lines changed


drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
Lines changed: 5 additions & 0 deletions

@@ -1222,6 +1222,11 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
 
 	for (aura = id; aura < max_id; aura++) {
 		aq_req.aura_id = aura;
+
+		/* Skip if queue is uninitialized */
+		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
+			continue;
+
 		seq_printf(m, "======%s : %d=======\n",
 			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
 			   aq_req.aura_id);

drivers/net/ethernet/marvell/octeontx2/nic/Makefile
Lines changed: 1 addition & 1 deletion

@@ -8,7 +8,7 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
 
 rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
                otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
-               otx2_devlink.o
+               otx2_devlink.o qos_sq.o
 rvu_nicvf-y := otx2_vf.o otx2_devlink.o
 
 rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
Lines changed: 28 additions & 15 deletions

@@ -513,8 +513,8 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
 		     (pfvf->hw.cq_ecount_wait - 1));
 }
 
-int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-		      dma_addr_t *dma)
+static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+			     dma_addr_t *dma)
 {
 	u8 *buf;
 
@@ -532,8 +532,8 @@ int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
 	return 0;
 }
 
-static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-			   dma_addr_t *dma)
+int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+		    dma_addr_t *dma)
 {
 	int ret;
 
@@ -758,11 +758,16 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
 void otx2_sqb_flush(struct otx2_nic *pfvf)
 {
 	int qidx, sqe_tail, sqe_head;
+	struct otx2_snd_queue *sq;
 	u64 incr, *ptr, val;
 	int timeout = 1000;
 
 	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
-	for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
+	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
+		sq = &pfvf->qset.sq[qidx];
+		if (!sq->sqb_ptrs)
+			continue;
+
 		incr = (u64)qidx << 32;
 		while (timeout) {
 			val = otx2_atomic64_add(incr, ptr);
@@ -862,7 +867,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
 	return otx2_sync_mbox_msg(&pfvf->mbox);
 }
 
-static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
+int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
 {
 	struct otx2_qset *qset = &pfvf->qset;
 	struct otx2_snd_queue *sq;
@@ -935,9 +940,17 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 		cq->cint_idx = qidx - pfvf->hw.rx_queues;
 		cq->cqe_cnt = qset->sqe_cnt;
 	} else {
-		cq->cq_type = CQ_XDP;
-		cq->cint_idx = qidx - non_xdp_queues;
-		cq->cqe_cnt = qset->sqe_cnt;
+		if (pfvf->hw.xdp_queues &&
+		    qidx < non_xdp_queues + pfvf->hw.xdp_queues) {
+			cq->cq_type = CQ_XDP;
+			cq->cint_idx = qidx - non_xdp_queues;
+			cq->cqe_cnt = qset->sqe_cnt;
+		} else {
+			cq->cq_type = CQ_QOS;
+			cq->cint_idx = qidx - non_xdp_queues -
+				       pfvf->hw.xdp_queues;
+			cq->cqe_cnt = qset->sqe_cnt;
+		}
 	}
 	cq->cqe_size = pfvf->qset.xqe_size;
 
@@ -1095,7 +1108,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
 
 	/* Set RQ/SQ/CQ counts */
 	nixlf->rq_cnt = pfvf->hw.rx_queues;
-	nixlf->sq_cnt = pfvf->hw.non_qos_queues;
+	nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf);
 	nixlf->cq_cnt = pfvf->qset.cq_cnt;
 	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
 	nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1133,7 +1146,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
 	int sqb, qidx;
 	u64 iova, pa;
 
-	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
+	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
 		sq = &qset->sq[qidx];
 		if (!sq->sqb_ptrs)
 			continue;
@@ -1201,8 +1214,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
 	pfvf->qset.pool = NULL;
 }
 
-static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
-			  int pool_id, int numptrs)
+int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+		   int pool_id, int numptrs)
 {
 	struct npa_aq_enq_req *aq;
 	struct otx2_pool *pool;
@@ -1278,8 +1291,8 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
 	return 0;
 }
 
-static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
-			  int stack_pages, int numptrs, int buf_size)
+int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+		   int stack_pages, int numptrs, int buf_size)
 {
 	struct npa_aq_enq_req *aq;
 	struct otx2_pool *pool;
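The otx2_cq_init() hunk above splits the old catch-all CQ_XDP branch into XDP and QoS cases. The following is a standalone, hedged illustration of that qidx-to-CQ-type mapping; the queue counts are example values and the enum and function names are local to the example, not the driver's.

#include <stdio.h>

enum ex_cq_type { EX_CQ_RX, EX_CQ_TX, EX_CQ_XDP, EX_CQ_QOS };

/* Mirrors the classification done in otx2_cq_init() after this patch */
static enum ex_cq_type classify_cq(int qidx, int rx_queues, int tx_queues,
				   int xdp_queues)
{
	int non_xdp_queues = rx_queues + tx_queues;

	if (qidx < rx_queues)
		return EX_CQ_RX;
	if (qidx < non_xdp_queues)
		return EX_CQ_TX;
	if (xdp_queues && qidx < non_xdp_queues + xdp_queues)
		return EX_CQ_XDP;
	return EX_CQ_QOS;	/* everything past rx + tx + xdp serves a QoS SQ */
}

int main(void)
{
	/* Example: 4 RX, 4 TX, 2 XDP queues -> CQ indices 10 and up are QoS CQs */
	printf("cq 9 -> %d, cq 10 -> %d\n",
	       classify_cq(9, 4, 4, 2), classify_cq(10, 4, 4, 2));
	return 0;
}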

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
Lines changed: 34 additions & 5 deletions

@@ -28,6 +28,7 @@
 #include "otx2_txrx.h"
 #include "otx2_devlink.h"
 #include <rvu_trace.h>
+#include "qos.h"
 
 /* IPv4 flag more fragment bit */
 #define IPV4_FLAG_MORE			0x20
@@ -190,6 +191,7 @@ struct otx2_hw {
 	u16			rx_queues;
 	u16			tx_queues;
 	u16			xdp_queues;
+	u16			tc_tx_queues;
 	u16			non_qos_queues; /* tx queues plus xdp queues */
 	u16			max_queues;
 	u16			pool_cnt;
@@ -506,6 +508,8 @@ struct otx2_nic {
 	u16			pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
 	bool			pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
 #endif
+	/* qos */
+	struct otx2_qos		qos;
 
 	/* napi event count. It is needed for adaptive irq coalescing. */
 	u32			napi_events;
@@ -750,8 +754,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
 /* Alloc pointer from pool/aura */
 static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
 {
-	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
-					   NPA_LF_AURA_OP_ALLOCX(0));
+	u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
 	u64 incr = (u64)aura | BIT_ULL(63);
 
 	return otx2_atomic64_add(incr, ptr);
@@ -893,12 +896,23 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
 
 static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
 {
+	u16 smq;
 #ifdef CONFIG_DCB
 	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
 		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
 #endif
+	/* check if qidx falls under QOS queues */
+	if (qidx >= pfvf->hw.non_qos_queues)
+		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
+	else
+		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
 
-	return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+	return smq;
+}
+
+static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
+{
+	return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
 }
 
 /* MSI-X APIs */
@@ -927,17 +941,22 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
 int otx2_txsch_alloc(struct otx2_nic *pfvf);
 int otx2_txschq_stop(struct otx2_nic *pfvf);
 void otx2_sqb_flush(struct otx2_nic *pfvf);
-int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-		      dma_addr_t *dma);
+int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+		    dma_addr_t *dma);
 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
 void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
 int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
 		      dma_addr_t *dma);
+int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
+		   int stack_pages, int numptrs, int buf_size);
+int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
+		   int pool_id, int numptrs);
 
 /* RSS configuration APIs*/
 int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1045,4 +1064,14 @@ static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
 {}
 #endif /* CONFIG_MACSEC */
 
+/* qos support */
+static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
+{
+	struct otx2_hw *hw = &pfvf->hw;
+
+	hw->tc_tx_queues = qos_txqs;
+}
+
+u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+		      struct net_device *sb_dev);
 #endif /* OTX2_COMMON_H */
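Taken together, otx2_qos_init() and otx2_get_total_tx_queues() drive all of the sizing changes in this patch (nixlf->sq_cnt, hw->sqpool_cnt, pf->qset.cq_cnt). A small, hedged arithmetic illustration with assumed example values (an 8-CPU system with no XDP queues):

#include <stdio.h>

int main(void)
{
	int rx_queues = 8, tx_queues = 8, xdp_queues = 0;
	int tc_tx_queues = 8;			/* what otx2_qos_init() records at probe */
	int non_qos_queues = tx_queues + xdp_queues;

	/* mirrors otx2_get_total_tx_queues() */
	int total_tx = non_qos_queues + tc_tx_queues;

	/* mirrors nixlf->sq_cnt and pf->qset.cq_cnt after this patch */
	int sq_cnt = total_tx;
	int cq_cnt = rx_queues + total_tx;

	printf("SQs=%d CQs=%d (%d of each kept for QoS)\n",
	       sq_cnt, cq_cnt, tc_tx_queues);
	return 0;
}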

drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
Lines changed: 34 additions & 10 deletions

@@ -23,6 +23,7 @@
 #include "otx2_struct.h"
 #include "otx2_ptp.h"
 #include "cn10k.h"
+#include "qos.h"
 #include <rvu_trace.h>
 
 #define DRV_NAME	"rvu_nicpf"
@@ -1228,6 +1229,7 @@ static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
 {
 	struct otx2_nic *pf = data;
+	struct otx2_snd_queue *sq;
 	u64 val, *ptr;
 	u64 qidx = 0;
 
@@ -1257,10 +1259,14 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
 	}
 
 	/* SQ */
-	for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
+	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
 		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
 		u8 sq_op_err_code, mnq_err_code, snd_err_code;
 
+		sq = &pf->qset.sq[qidx];
+		if (!sq->sqb_ptrs)
+			continue;
+
 		/* Below debug registers captures first errors corresponding to
 		 * those registers. We don't have to check against SQ qid as
 		 * these are fatal errors.
@@ -1383,7 +1389,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
 	/* Free SQB pointers */
 	otx2_sq_free_sqbs(pf);
-	for (qidx = 0; qidx < pf->hw.non_qos_queues; qidx++) {
+	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
 		sq = &qset->sq[qidx];
 		qmem_free(pf->dev, sq->sqe);
 		qmem_free(pf->dev, sq->tso_hdrs);
@@ -1433,7 +1439,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
 	 * so, aura count = pool count.
 	 */
 	hw->rqpool_cnt = hw->rx_queues;
-	hw->sqpool_cnt = hw->non_qos_queues;
+	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
 
 	/* Maximum hardware supported transmit length */
@@ -1688,11 +1694,14 @@ int otx2_open(struct net_device *netdev)
 
 	netif_carrier_off(netdev);
 
-	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.non_qos_queues;
 	/* RQ and SQs are mapped to different CQs,
 	 * so find out max CQ IRQs (i.e CINTs) needed.
 	 */
-	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
+	pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
+			       pf->hw.tc_tx_queues);
+
+	pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);
+
 	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
 	if (!qset->napi)
 		return -ENOMEM;
@@ -1743,6 +1752,11 @@ int otx2_open(struct net_device *netdev)
 		else
 			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
 
+		cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
+					  (qidx + pf->hw.rx_queues +
+					   pf->hw.non_qos_queues) :
+					  CINT_INVALID_CQ;
+
 		cq_poll->dev = (void *)pf;
 		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
 		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
@@ -1947,6 +1961,12 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int qidx = skb_get_queue_mapping(skb);
 	struct otx2_snd_queue *sq;
 	struct netdev_queue *txq;
+	int sq_idx;
+
+	/* XDP SQs are not mapped with TXQs
+	 * advance qid to derive correct sq mapped with QOS
+	 */
+	sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;
 
 	/* Check for minimum and maximum packet length */
 	if (skb->len <= ETH_HLEN ||
@@ -1955,7 +1975,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
-	sq = &pf->qset.sq[qidx];
+	sq = &pf->qset.sq[sq_idx];
 	txq = netdev_get_tx_queue(netdev, qidx);
 
 	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
@@ -1973,8 +1993,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
 	return NETDEV_TX_OK;
 }
 
-static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
-			     struct net_device *sb_dev)
+u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+		      struct net_device *sb_dev)
 {
 #ifdef CONFIG_DCB
 	struct otx2_nic *pf = netdev_priv(netdev);
@@ -1996,6 +2016,7 @@ static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
 #endif
 	return netdev_pick_tx(netdev, skb, NULL);
 }
+EXPORT_SYMBOL(otx2_select_queue);
 
 static netdev_features_t otx2_fix_features(struct net_device *dev,
 					   netdev_features_t features)
@@ -2712,10 +2733,10 @@ static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
+	int err, qcount, qos_txqs;
 	struct net_device *netdev;
 	struct otx2_nic *pf;
 	struct otx2_hw *hw;
-	int err, qcount;
 	int num_vec;
 
 	err = pcim_enable_device(pdev);
@@ -2740,8 +2761,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Set number of queues */
 	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
+	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
 
-	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
+	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_release_regions;
@@ -2929,6 +2951,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_pf_sriov_init;
 #endif
 
+	otx2_qos_init(pf, qos_txqs);
+
 	return 0;
 
 err_pf_sriov_init:
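The otx2_xmit() hunk above has to skip over the XDP SQs because netdev TX queue indices cover only the regular and QoS ranges, while the SQ array also holds the XDP SQs in between. A hedged standalone illustration of that remapping follows; the queue counts and function name are example assumptions.

#include <stdio.h>

/* Mirrors the sq_idx computation added to otx2_xmit() */
static int txq_to_sq(int qidx, int tx_queues, int xdp_queues)
{
	return (qidx >= tx_queues) ? (qidx + xdp_queues) : qidx;
}

int main(void)
{
	int tx_queues = 8, xdp_queues = 2;

	/* Regular TXQ 3 stays SQ 3; the first QoS TXQ (8) lands on SQ 10,
	 * past the two XDP SQs that sit between the ranges.
	 */
	printf("txq 3 -> sq %d, txq 8 -> sq %d\n",
	       txq_to_sq(3, tx_queues, xdp_queues),
	       txq_to_sq(8, tx_queues, xdp_queues));
	return 0;
}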
