Skip to content

Commit 99c969a

Browse files
sumang-mrvl and Paolo Abeni
authored and committed
octeontx2-pf: Add egress PFC support
As of now all transmit queues transmit packets out of same scheduler queue hierarchy. Due to this PFC frames sent by peer are not handled properly, either all transmit queues are backpressured or none. To fix this when user enables PFC for a given priority map relavant transmit queue to a different scheduler queue hierarcy, so that backpressure is applied only to the traffic egressing out of that TXQ. Signed-off-by: Suman Ghosh <sumang@marvell.com> Link: https://lore.kernel.org/r/20220830120304.158060-1-sumang@marvell.com Signed-off-by: Paolo Abeni <pabeni@redhat.com>
1 parent a102c89 commit 99c969a

File tree

5 files changed

+427
-17
lines changed

5 files changed

+427
-17
lines changed

drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
8686
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
8787
aq->sq.cq_ena = 1;
8888
aq->sq.ena = 1;
89-
/* Only one SMQ is allocated, map all SQ's to that SMQ */
90-
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
89+
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
9190
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
9291
aq->sq.default_chan = pfvf->hw.tx_chan_base;
9392
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c

Lines changed: 47 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -586,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev)
586586
}
587587
EXPORT_SYMBOL(otx2_get_mac_from_af);
588588

589-
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
589+
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
590590
{
591+
u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
591592
struct otx2_hw *hw = &pfvf->hw;
592593
struct nix_txschq_config *req;
593594
u64 schq, parent;
@@ -602,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
602603
req->lvl = lvl;
603604
req->num_regs = 1;
604605

605-
schq = hw->txschq_list[lvl][0];
606+
schq_list = hw->txschq_list;
607+
#ifdef CONFIG_DCB
608+
if (txschq_for_pfc)
609+
schq_list = pfvf->pfc_schq_list;
610+
#endif
611+
612+
schq = schq_list[lvl][prio];
606613
/* Set topology e.t.c configuration */
607614
if (lvl == NIX_TXSCH_LVL_SMQ) {
608615
req->reg[0] = NIX_AF_SMQX_CFG(schq);
@@ -611,22 +618,22 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
611618
(0x2ULL << 36);
612619
req->num_regs++;
613620
/* MDQ config */
614-
parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
621+
parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
615622
req->reg[1] = NIX_AF_MDQX_PARENT(schq);
616623
req->regval[1] = parent << 16;
617624
req->num_regs++;
618625
/* Set DWRR quantum */
619626
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
620627
req->regval[2] = dwrr_val;
621628
} else if (lvl == NIX_TXSCH_LVL_TL4) {
622-
parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
629+
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
623630
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
624631
req->regval[0] = parent << 16;
625632
req->num_regs++;
626633
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
627634
req->regval[1] = dwrr_val;
628635
} else if (lvl == NIX_TXSCH_LVL_TL3) {
629-
parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
636+
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
630637
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
631638
req->regval[0] = parent << 16;
632639
req->num_regs++;
@@ -635,11 +642,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
635642
if (lvl == hw->txschq_link_cfg_lvl) {
636643
req->num_regs++;
637644
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
638-
/* Enable this queue and backpressure */
639-
req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
645+
/* Enable this queue and backpressure
646+
* and set relative channel
647+
*/
648+
req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
640649
}
641650
} else if (lvl == NIX_TXSCH_LVL_TL2) {
642-
parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
651+
parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
643652
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
644653
req->regval[0] = parent << 16;
645654

@@ -650,8 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
650659
if (lvl == hw->txschq_link_cfg_lvl) {
651660
req->num_regs++;
652661
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
653-
/* Enable this queue and backpressure */
654-
req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
662+
/* Enable this queue and backpressure
663+
* and set relative channel
664+
*/
665+
req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
655666
}
656667
} else if (lvl == NIX_TXSCH_LVL_TL1) {
657668
/* Default config for TL1.
@@ -676,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
676687

677688
return otx2_sync_mbox_msg(&pfvf->mbox);
678689
}
690+
EXPORT_SYMBOL(otx2_txschq_config);
691+
692+
int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
693+
{
694+
struct nix_txschq_config *req;
695+
int rc;
696+
697+
mutex_lock(&pfvf->mbox.lock);
698+
699+
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
700+
if (!req) {
701+
mutex_unlock(&pfvf->mbox.lock);
702+
return -ENOMEM;
703+
}
704+
705+
req->lvl = NIX_TXSCH_LVL_SMQ;
706+
req->reg[0] = NIX_AF_SMQX_CFG(smq);
707+
req->regval[0] |= BIT_ULL(49);
708+
req->num_regs++;
709+
710+
rc = otx2_sync_mbox_msg(&pfvf->mbox);
711+
mutex_unlock(&pfvf->mbox.lock);
712+
return rc;
713+
}
714+
EXPORT_SYMBOL(otx2_smq_flush);
679715

680716
int otx2_txsch_alloc(struct otx2_nic *pfvf)
681717
{
@@ -806,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
806842
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
807843
aq->sq.cq_ena = 1;
808844
aq->sq.ena = 1;
809-
/* Only one SMQ is allocated, map all SQ's to that SMQ */
810-
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
845+
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
811846
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
812847
aq->sq.default_chan = pfvf->hw.tx_chan_base;
813848
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */

drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h

Lines changed: 26 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,11 @@
4040

4141
#define NAME_SIZE 32
4242

43+
#ifdef CONFIG_DCB
44+
/* Max priority supported for PFC */
45+
#define NIX_PF_PFC_PRIO_MAX 8
46+
#endif
47+
4348
enum arua_mapped_qtypes {
4449
AURA_NIX_RQ,
4550
AURA_NIX_SQ,
@@ -196,7 +201,7 @@ struct otx2_hw {
196201

197202
/* NIX */
198203
u8 txschq_link_cfg_lvl;
199-
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
204+
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
200205
u16 matchall_ipolicer;
201206
u32 dwrr_mtu;
202207

@@ -415,6 +420,8 @@ struct otx2_nic {
415420
/* PFC */
416421
u8 pfc_en;
417422
u8 *queue_to_pfc_map;
423+
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
424+
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
418425
#endif
419426

420427
/* napi event count. It is needed for adaptive irq coalescing. */
@@ -785,6 +792,16 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
785792
dir, DMA_ATTR_SKIP_CPU_SYNC);
786793
}
787794

795+
static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
796+
{
797+
#ifdef CONFIG_DCB
798+
if (pfvf->pfc_alloc_status[qidx])
799+
return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
800+
#endif
801+
802+
return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
803+
}
804+
788805
/* MSI-X APIs */
789806
void otx2_free_cints(struct otx2_nic *pfvf, int n);
790807
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -807,7 +824,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
807824
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
808825
int otx2_config_nix(struct otx2_nic *pfvf);
809826
int otx2_config_nix_queues(struct otx2_nic *pfvf);
810-
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
827+
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
811828
int otx2_txsch_alloc(struct otx2_nic *pfvf);
812829
int otx2_txschq_stop(struct otx2_nic *pfvf);
813830
void otx2_sqb_flush(struct otx2_nic *pfvf);
@@ -888,6 +905,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
888905
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
889906
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
890907
netdev_features_t features);
908+
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
909+
891910
/* tc support */
892911
int otx2_init_tc(struct otx2_nic *nic);
893912
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -907,5 +926,10 @@ void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
907926
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
908927
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
909928
int otx2_dcbnl_set_ops(struct net_device *dev);
929+
/* PFC support */
930+
int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
931+
int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
932+
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
933+
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
910934
#endif
911935
#endif /* OTX2_COMMON_H */

0 commit comments

Comments
 (0)