
Commit d695964

IronShen authored and davem330 committed
net: hns3: Add flow director initialization
Flow director is a new feature supported by hardware with revision 0x21. This patch adds flow director initialization for each PF: it queries the flow director mode and TCAM resources from firmware, and selects the tuples used for the input key.

Signed-off-by: Jian Shen <[email protected]>
Signed-off-by: Peng Li <[email protected]>
Signed-off-by: Salil Mehta <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 26cf48a commit d695964

5 files changed, +349 -0 lines changed

drivers/net/ethernet/hisilicon/hns3/hnae3.h

Lines changed: 4 additions & 0 deletions

@@ -51,6 +51,7 @@
 #define HNAE3_KNIC_CLIENT_INITED_B 0x3
 #define HNAE3_UNIC_CLIENT_INITED_B 0x4
 #define HNAE3_ROCE_CLIENT_INITED_B 0x5
+#define HNAE3_DEV_SUPPORT_FD_B 0x6

 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
 		BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -61,6 +62,9 @@
 #define hnae3_dev_dcb_supported(hdev) \
 	hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)

+#define hnae3_dev_fd_supported(hdev) \
+	hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+
 #define ring_ptr_move_fw(ring, p) \
 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
 #define ring_ptr_move_bw(ring, p) \
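
The new HNAE3_DEV_SUPPORT_FD_B bit and the hnae3_dev_fd_supported() macro reuse the driver's existing bit-flag convention on ae_dev->flag. Below is a minimal, self-contained illustration of that convention; it is not code from this patch, and the driver's real helpers are the hnae3_get_bit()/hnae3_set_bit() macros in hnae3.h.

/* Illustrative only: one capability per bit position in a flag word.
 * The hypothetical example_* helpers stand in for the hnae3 macros.
 */
static inline void example_set_bit(unsigned long *flag, int shift, int val)
{
	if (val)
		*flag |= 1UL << shift;
	else
		*flag &= ~(1UL << shift);
}

static inline int example_get_bit(unsigned long flag, int shift)
{
	return (flag >> shift) & 0x1;
}

/* e.g. example_get_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) mirrors what
 * hnae3_dev_fd_supported(hdev) checks.
 */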

drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

Lines changed: 8 additions & 0 deletions

@@ -1622,6 +1622,13 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
 	pci_disable_sriov(pdev);
 }

+static void hns3_get_dev_capability(struct pci_dev *pdev,
+				    struct hnae3_ae_dev *ae_dev)
+{
+	if (pdev->revision >= 0x21)
+		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+}
+
 /* hns3_probe - Device initialization routine
  * @pdev: PCI device information struct
  * @ent: entry in hns3_pci_tbl
@@ -1647,6 +1654,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ae_dev->pdev = pdev;
 	ae_dev->flag = ent->driver_data;
 	ae_dev->dev_type = HNAE3_DEV_KNIC;
+	hns3_get_dev_capability(pdev, ae_dev);
 	pci_set_drvdata(pdev, ae_dev);

 	hnae3_register_ae_dev(ae_dev);
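
hns3_get_dev_capability() keys off pdev->revision, which the PCI core fills in from the device's config-space Revision ID register during enumeration. A small sketch of reading that register directly, purely for illustration; drivers normally just use the cached pdev->revision, as this patch does.

#include <linux/pci.h>

/* Illustrative only: shows where pdev->revision ultimately comes from.
 * The hypothetical example_read_revision() is not part of the patch.
 */
static u8 example_read_revision(struct pci_dev *pdev)
{
	u8 rev = 0;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	return rev;
}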

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h

Lines changed: 32 additions & 0 deletions

@@ -190,6 +190,11 @@ enum hclge_opcode_type {
 	HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
 	HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,

+	/* Flow Director commands */
+	HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+	HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+	HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+
 	/* MDIO command */
 	HCLGE_OPC_MDIO_CONFIG = 0x1900,

@@ -819,6 +824,33 @@ struct hclge_set_led_state_cmd {
 	u8 rsv2[20];
 };

+struct hclge_get_fd_mode_cmd {
+	u8 mode;
+	u8 enable;
+	u8 rsv[22];
+};
+
+struct hclge_get_fd_allocation_cmd {
+	__le32 stage1_entry_num;
+	__le32 stage2_entry_num;
+	__le16 stage1_counter_num;
+	__le16 stage2_counter_num;
+	u8 rsv[12];
+};
+
+struct hclge_set_fd_key_config_cmd {
+	u8 stage;
+	u8 key_select;
+	u8 inner_sipv6_word_en;
+	u8 inner_dipv6_word_en;
+	u8 outer_sipv6_word_en;
+	u8 outer_dipv6_word_en;
+	u8 rsv1[2];
+	__le32 tuple_mask;
+	__le32 meta_data_mask;
+	u8 rsv2[8];
+};
+
 int hclge_cmd_init(struct hclge_dev *hdev);
 static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
 {
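
All three new request structs are padded out with rsv fields so they can overlay the command descriptor's data area one-to-one. Below is a compile-time sanity check illustrating that invariant; the shared 24-byte payload size is inferred from the reserved padding above, not something stated in the patch, and the example_* names are hypothetical.

#include <linux/build_bug.h>

/* Illustrative only: each FD command struct should exactly fill the
 * descriptor data area it is cast onto in hclge_main.c.
 */
#define EXAMPLE_FD_CMD_DATA_SIZE	24

static inline void example_fd_cmd_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct hclge_get_fd_mode_cmd) !=
		     EXAMPLE_FD_CMD_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct hclge_get_fd_allocation_cmd) !=
		     EXAMPLE_FD_CMD_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct hclge_set_fd_key_config_cmd) !=
		     EXAMPLE_FD_CMD_DATA_SIZE);
}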

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

Lines changed: 157 additions & 0 deletions

@@ -3328,6 +3328,149 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
 	hclge_cmd_set_promisc_mode(hdev, &param);
 }

+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
+{
+	struct hclge_get_fd_mode_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
+
+	req = (struct hclge_get_fd_mode_cmd *)desc.data;
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
+		return ret;
+	}
+
+	*fd_mode = req->mode;
+
+	return ret;
+}
+
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
+				   u32 *stage1_entry_num,
+				   u32 *stage2_entry_num,
+				   u16 *stage1_counter_num,
+				   u16 *stage2_counter_num)
+{
+	struct hclge_get_fd_allocation_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
+
+	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
+			ret);
+		return ret;
+	}
+
+	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
+	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
+	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
+	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
+
+	return ret;
+}
+
+static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+{
+	struct hclge_set_fd_key_config_cmd *req;
+	struct hclge_fd_key_cfg *stage;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
+
+	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
+	stage = &hdev->fd_cfg.key_cfg[stage_num];
+	req->stage = stage_num;
+	req->key_select = stage->key_sel;
+	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
+	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
+	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
+	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
+	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
+	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
+
+	return ret;
+}
+
+static int hclge_init_fd_config(struct hclge_dev *hdev)
+{
+#define LOW_2_WORDS		0x03
+	struct hclge_fd_key_cfg *key_cfg;
+	int ret;
+
+	if (!hnae3_dev_fd_supported(hdev))
+		return 0;
+
+	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
+	if (ret)
+		return ret;
+
+	switch (hdev->fd_cfg.fd_mode) {
+	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
+		break;
+	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
+		break;
+	default:
+		dev_err(&hdev->pdev->dev,
+			"Unsupported flow director mode %d\n",
+			hdev->fd_cfg.fd_mode);
+		return -EOPNOTSUPP;
+	}
+
+	hdev->fd_cfg.fd_en = true;
+	hdev->fd_cfg.proto_support =
+		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
+		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
+	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
+	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
+	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
+	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
+	key_cfg->outer_sipv6_word_en = 0;
+	key_cfg->outer_dipv6_word_en = 0;
+
+	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
+				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+	/* If use max 400bit key, we can support tuples for ether type */
+	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
+		hdev->fd_cfg.proto_support |= ETHER_FLOW;
+		key_cfg->tuple_active |=
+				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+	}
+
+	/* roce_type is used to filter roce frames
+	 * dst_vport is used to specify the rule
+	 */
+	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
+
+	ret = hclge_get_fd_allocation(hdev,
+				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
+				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
+				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
+	if (ret)
+		return ret;
+
+	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
+}
+
 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
 {
 	struct hclge_desc desc;
@@ -5502,6 +5645,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		goto err_mdiobus_unreg;
 	}

+	ret = hclge_init_fd_config(hdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"fd table init fail, ret=%d\n", ret);
+		goto err_mdiobus_unreg;
+	}
+
 	hclge_dcb_ops_set(hdev);

 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -5608,6 +5758,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}

+	ret = hclge_init_fd_config(hdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"fd table init fail, ret=%d\n", ret);
+		return ret;
+	}
+
 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
 		 HCLGE_DRIVER_NAME);
