
Commit 1173286

IronShen authored and davem330 committed
net: hns3: Add input key and action config support for flow director
Each flow director rule consists of an input key and an action. The input key is the match condition, built from tuples of the L2/L3/L4 headers. The action is the behaviour applied when a packet matches the input key, such as dropping the packet or forwarding it to a specified queue. The input key is stored in the TCAM blocks, and each bit of the input key can be masked.

Signed-off-by: Jian Shen <[email protected]>
Signed-off-by: Peng Li <[email protected]>
Signed-off-by: Salil Mehta <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent d695964 commit 1173286
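As a conceptual aside on the masking described in the commit message, the sketch below shows per-bit masked matching in plain C. It is only an illustration of the idea (mask bit set = compare, mask bit clear = don't care); tuple_matches() is a hypothetical helper and does not reflect the driver's calc_x()/calc_y() TCAM x/y encoding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch: a packet field matches a rule tuple when every
 * bit selected by the mask is equal; masked-out bits are ignored.
 */
static bool tuple_matches(uint32_t pkt_field, uint32_t rule_value,
			  uint32_t rule_mask)
{
	return ((pkt_field ^ rule_value) & rule_mask) == 0;
}

int main(void)
{
	printf("%d\n", tuple_matches(80, 80, 0xFFFF));   /* 1: exact match       */
	printf("%d\n", tuple_matches(8080, 80, 0xFFFF)); /* 0: differs           */
	printf("%d\n", tuple_matches(0x12, 0x1F, 0xF0)); /* 1: low nibble masked */
	return 0;
}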

File tree

3 files changed: +456 -0 lines changed


drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h

Lines changed: 45 additions & 0 deletions
@@ -194,6 +194,8 @@ enum hclge_opcode_type {
 	HCLGE_OPC_FD_MODE_CTRL = 0x1200,
 	HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
 	HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+	HCLGE_OPC_FD_TCAM_OP = 0x1203,
+	HCLGE_OPC_FD_AD_OP = 0x1204,
 
 	/* MDIO command */
 	HCLGE_OPC_MDIO_CONFIG = 0x1900,
@@ -851,6 +853,49 @@ struct hclge_set_fd_key_config_cmd {
 	u8 rsv2[8];
 };
 
+#define HCLGE_FD_EPORT_SW_EN_B		0
+struct hclge_fd_tcam_config_1_cmd {
+	u8 stage;
+	u8 xy_sel;
+	u8 port_info;
+	u8 rsv1[1];
+	__le32 index;
+	u8 entry_vld;
+	u8 rsv2[7];
+	u8 tcam_data[8];
+};
+
+struct hclge_fd_tcam_config_2_cmd {
+	u8 tcam_data[24];
+};
+
+struct hclge_fd_tcam_config_3_cmd {
+	u8 tcam_data[20];
+	u8 rsv[4];
+};
+
+#define HCLGE_FD_AD_DROP_B		0
+#define HCLGE_FD_AD_DIRECT_QID_B	1
+#define HCLGE_FD_AD_QID_S		2
+#define HCLGE_FD_AD_QID_M		GENMASK(12, 2)
+#define HCLGE_FD_AD_USE_COUNTER_B	12
+#define HCLGE_FD_AD_COUNTER_NUM_S	13
+#define HCLGE_FD_AD_COUNTER_NUM_M	GENMASK(20, 13)
+#define HCLGE_FD_AD_NXT_STEP_B		20
+#define HCLGE_FD_AD_NXT_KEY_S		21
+#define HCLGE_FD_AD_NXT_KEY_M		GENMASK(26, 21)
+#define HCLGE_FD_AD_WR_RULE_ID_B	0
+#define HCLGE_FD_AD_RULE_ID_S		1
+#define HCLGE_FD_AD_RULE_ID_M		GENMASK(13, 1)
+
+struct hclge_fd_ad_config_cmd {
+	u8 stage;
+	u8 rsv1[3];
+	__le32 index;
+	__le64 ad_data;
+	u8 rsv2[8];
+};
+
 int hclge_cmd_init(struct hclge_dev *hdev);
 static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
 {
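To make the HCLGE_FD_AD_* layout above more concrete, here is a standalone sketch of how a 64-bit ad_data word could be assembled for a simple drop/forward action. It mirrors the order used by hclge_fd_ad_config() in hclge_main.c (rule-id fields first, then shifted into the upper 32 bits), but GENMASK() and the hnae3_set_bit()/hnae3_set_field() helpers are kernel-side, so plain shifts are used here, build_fd_ad_data() is a hypothetical name, and the counter and next-stage fields are left out.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the ad_data word, using the same bit positions as
 * the HCLGE_FD_AD_* definitions above. Names are local to this example.
 */
#define FD_AD_DROP_B		0	/* HCLGE_FD_AD_DROP_B */
#define FD_AD_DIRECT_QID_B	1	/* HCLGE_FD_AD_DIRECT_QID_B */
#define FD_AD_QID_S		2	/* HCLGE_FD_AD_QID_S */
#define FD_AD_WR_RULE_ID_B	0	/* HCLGE_FD_AD_WR_RULE_ID_B */
#define FD_AD_RULE_ID_S		1	/* HCLGE_FD_AD_RULE_ID_S */

static uint64_t build_fd_ad_data(uint16_t rule_id, int drop, uint16_t queue_id)
{
	uint64_t ad_data = 0;

	/* rule-id fields are filled first and then shifted into the
	 * upper 32 bits, as hclge_fd_ad_config() does
	 */
	ad_data |= 1ULL << FD_AD_WR_RULE_ID_B;
	ad_data |= (uint64_t)rule_id << FD_AD_RULE_ID_S;
	ad_data <<= 32;

	if (drop)
		ad_data |= 1ULL << FD_AD_DROP_B;
	else
		ad_data |= (1ULL << FD_AD_DIRECT_QID_B) |
			   ((uint64_t)queue_id << FD_AD_QID_S);

	return ad_data;
}

int main(void)
{
	/* rule 5, forward to queue 3 (not dropped) */
	printf("ad_data = 0x%016llx\n",
	       (unsigned long long)build_fd_ad_data(5, 0, 3));
	return 0;
}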

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

Lines changed: 329 additions & 0 deletions
@@ -3471,6 +3471,335 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
 }
 
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
+				int loc, u8 *key, bool is_add)
+{
+	struct hclge_fd_tcam_config_1_cmd *req1;
+	struct hclge_fd_tcam_config_2_cmd *req2;
+	struct hclge_fd_tcam_config_3_cmd *req3;
+	struct hclge_desc desc[3];
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
+	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
+	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
+
+	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+	req1->stage = stage;
+	req1->xy_sel = sel_x ? 1 : 0;
+	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
+	req1->index = cpu_to_le32(loc);
+	req1->entry_vld = sel_x ? is_add : 0;
+
+	if (key) {
+		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
+		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
+		       sizeof(req2->tcam_data));
+		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
+		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
+	}
+
+	ret = hclge_cmd_send(&hdev->hw, desc, 3);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"config tcam key fail, ret=%d\n",
+			ret);
+
+	return ret;
+}
+
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
+			      struct hclge_fd_ad_data *action)
+{
+	struct hclge_fd_ad_config_cmd *req;
+	struct hclge_desc desc;
+	u64 ad_data = 0;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
+
+	req = (struct hclge_fd_ad_config_cmd *)desc.data;
+	req->index = cpu_to_le32(loc);
+	req->stage = stage;
+
+	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
+		      action->write_rule_id_to_bd);
+	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
+			action->rule_id);
+	ad_data <<= 32;
+	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
+	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
+		      action->forward_to_direct_queue);
+	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+			action->queue_id);
+	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
+	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
+			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
+	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
+			action->counter_id);
+
+	req->ad_data = cpu_to_le64(ad_data);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
+
+	return ret;
+}
+
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+				   struct hclge_fd_rule *rule)
+{
+	u16 tmp_x_s, tmp_y_s;
+	u32 tmp_x_l, tmp_y_l;
+	int i;
+
+	if (rule->unused_tuple & tuple_bit)
+		return true;
+
+	switch (tuple_bit) {
+	case 0:
+		return false;
+	case BIT(INNER_DST_MAC):
+		for (i = 0; i < 6; i++) {
+			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+			       rule->tuples_mask.dst_mac[i]);
+			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+			       rule->tuples_mask.dst_mac[i]);
+		}
+
+		return true;
+	case BIT(INNER_SRC_MAC):
+		for (i = 0; i < 6; i++) {
+			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+			       rule->tuples.src_mac[i]);
+			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+			       rule->tuples.src_mac[i]);
+		}
+
+		return true;
+	case BIT(INNER_VLAN_TAG_FST):
+		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
+		       rule->tuples_mask.vlan_tag1);
+		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
+		       rule->tuples_mask.vlan_tag1);
+		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+		return true;
+	case BIT(INNER_ETH_TYPE):
+		calc_x(tmp_x_s, rule->tuples.ether_proto,
+		       rule->tuples_mask.ether_proto);
+		calc_y(tmp_y_s, rule->tuples.ether_proto,
+		       rule->tuples_mask.ether_proto);
+		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+		return true;
+	case BIT(INNER_IP_TOS):
+		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+
+		return true;
+	case BIT(INNER_IP_PROTO):
+		calc_x(*key_x, rule->tuples.ip_proto,
+		       rule->tuples_mask.ip_proto);
+		calc_y(*key_y, rule->tuples.ip_proto,
+		       rule->tuples_mask.ip_proto);
+
+		return true;
+	case BIT(INNER_SRC_IP):
+		calc_x(tmp_x_l, rule->tuples.src_ip[3],
+		       rule->tuples_mask.src_ip[3]);
+		calc_y(tmp_y_l, rule->tuples.src_ip[3],
+		       rule->tuples_mask.src_ip[3]);
+		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+		return true;
+	case BIT(INNER_DST_IP):
+		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
+		       rule->tuples_mask.dst_ip[3]);
+		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
+		       rule->tuples_mask.dst_ip[3]);
+		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+		return true;
+	case BIT(INNER_SRC_PORT):
+		calc_x(tmp_x_s, rule->tuples.src_port,
+		       rule->tuples_mask.src_port);
+		calc_y(tmp_y_s, rule->tuples.src_port,
+		       rule->tuples_mask.src_port);
+		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+		return true;
+	case BIT(INNER_DST_PORT):
+		calc_x(tmp_x_s, rule->tuples.dst_port,
+		       rule->tuples_mask.dst_port);
+		calc_y(tmp_y_s, rule->tuples.dst_port,
+		       rule->tuples_mask.dst_port);
+		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+		return true;
+	default:
+		return false;
+	}
+}
+
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
+				 u8 vf_id, u8 network_port_id)
+{
+	u32 port_number = 0;
+
+	if (port_type == HOST_PORT) {
+		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
+				pf_id);
+		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
+				vf_id);
+		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
+	} else {
+		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
+				HCLGE_NETWORK_PORT_ID_S, network_port_id);
+		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
+	}
+
+	return port_number;
+}
+
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
+				       __le32 *key_x, __le32 *key_y,
+				       struct hclge_fd_rule *rule)
+{
+	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
+	u8 cur_pos = 0, tuple_size, shift_bits;
+	int i;
+
+	for (i = 0; i < MAX_META_DATA; i++) {
+		tuple_size = meta_data_key_info[i].key_length;
+		tuple_bit = key_cfg->meta_data_active & BIT(i);
+
+		switch (tuple_bit) {
+		case BIT(ROCE_TYPE):
+			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
+			cur_pos += tuple_size;
+			break;
+		case BIT(DST_VPORT):
+			port_number = hclge_get_port_number(HOST_PORT, 0,
+							    rule->vf_id, 0);
+			hnae3_set_field(meta_data,
+					GENMASK(cur_pos + tuple_size, cur_pos),
+					cur_pos, port_number);
+			cur_pos += tuple_size;
+			break;
+		default:
+			break;
+		}
+	}
+
+	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
+	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
+	shift_bits = sizeof(meta_data) * 8 - cur_pos;
+
+	*key_x = cpu_to_le32(tmp_x << shift_bits);
+	*key_y = cpu_to_le32(tmp_y << shift_bits);
+}
+
+/* A complete key is combined with meta data key and tuple key.
+ * Meta data key is stored at the MSB region, and tuple key is stored at
+ * the LSB region, unused bits will be filled 0.
+ */
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
+			    struct hclge_fd_rule *rule)
+{
+	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
+	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
+	u8 *cur_key_x, *cur_key_y;
+	int i, ret, tuple_size;
+	u8 meta_data_region;
+
+	memset(key_x, 0, sizeof(key_x));
+	memset(key_y, 0, sizeof(key_y));
+	cur_key_x = key_x;
+	cur_key_y = key_y;
+
+	for (i = 0 ; i < MAX_TUPLE; i++) {
+		bool tuple_valid;
+		u32 check_tuple;
+
+		tuple_size = tuple_key_info[i].key_length / 8;
+		check_tuple = key_cfg->tuple_active & BIT(i);
+
+		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+						     cur_key_y, rule);
+		if (tuple_valid) {
+			cur_key_x += tuple_size;
+			cur_key_y += tuple_size;
+		}
+	}
+
+	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
+			MAX_META_DATA_LENGTH / 8;
+
+	hclge_fd_convert_meta_data(key_cfg,
+				   (__le32 *)(key_x + meta_data_region),
+				   (__le32 *)(key_y + meta_data_region),
+				   rule);
+
+	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
+				   true);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"fd key_y config fail, loc=%d, ret=%d\n",
+			rule->queue_id, ret);
+		return ret;
+	}
+
+	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
+				   true);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"fd key_x config fail, loc=%d, ret=%d\n",
+			rule->queue_id, ret);
+	return ret;
+}
+
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
+			       struct hclge_fd_rule *rule)
+{
+	struct hclge_fd_ad_data ad_data;
+
+	ad_data.ad_id = rule->location;
+
+	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+		ad_data.drop_packet = true;
+		ad_data.forward_to_direct_queue = false;
+		ad_data.queue_id = 0;
+	} else {
+		ad_data.drop_packet = false;
+		ad_data.forward_to_direct_queue = true;
+		ad_data.queue_id = rule->queue_id;
+	}
+
+	ad_data.use_counter = false;
+	ad_data.counter_id = 0;
+
+	ad_data.use_next_stage = false;
+	ad_data.next_input_key = 0;
+
+	ad_data.write_rule_id_to_bd = true;
+	ad_data.rule_id = rule->location;
+
+	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
+}
+
 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
 {
 	struct hclge_desc desc;
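For readers following hclge_config_key() above, the comment before it describes the key layout: tuple bytes packed from the LSB end, the meta data word at the MSB end, and unused bytes left zero. The sketch below illustrates that layout with assumed sizes (the 400-bit key and 32 bits of meta data are placeholders chosen for the example, not values read from the hardware); in the driver the real sizes come from hdev->fd_cfg.max_key_length and MAX_META_DATA_LENGTH.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustration only: tuple bytes fill the key buffer from offset 0,
 * the meta data word occupies the last bytes, everything else is zero.
 * Sizes below are assumptions for this example.
 */
#define EXAMPLE_MAX_KEY_LENGTH		400	/* bits, assumed */
#define EXAMPLE_MAX_META_DATA_LENGTH	32	/* bits, assumed */
#define EXAMPLE_MAX_KEY_BYTES		(EXAMPLE_MAX_KEY_LENGTH / 8)

int main(void)
{
	uint8_t key_x[EXAMPLE_MAX_KEY_BYTES];
	uint8_t tuple_bytes[] = { 0xde, 0xad, 0xbe, 0xef }; /* fake tuple data */
	uint32_t meta_data = 0x12345678;                    /* fake meta data  */
	unsigned int meta_data_region;

	memset(key_x, 0, sizeof(key_x));

	/* tuples are appended from offset 0 upward */
	memcpy(key_x, tuple_bytes, sizeof(tuple_bytes));

	/* meta data occupies the last MAX_META_DATA_LENGTH / 8 bytes */
	meta_data_region = EXAMPLE_MAX_KEY_LENGTH / 8 -
			   EXAMPLE_MAX_META_DATA_LENGTH / 8;
	memcpy(key_x + meta_data_region, &meta_data, sizeof(meta_data));

	printf("meta data region starts at byte %u of %u\n",
	       meta_data_region, EXAMPLE_MAX_KEY_BYTES);
	return 0;
}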
