@@ -222,6 +222,13 @@ enum res_fs_rule_states {
 struct res_fs_rule {
 	struct res_common	com;
 	int			qpn;
+	/* VF DMFS mbox with port flipped */
+	void			*mirr_mbox;
+	/* > 0 --> apply mirror when getting into HA mode      */
+	/* = 0 --> un-apply mirror when getting out of HA mode */
+	u32			mirr_mbox_size;
+	struct list_head	mirr_list;
+	u64			mirr_rule_id;
 };
 
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -4284,6 +4291,22 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
 	return err;
 }
 
+static u32 qp_attach_mbox_size(void *mbox)
+{
+	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+	struct _rule_hw *rule_header;
+
+	rule_header = (struct _rule_hw *)(mbox + size);
+
+	while (rule_header->size) {
+		size += rule_header->size * sizeof(u32);
+		rule_header += 1;
+	}
+	return size;
+}
+
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,
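qp_attach_mbox_size() recovers the attach mailbox length from its contents: it starts with the fixed control header and adds each rule spec's advertised length (its size field, in 32-bit words) until it reaches an entry whose size is zero. That total is what later gets copied into mirr_mbox, so the mirror attach can replay the same specs on the other port. Below is a small standalone sketch of the same accumulation, assuming a simplified header type and a made-up 16-byte control header; the names here are illustrative, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define CTRL_HDR_SIZE 16	/* stand-in for sizeof(struct mlx4_net_trans_rule_hw_ctrl) */

/* Illustrative spec header: only the length field matters for the walk. */
struct rule_hdr_sketch {
	uint8_t size;		/* spec length in 32-bit words; 0 terminates the list */
};

/* Same accumulation as qp_attach_mbox_size(): control header plus each
 * spec's advertised length, stopping at the first zero-size entry. */
static uint32_t attach_mbox_size_sketch(const struct rule_hdr_sketch *hdr)
{
	uint32_t size = CTRL_HDR_SIZE;

	while (hdr->size) {
		size += hdr->size * sizeof(uint32_t);
		hdr++;
	}
	return size;
}

int main(void)
{
	/* e.g. a 2-dword L2 spec followed by a 3-dword L3 spec */
	struct rule_hdr_sketch hdrs[] = { { 2 }, { 3 }, { 0 } };

	printf("%u\n", (unsigned)attach_mbox_size_sketch(hdrs));	/* 16 + 8 + 12 = 36 */
	return 0;
}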
@@ -4300,6 +4323,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
 	struct _rule_hw *rule_header;
 	int header_id;
+	struct res_fs_rule *rrule;
+	u32 mbox_size;
 
 	if (dev->caps.steering_mode !=
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4328,7 +4353,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 	case MLX4_NET_TRANS_RULE_ID_ETH:
 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
 			err = -EINVAL;
-			goto err_put;
+			goto err_put_qp;
 		}
 		break;
 	case MLX4_NET_TRANS_RULE_ID_IB:
@@ -4339,15 +4364,15 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
 			err = -EINVAL;
-			goto err_put;
+			goto err_put_qp;
 		}
 		vhcr->in_modifier +=
 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
 		break;
 	default:
 		pr_err("Corrupted mailbox\n");
 		err = -EINVAL;
-		goto err_put;
+		goto err_put_qp;
 	}
 
 execute:
@@ -4356,23 +4381,69 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
 			   MLX4_CMD_NATIVE);
 	if (err)
-		goto err_put;
+		goto err_put_qp;
+
 
 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
 	if (err) {
 		mlx4_err(dev, "Fail to add flow steering resources\n");
-		/* detach rule*/
+		goto err_detach;
+	}
+
+	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
+	if (err)
+		goto err_detach;
+
+	mbox_size = qp_attach_mbox_size(inbox->buf);
+	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
+	if (!rrule->mirr_mbox) {
+		err = -ENOMEM;
+		goto err_put_rule;
+	}
+	rrule->mirr_mbox_size = mbox_size;
+	rrule->mirr_rule_id = 0;
+	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
+
+	/* set different port */
+	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
+	if (ctrl->port == 1)
+		ctrl->port = 2;
+	else
+		ctrl->port = 1;
+
+	if (mlx4_is_bonded(dev))
+		mlx4_do_mirror_rule(dev, rrule);
+
+	atomic_inc(&rqp->ref_count);
+
+err_put_rule:
+	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
+err_detach:
+	/* detach rule on error */
+	if (err)
 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
 			 MLX4_CMD_NATIVE);
-		goto err_put;
-	}
-	atomic_inc(&rqp->ref_count);
-err_put:
+err_put_qp:
 	put_res(dev, slave, qpn, RES_QP);
 	return err;
 }
 
+static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+	int err;
+
+	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
+	if (err) {
+		mlx4_err(dev, "Fail to remove flow steering resources\n");
+		return err;
+	}
+
+	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	return 0;
+}
+
 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 					  struct mlx4_vhcr *vhcr,
 					  struct mlx4_cmd_mailbox *inbox,
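On a successful attach for a VF, the wrapper above now also keeps a private copy of the attach mailbox in the rule's mirr_mbox, flips the port in that copy so it targets the other physical port, and creates the mirror immediately only when the device is already bonded; otherwise the copy simply waits for the bond-time pass added further down. The error labels then fall through: err_put_rule drops the rule's busy reference, err_detach detaches the freshly attached rule only when err is set, and err_put_qp releases the QP taken at the top of the function. A rough userspace-style model of the bookkeeping, with simplified structs and a stand-in for mlx4_do_mirror_rule() (these names are assumptions, not driver API):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct ctrl_sketch { uint8_t port; };	/* 1 or 2 on a dual-port HCA */

struct rule_sketch {
	void *mirr_mbox;		/* port-flipped copy of the attach mailbox */
	uint32_t mirr_mbox_size;	/* non-zero only for original rules */
};

/* Stand-in for mlx4_do_mirror_rule(): attach the copy on the other port. */
static int mirror_now(struct rule_sketch *r)
{
	(void)r;
	return 0;
}

static int remember_mirror(struct rule_sketch *r, const void *mbox,
			   uint32_t size, bool bonded)
{
	struct ctrl_sketch *ctrl;

	r->mirr_mbox = malloc(size);
	if (!r->mirr_mbox)
		return -1;
	memcpy(r->mirr_mbox, mbox, size);
	r->mirr_mbox_size = size;

	/* the copy targets the other physical port */
	ctrl = r->mirr_mbox;
	ctrl->port = (ctrl->port == 1) ? 2 : 1;

	/* already in HA mode: mirror right away; otherwise defer to bond time */
	return bonded ? mirror_now(r) : 0;
}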
@@ -4382,6 +4453,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 	int err;
 	struct res_qp *rqp;
 	struct res_fs_rule *rrule;
+	u64 mirr_reg_id;
 
 	if (dev->caps.steering_mode !=
 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4390,12 +4462,30 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
 	if (err)
 		return err;
+
+	if (!rrule->mirr_mbox) {
+		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
+		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+		return -EINVAL;
+	}
+	mirr_reg_id = rrule->mirr_rule_id;
+	kfree(rrule->mirr_mbox);
+
 	/* Release the rule form busy state before removal */
 	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
 	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
 	if (err)
 		return err;
 
+	if (mirr_reg_id && mlx4_is_bonded(dev)) {
+		err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
+		if (err) {
+			mlx4_err(dev, "Fail to get resource of mirror rule\n");
+		} else {
+			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
+			mlx4_undo_mirror_rule(dev, rrule);
+		}
+	}
 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
 	if (err) {
 		mlx4_err(dev, "Fail to remove flow steering resources\n");
@@ -4833,6 +4923,91 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
 	spin_unlock_irq(mlx4_tlock(dev));
 }
 
+static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+	struct res_fs_rule *mirr_rule;
+	u64 reg_id;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	if (!fs_rule->mirr_mbox) {
+		mlx4_err(dev, "rule mirroring mailbox is null\n");
+		return -EINVAL;
+	}
+	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
+	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
+			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+			   MLX4_CMD_NATIVE);
+	mlx4_free_cmd_mailbox(dev, mailbox);
+
+	if (err)
+		goto err;
+
+	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
+	if (err)
+		goto err_detach;
+
+	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
+	if (err)
+		goto err_rem;
+
+	fs_rule->mirr_rule_id = reg_id;
+	mirr_rule->mirr_rule_id = 0;
+	mirr_rule->mirr_mbox_size = 0;
+	mirr_rule->mirr_mbox = NULL;
+	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
+
+	return 0;
+err_rem:
+	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
+err_detach:
+	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
+		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+err:
+	return err;
+}
+
+static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker =
+		&priv->mfunc.master.res_tracker;
+	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
+	struct rb_node *p;
+	struct res_fs_rule *fs_rule;
+	int err = 0;
+	LIST_HEAD(mirr_list);
+
+	for (p = rb_first(root); p; p = rb_next(p)) {
+		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
+		if ((bond && fs_rule->mirr_mbox_size) ||
+		    (!bond && !fs_rule->mirr_mbox_size))
+			list_add_tail(&fs_rule->mirr_list, &mirr_list);
+	}
+
+	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
+		if (bond)
+			err += mlx4_do_mirror_rule(dev, fs_rule);
+		else
+			err += mlx4_undo_mirror_rule(dev, fs_rule);
+	}
+	return err;
+}
+
+int mlx4_bond_fs_rules(struct mlx4_dev *dev)
+{
+	return mlx4_mirror_fs_rules(dev, true);
+}
+
+int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
+{
+	return mlx4_mirror_fs_rules(dev, false);
+}
+
 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
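mlx4_mirror_fs_rules() is the bond-time/unbond-time pass: it walks the RES_FS_RULE tree once, batches the relevant entries on a local list through their mirr_list hook, and then mirrors or unmirrors each one, summing per-rule failures into the return value. Selection relies purely on mirr_mbox_size: original rules keep a non-zero size while mirror rules are created with size zero, so bonding picks the originals (each still needs a mirror) and unbonding picks the mirrors (each needs to be detached and released). A minimal sketch of that predicate over an illustrative struct, not the tracker's own:

#include <stdbool.h>
#include <stdint.h>

struct fs_rule_sketch {
	uint32_t mirr_mbox_size;	/* > 0 for originals, 0 for mirror rules */
};

/* Same test as the rb-tree walk in mlx4_mirror_fs_rules():
 * bond   -> select originals, so a mirror can be created for each;
 * unbond -> select mirrors, so they can be detached and freed. */
static bool needs_work(const struct fs_rule_sketch *r, bool bond)
{
	return bond ? r->mirr_mbox_size != 0 : r->mirr_mbox_size == 0;
}

mlx4_bond_fs_rules() and mlx4_unbond_fs_rules() are thin wrappers that just pick the direction, so the code driving HA mode changes never touches the mirr_mbox bookkeeping directly.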