@@ -30,163 +30,63 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
-			       struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
-			       struct mlx5e_ch_stats *ch_stats,
-			       struct mlx5e_rq *rq)
+static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
 {
-	void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct page_pool_params pp_params = {};
-	int node = dev_to_node(mdev->device);
-	u32 pool_size;
-	int wq_sz;
-	int err;
-	int i;
-
-	rqp->wq.db_numa_node = node;
-
-	rq->wq_type = params->rq_wq_type;
-	rq->pdev = mdev->device;
-	rq->netdev = priv->netdev;
-	rq->mdev = mdev;
-	rq->priv = priv;
-	rq->stats = stats;
-	rq->clock = &mdev->clock;
-	rq->tstamp = &priv->tstamp;
-	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-
-	xdp_rxq_info_unused(&rq->xdp_rxq);
-
-	rq->buff.map_dir = DMA_FROM_DEVICE;
-	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
-	pool_size = 1 << params->log_rq_mtu_frames;
-
-	err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
-	if (err)
-		return err;
-
-	rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
-
-	wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
-
-	rq->wqe.info = rqp->frags_info;
-	rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
-	rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
-				      (wq_sz << rq->wqe.info.log_num_frags)),
-				      GFP_KERNEL, node);
-	if (!rq->wqe.frags) {
-		err = -ENOMEM;
-		goto err_wq_cyc_destroy;
-	}
-
-	err = mlx5e_init_di_list(rq, wq_sz, node);
-	if (err)
-		goto err_free_frags;
-
-	rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
-
-	mlx5e_rq_set_trap_handlers(rq, params);
-
-	/* Create a page_pool and register it with rxq */
-	pp_params.order = 0;
-	pp_params.flags = 0; /* No-internal DMA mapping in page_pool */
-	pp_params.pool_size = pool_size;
-	pp_params.nid = node;
-	pp_params.dev = mdev->device;
-	pp_params.dma_dir = rq->buff.map_dir;
-
-	/* page_pool can be used even when there is no rq->xdp_prog,
-	 * given page_pool does not handle DMA mapping there is no
-	 * required state to clear. And page_pool gracefully handle
-	 * elevated refcnt.
-	 */
-	rq->page_pool = page_pool_create(&pp_params);
-	if (IS_ERR(rq->page_pool)) {
-		err = PTR_ERR(rq->page_pool);
-		rq->page_pool = NULL;
-		goto err_free_di_list;
-	}
-	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_rx_wqe_cyc *wqe =
-			mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
-		int f;
-
-		for (f = 0; f < rq->wqe.info.num_frags; f++) {
-			u32 frag_size = rq->wqe.info.arr[f].frag_size |
-				MLX5_HW_START_PADDING;
-
-			wqe->data[f].byte_count = cpu_to_be32(frag_size);
-			wqe->data[f].lkey = rq->mkey_be;
-		}
-		/* check if num_frags is not a pow of two */
-		if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
-			wqe->data[f].byte_count = 0;
-			wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
-			wqe->data[f].addr = 0;
-		}
-	}
-	return 0;
-
-err_free_di_list:
+	page_pool_destroy(rq->page_pool);
 	mlx5e_free_di_list(rq);
-err_free_frags:
 	kvfree(rq->wqe.frags);
-err_wq_cyc_destroy:
 	mlx5_wq_destroy(&rq->wq_ctrl);
-
-	return err;
 }
 
-static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
+static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params,
+			       struct mlx5e_rq *rq)
 {
-	page_pool_destroy(rq->page_pool);
-	mlx5e_free_di_list(rq);
-	kvfree(rq->wqe.frags);
-	mlx5_wq_destroy(&rq->wq_ctrl);
+	struct mlx5_core_dev *mdev = t->mdev;
+	struct mlx5e_priv *priv = t->priv;
+
+	rq->wq_type = params->rq_wq_type;
+	rq->pdev = mdev->device;
+	rq->netdev = priv->netdev;
+	rq->priv = priv;
+	rq->clock = &mdev->clock;
+	rq->tstamp = &priv->tstamp;
+	rq->mdev = mdev;
+	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	rq->stats = &priv->trap_stats.rq;
+	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
+	xdp_rxq_info_unused(&rq->xdp_rxq);
+	mlx5e_rq_set_trap_handlers(rq, params);
 }
 
-static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
-			      struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
-			      struct mlx5e_rq_param *rq_param,
-			      struct mlx5e_ch_stats *ch_stats,
-			      struct mlx5e_rq *rq)
+static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
 {
+	struct mlx5e_rq_param *rq_param = &t->rq_param;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_create_cq_param ccp = {};
 	struct dim_cq_moder trap_moder = {};
-	struct mlx5e_cq *cq = &rq->cq;
+	struct mlx5e_rq *rq = &t->rq;
+	int node;
 	int err;
 
-	ccp.node = dev_to_node(mdev->device);
-	ccp.ch_stats = ch_stats;
-	ccp.napi = napi;
+	node = dev_to_node(mdev->device);
+
+	ccp.node = node;
+	ccp.ch_stats = t->stats;
+	ccp.napi = &t->napi;
 	ccp.ix = 0;
-	err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
+	err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
 	if (err)
 		return err;
 
-	err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
+	mlx5e_init_trap_rq(t, &t->params, rq);
+	err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
 	if (err)
 		goto err_destroy_cq;
 
-	err = mlx5e_create_rq(rq, rq_param);
-	if (err)
-		goto err_free_rq;
-
-	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
-	if (err)
-		goto err_destroy_rq;
-
 	return 0;
 
-err_destroy_rq:
-	mlx5e_destroy_rq(rq);
-	mlx5e_free_rx_descs(rq);
-err_free_rq:
-	mlx5e_free_trap_rq(rq);
 err_destroy_cq:
-	mlx5e_close_cq(cq);
+	mlx5e_close_cq(&rq->cq);
 
 	return err;
 }
@@ -273,11 +173,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
 
 	netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
 
-	err = mlx5e_open_trap_rq(priv, &t->napi,
-				 &priv->trap_stats.rq,
-				 &t->params, &t->rq_param,
-				 &priv->trap_stats.ch,
-				 &t->rq);
+	err = mlx5e_open_trap_rq(priv, t);
 	if (unlikely(err))
 		goto err_napi_del;
 