@@ -85,18 +85,25 @@ static const struct counter_desc sw_rep_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
 };

-struct vport_stats {
-	u64 vport_rx_packets;
-	u64 vport_tx_packets;
-	u64 vport_rx_bytes;
-	u64 vport_tx_bytes;
-};
-
 static const struct counter_desc vport_rep_stats_desc[] = {
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     rx_vport_rdma_unicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     tx_vport_rdma_unicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     rx_vport_rdma_multicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     rx_vport_rdma_multicast_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     tx_vport_rdma_multicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     tx_vport_rdma_multicast_bytes) },
 };

 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
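Note: the reworked descriptors reference struct mlx5e_rep_stats, whose definition is not part of this hunk (it presumably lives in en_stats.h). Since MLX5E_DECLARE_STAT pairs a counter name with a byte offset into the given struct, all these entries assume is one u64 field per counter name used above. A minimal sketch consistent with the descriptors (field order assumed, not taken from the real header):

struct mlx5e_rep_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
	u64 rx_vport_rdma_unicast_packets;
	u64 rx_vport_rdma_unicast_bytes;
	u64 tx_vport_rdma_unicast_packets;
	u64 tx_vport_rdma_unicast_bytes;
	u64 rx_vport_rdma_multicast_packets;
	u64 rx_vport_rdma_multicast_bytes;
	u64 tx_vport_rdma_multicast_packets;
	u64 tx_vport_rdma_multicast_bytes;
};
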
@@ -161,33 +168,80 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
 	int i;

 	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
 						   vport_rep_stats_desc, i);
 	return idx;
 }

 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
 {
+	struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
+	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
-	struct rtnl_link_stats64 *vport_stats;
-	struct ifla_vf_stats vf_stats;
+	u32 *out;
 	int err;

-	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+	out = kvzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return;
+
+	err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
 	if (err) {
 		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
 			    rep->vport, err);
 		return;
 	}

-	vport_stats = &priv->stats.vf_vport;
+	#define MLX5_GET_CTR(p, x) \
+		MLX5_GET64(query_vport_counter_out, p, x)
 	/* flip tx/rx as we are reporting the counters for the switch vport */
-	vport_stats->rx_packets = vf_stats.tx_packets;
-	vport_stats->rx_bytes = vf_stats.tx_bytes;
-	vport_stats->tx_packets = vf_stats.rx_packets;
-	vport_stats->tx_bytes = vf_stats.rx_bytes;
+	rep_stats->vport_rx_packets =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+
+	rep_stats->vport_tx_packets =
+		MLX5_GET_CTR(out, received_ib_unicast.packets) +
+		MLX5_GET_CTR(out, received_eth_unicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets) +
+		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+
+	rep_stats->vport_rx_bytes =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+	rep_stats->vport_tx_bytes =
+		MLX5_GET_CTR(out, received_ib_unicast.octets) +
+		MLX5_GET_CTR(out, received_eth_unicast.octets) +
+		MLX5_GET_CTR(out, received_ib_multicast.octets) +
+		MLX5_GET_CTR(out, received_eth_multicast.octets) +
+		MLX5_GET_CTR(out, received_eth_broadcast.octets);
+
+	rep_stats->rx_vport_rdma_unicast_packets =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
+	rep_stats->tx_vport_rdma_unicast_packets =
+		MLX5_GET_CTR(out, received_ib_unicast.packets);
+	rep_stats->rx_vport_rdma_unicast_bytes =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
+	rep_stats->tx_vport_rdma_unicast_bytes =
+		MLX5_GET_CTR(out, received_ib_unicast.octets);
+	rep_stats->rx_vport_rdma_multicast_packets =
+		MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
+	rep_stats->tx_vport_rdma_multicast_packets =
+		MLX5_GET_CTR(out, received_ib_multicast.packets);
+	rep_stats->rx_vport_rdma_multicast_bytes =
+		MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
+	rep_stats->tx_vport_rdma_multicast_bytes =
+		MLX5_GET_CTR(out, received_ib_multicast.octets);
+
+	kvfree(out);
 }

 static void mlx5e_rep_get_strings(struct net_device *dev,
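A closing note on the mechanics: the two hunks have to change in lockstep because the stats group reads counters by byte offset. The counter_desc entries built with MLX5E_DECLARE_STAT store offsets into struct mlx5e_rep_stats, and the fill handler above dereferences them relative to &priv->stats.rep_stats, so the descriptor type and the base pointer must describe the same object. Below is a simplified, self-contained userspace sketch of this name-plus-offset descriptor pattern; the demo_* names and macros are hypothetical stand-ins, not the driver's exact definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the stats struct the descriptors point into. */
struct demo_stats {
	uint64_t vport_rx_packets;
	uint64_t vport_rx_bytes;
};

/* Stand-in for struct counter_desc: a printable name plus a byte offset. */
struct demo_counter_desc {
	const char *name;
	size_t offset;
};

/* Pair the stringified field name with its offset in the given struct. */
#define DEMO_DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

/* Read a 64-bit counter at desc[i].offset bytes past the base pointer. */
#define DEMO_READ_CTR64(ptr, desc, i) \
	(*(const uint64_t *)((const char *)(ptr) + (desc)[i].offset))

static const struct demo_counter_desc demo_stats_desc[] = {
	DEMO_DECLARE_STAT(struct demo_stats, vport_rx_packets),
	DEMO_DECLARE_STAT(struct demo_stats, vport_rx_bytes),
};

int main(void)
{
	struct demo_stats stats = { .vport_rx_packets = 7, .vport_rx_bytes = 1024 };
	size_t i;

	/* Same walk the fill handler does: name from the table, value by offset. */
	for (i = 0; i < sizeof(demo_stats_desc) / sizeof(demo_stats_desc[0]); i++)
		printf("%s: %llu\n", demo_stats_desc[i].name,
		       (unsigned long long)DEMO_READ_CTR64(&stats, demo_stats_desc, i));
	return 0;
}

Keeping the strings and the values in one table is what lets the ethtool output stay in sync with the struct layout; mismatching the descriptor type and the base pointer would silently read the wrong memory, which is why the diff switches both together.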