1010int netdev_rx_queue_restart (struct net_device * dev , unsigned int rxq_idx )
1111{
1212 struct netdev_rx_queue * rxq = __netif_get_rx_queue (dev , rxq_idx );
13+ const struct netdev_queue_mgmt_ops * qops = dev -> queue_mgmt_ops ;
1314 void * new_mem , * old_mem ;
1415 int err ;
1516
16- if (!dev -> queue_mgmt_ops || !dev -> queue_mgmt_ops -> ndo_queue_stop ||
17- !dev -> queue_mgmt_ops -> ndo_queue_mem_free ||
18- !dev -> queue_mgmt_ops -> ndo_queue_mem_alloc ||
19- !dev -> queue_mgmt_ops -> ndo_queue_start )
17+ if (!qops || !qops -> ndo_queue_stop || !qops -> ndo_queue_mem_free ||
18+ !qops -> ndo_queue_mem_alloc || !qops -> ndo_queue_start )
2019 return - EOPNOTSUPP ;
2120
2221 ASSERT_RTNL ();
2322
24- new_mem = kvzalloc (dev -> queue_mgmt_ops -> ndo_queue_mem_size , GFP_KERNEL );
23+ new_mem = kvzalloc (qops -> ndo_queue_mem_size , GFP_KERNEL );
2524 if (!new_mem )
2625 return - ENOMEM ;
2726
28- old_mem = kvzalloc (dev -> queue_mgmt_ops -> ndo_queue_mem_size , GFP_KERNEL );
27+ old_mem = kvzalloc (qops -> ndo_queue_mem_size , GFP_KERNEL );
2928 if (!old_mem ) {
3029 err = - ENOMEM ;
3130 goto err_free_new_mem ;
3231 }
3332
34- err = dev -> queue_mgmt_ops -> ndo_queue_mem_alloc (dev , new_mem , rxq_idx );
33+ err = qops -> ndo_queue_mem_alloc (dev , new_mem , rxq_idx );
3534 if (err )
3635 goto err_free_old_mem ;
3736
3837 err = page_pool_check_memory_provider (dev , rxq );
3938 if (err )
4039 goto err_free_new_queue_mem ;
4140
42- err = dev -> queue_mgmt_ops -> ndo_queue_stop (dev , old_mem , rxq_idx );
41+ err = qops -> ndo_queue_stop (dev , old_mem , rxq_idx );
4342 if (err )
4443 goto err_free_new_queue_mem ;
4544
46- err = dev -> queue_mgmt_ops -> ndo_queue_start (dev , new_mem , rxq_idx );
45+ err = qops -> ndo_queue_start (dev , new_mem , rxq_idx );
4746 if (err )
4847 goto err_start_queue ;
4948
50- dev -> queue_mgmt_ops -> ndo_queue_mem_free (dev , old_mem );
49+ qops -> ndo_queue_mem_free (dev , old_mem );
5150
5251 kvfree (old_mem );
5352 kvfree (new_mem );
@@ -62,15 +61,15 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
6261 * WARN if we fail to recover the old rx queue, and at least free
6362 * old_mem so we don't also leak that.
6463 */
65- if (dev -> queue_mgmt_ops -> ndo_queue_start (dev , old_mem , rxq_idx )) {
64+ if (qops -> ndo_queue_start (dev , old_mem , rxq_idx )) {
6665 WARN (1 ,
6766 "Failed to restart old queue in error path. RX queue %d may be unhealthy." ,
6867 rxq_idx );
69- dev -> queue_mgmt_ops -> ndo_queue_mem_free (dev , old_mem );
68+ qops -> ndo_queue_mem_free (dev , old_mem );
7069 }
7170
7271err_free_new_queue_mem :
73- dev -> queue_mgmt_ops -> ndo_queue_mem_free (dev , new_mem );
72+ qops -> ndo_queue_mem_free (dev , new_mem );
7473
7574err_free_old_mem :
7675 kvfree (old_mem );
/* NOTE(review): patch is truncated here — the err_free_new_mem tail
 * (kvfree(new_mem); return err; and the closing brace) is not shown. */