Skip to content

Commit 127b721

Browse files
Nick Child authored and davem330 committed
ibmveth: Always stop tx queues during close
netif_stop_all_queues must be called before calling H_FREE_LOGICAL_LAN. As a result, we can remove the pool_config field from the ibmveth adapter structure. Some device configuration changes call ibmveth_close in order to free the current resources held by the device. These functions then make their changes and call ibmveth_open to reallocate and reserve resources for the device. Prior to this commit, the flag pool_config was used to tell ibmveth_close that it should not halt the transmit queue. pool_config was introduced in commit 860f242 ("[PATCH] ibmveth change buffer pools dynamically") to avoid interrupting the tx flow when making rx config changes. Since then, other commits adopted this approach, even if making tx config changes. The issue with this approach was that the hypervisor freed all of the device's control structures after the hcall H_FREE_LOGICAL_LAN was performed but the transmit queues were never stopped. So the higher layers in the network stack would continue transmission but any H_SEND_LOGICAL_LAN hcall would fail with H_PARAMETER until the hypervisor's structures for the device were allocated with the H_REGISTER_LOGICAL_LAN hcall in ibmveth_open. This resulted in no real networking harm but did cause several of these error messages to be logged: "h_send_logical_lan failed with rc=-4" So, instead of trying to keep the transmit queues alive during network configuration changes, just stop the queues, make necessary changes then restart the queues. Signed-off-by: Nick Child <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 233baf9 commit 127b721

File tree

2 files changed

+1
-18
lines changed

2 files changed

+1
-18
lines changed

drivers/net/ethernet/ibm/ibmveth.c

Lines changed: 1 addition & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -690,8 +690,7 @@ static int ibmveth_close(struct net_device *netdev)
690690

691691
napi_disable(&adapter->napi);
692692

693-
if (!adapter->pool_config)
694-
netif_tx_stop_all_queues(netdev);
693+
netif_tx_stop_all_queues(netdev);
695694

696695
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
697696

@@ -799,9 +798,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
799798

800799
if (netif_running(dev)) {
801800
restart = 1;
802-
adapter->pool_config = 1;
803801
ibmveth_close(dev);
804-
adapter->pool_config = 0;
805802
}
806803

807804
set_attr = 0;
@@ -883,9 +880,7 @@ static int ibmveth_set_tso(struct net_device *dev, u32 data)
883880

884881
if (netif_running(dev)) {
885882
restart = 1;
886-
adapter->pool_config = 1;
887883
ibmveth_close(dev);
888-
adapter->pool_config = 0;
889884
}
890885

891886
set_attr = 0;
@@ -1535,9 +1530,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
15351530
only the buffer pools necessary to hold the new MTU */
15361531
if (netif_running(adapter->netdev)) {
15371532
need_restart = 1;
1538-
adapter->pool_config = 1;
15391533
ibmveth_close(adapter->netdev);
1540-
adapter->pool_config = 0;
15411534
}
15421535

15431536
/* Look for an active buffer pool that can hold the new MTU */
@@ -1701,7 +1694,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
17011694
adapter->vdev = dev;
17021695
adapter->netdev = netdev;
17031696
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1704-
adapter->pool_config = 0;
17051697
ibmveth_init_link_settings(netdev);
17061698

17071699
netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
@@ -1841,9 +1833,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
18411833
return -ENOMEM;
18421834
}
18431835
pool->active = 1;
1844-
adapter->pool_config = 1;
18451836
ibmveth_close(netdev);
1846-
adapter->pool_config = 0;
18471837
if ((rc = ibmveth_open(netdev)))
18481838
return rc;
18491839
} else {
@@ -1869,10 +1859,8 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
18691859
}
18701860

18711861
if (netif_running(netdev)) {
1872-
adapter->pool_config = 1;
18731862
ibmveth_close(netdev);
18741863
pool->active = 0;
1875-
adapter->pool_config = 0;
18761864
if ((rc = ibmveth_open(netdev)))
18771865
return rc;
18781866
}
@@ -1883,9 +1871,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
18831871
return -EINVAL;
18841872
} else {
18851873
if (netif_running(netdev)) {
1886-
adapter->pool_config = 1;
18871874
ibmveth_close(netdev);
1888-
adapter->pool_config = 0;
18891875
pool->size = value;
18901876
if ((rc = ibmveth_open(netdev)))
18911877
return rc;
@@ -1898,9 +1884,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
18981884
return -EINVAL;
18991885
} else {
19001886
if (netif_running(netdev)) {
1901-
adapter->pool_config = 1;
19021887
ibmveth_close(netdev);
1903-
adapter->pool_config = 0;
19041888
pool->buff_size = value;
19051889
if ((rc = ibmveth_open(netdev)))
19061890
return rc;

drivers/net/ethernet/ibm/ibmveth.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -146,7 +146,6 @@ struct ibmveth_adapter {
146146
dma_addr_t filter_list_dma;
147147
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
148148
struct ibmveth_rx_q rx_queue;
149-
int pool_config;
150149
int rx_csum;
151150
int large_send;
152151
bool is_active_trunk;

0 commit comments

Comments (0)