// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "e1000_hw.h"
#include "igb.h"

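/* Switch a ring between the regular rx_buffer_info array (copy mode) and
 * the rx_buffer_info_zc array (zero-copy mode). Only one of the two is in
 * use at a time: the stale array is freed and its pointer NULLed, and the
 * active pointer gets a freshly allocated array sized for ring->count
 * entries.
 */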
static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
{
	int size = pool_present ?
		sizeof(*ring->rx_buffer_info_zc) * ring->count :
		sizeof(*ring->rx_buffer_info) * ring->count;
	void *buff_info = vmalloc(size);

	if (!buff_info)
		return -ENOMEM;

	if (pool_present) {
		vfree(ring->rx_buffer_info);
		ring->rx_buffer_info = NULL;
		ring->rx_buffer_info_zc = buff_info;
	} else {
		vfree(ring->rx_buffer_info_zc);
		ring->rx_buffer_info_zc = NULL;
		ring->rx_buffer_info = buff_info;
	}

	return 0;
}

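/* Quiesce a Tx/Rx queue pair: stop both rings in hardware, wait for
 * in-flight datapath users, disable the NAPI context the two rings share,
 * then drop any remaining buffers and reset the ring statistics.
 */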
static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
{
	struct igb_ring *tx_ring = adapter->tx_ring[qid];
	struct igb_ring *rx_ring = adapter->rx_ring[qid];
	struct e1000_hw *hw = &adapter->hw;

	set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);

	wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
	wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);

	synchronize_net();

	/* Rx/Tx share the same napi context. */
	napi_disable(&rx_ring->q_vector->napi);

	igb_clean_tx_ring(tx_ring);
	igb_clean_rx_ring(rx_ring);

	memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
	memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
}

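/* Re-enable a queue pair quiesced by igb_txrx_ring_disable(): reprogram
 * both rings in hardware, refill the Rx ring and restart the shared NAPI
 * context.
 */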
static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
{
	struct igb_ring *tx_ring = adapter->tx_ring[qid];
	struct igb_ring *rx_ring = adapter->rx_ring[qid];

	igb_configure_tx_ring(adapter, tx_ring);
	igb_configure_rx_ring(adapter, rx_ring);

	synchronize_net();

	clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));

	/* Rx/Tx share the same napi context. */
	napi_enable(&rx_ring->q_vector->napi);
}

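/**
 * igb_xsk_pool - Look up the AF_XDP buffer pool serving a ring
 * @adapter: board private structure
 * @ring: ring whose queue index identifies the pool
 *
 * Returns the pool registered for the ring's queue index, or NULL when
 * XDP is not enabled or the pool has not been DMA-mapped yet (pool->dev
 * is only set once the pool is DMA-mapped).
 */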
struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
				   struct igb_ring *ring)
{
	int qid = ring->queue_index;
	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);

	if (!igb_xdp_is_enabled(adapter))
		return NULL;

	return (pool && pool->dev) ? pool : NULL;
}

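/* Bind an AF_XDP buffer pool to queue pair @qid: validate the queue id,
 * DMA-map the pool, and, if the interface is up with XDP enabled, restart
 * the queue pair in zero-copy mode and kick NAPI so Rx processing starts.
 * On failure the DMA mapping is undone before returning.
 */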
static int igb_xsk_pool_enable(struct igb_adapter *adapter,
			       struct xsk_buff_pool *pool,
			       u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *rx_ring;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
	if (err)
		return err;

	rx_ring = adapter->rx_ring[qid];
	if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);

	if (if_running) {
		igb_txrx_ring_disable(adapter, qid);

		err = igb_realloc_rx_buffer_info(rx_ring, true);
		if (!err) {
			igb_txrx_ring_enable(adapter, qid);
			/* Kick start the NAPI context so that receiving will start */
			err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		}

		if (err) {
			xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

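/* Unbind the AF_XDP buffer pool from queue pair @qid: quiesce the queue
 * pair if it is running, release the pool's DMA mappings and return the
 * Rx ring to copy mode.
 */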
static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	struct igb_ring *rx_ring;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	rx_ring = adapter->rx_ring[qid];
	if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
	if (if_running)
		igb_txrx_ring_disable(adapter, qid);

	xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);

	if (if_running) {
		err = igb_realloc_rx_buffer_info(rx_ring, false);
		if (err)
			return err;

		igb_txrx_ring_enable(adapter, qid);
	}

	return 0;
}

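/**
 * igb_xsk_pool_setup - Enable or disable an AF_XDP buffer pool
 * @adapter: board private structure
 * @pool: buffer pool to bind, or NULL to unbind the current pool
 * @qid: queue pair index to operate on
 *
 * Entry point for XSK pool setup requests coming from the XDP layer.
 */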
int igb_xsk_pool_setup(struct igb_adapter *adapter,
		       struct xsk_buff_pool *pool,
		       u16 qid)
{
	return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
		      igb_xsk_pool_disable(adapter, qid);
}

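/**
 * igb_xsk_wakeup - Wake the NAPI context of an AF_XDP queue (ndo_xsk_wakeup)
 * @dev: network device
 * @qid: queue pair index to wake
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (not inspected; either wakes NAPI)
 *
 * If NAPI is not already scheduled for this queue, raise a software
 * interrupt so the interrupt handler schedules it. Also called internally
 * by igb_xsk_pool_enable() to kick-start Rx after a pool is bound.
 */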
int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct igb_adapter *adapter = netdev_priv(dev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_ring *ring;
	u32 eics = 0;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!igb_xdp_is_enabled(adapter))
		return -EINVAL;

	if (qid >= adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[qid];

	if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
		return -ENETDOWN;

	if (!READ_ONCE(ring->xsk_pool))
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		/* Cause software interrupt */
		if (adapter->flags & IGB_FLAG_HAS_MSIX) {
			eics |= ring->q_vector->eims_value;
			wr32(E1000_EICS, eics);
		} else {
			wr32(E1000_ICS, E1000_ICS_RXDMT0);
		}
	}

	return 0;
}