/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_EFX_H
#define EFX_EFX_H

#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "filter.h"

int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);

/* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
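
/* The INDIRECT_CALL_n() wrappers used below compare the function pointer
 * against the named likely targets and call a match directly, so the common
 * cases avoid a retpoline-mitigated indirect branch on the hot path.
 */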
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
			       ef100_enqueue_skb, __efx_enqueue_skb,
			       tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data);
extern unsigned int efx_piobuf_size;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		INDIRECT_CALL_2(channel->efx->type->rx_packet,
				__ef100_rx_packet, __efx_rx_packet,
				channel);
}
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
	if (efx->type->rx_buf_hash_valid)
		return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
				       ef100_rx_buf_hash_valid,
				       prefix);
	return true;
}

/* Maximum number of TCP segments we support for soft-TSO */
#define EFX_TSO_MAX_SEGS	100

/* The smallest [rt]xq_entries that the driver supports.  RX minimum
 * is a bit arbitrary.  For TX, we must have space for at least 2
 * TSO skbs.
 */
#define EFX_RXQ_MIN_ENT		128U
#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))

/* All EF10 architecture NICs steal one bit of the DMAQ size for various
 * other purposes when counting TxQ entries, so we halve the queue size.
 */
#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
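
/* For example, if EFX_MAX_DMAQ_SIZE is 4096 descriptors, an EF10
 * TX queue is limited to 2048 usable entries.
 */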

static inline bool efx_rss_enabled(struct efx_nic *efx)
{
	return efx->rss_spread > 1;
}

/* Filters */

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace_equal: Flag for whether the specified filter may replace an
 *	existing filter with equal priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 *
 * If existing filters have equal match values to the new filter spec,
 * then the new filter might replace them or the function might fail,
 * as follows.
 *
 * 1. If the existing filters have lower priority, or @replace_equal
 * is set and they have equal priority, replace them.
 *
 * 2. If the existing filters have higher priority, return -%EPERM.
 *
 * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
 * support delivery to multiple recipients, return -%EEXIST.
 *
 * This implies that filters for multiple multicast recipients must
 * all be inserted with the same priority and @replace_equal = %false.
 */
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
					   struct efx_filter_spec *spec,
					   bool replace_equal)
{
	return efx->type->filter_insert(efx, spec, replace_equal);
}
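
/* Example (illustrative only; error handling elided): steer TCP/IPv4
 * traffic for a given local address/port to RX queue 1, using the spec
 * helpers from filter.h included above.
 *
 *	struct efx_filter_spec spec;
 *	s32 filter_id;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 1);
 *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip, htons(port));
 *	filter_id = efx_filter_insert_filter(efx, &spec, false);
 *
 * A negative filter_id here is one of the error codes described above,
 * e.g. -EPERM or -EEXIST.
 */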

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
					    enum efx_filter_priority priority,
					    u32 filter_id)
{
	return efx->type->filter_remove_safe(efx, priority, filter_id);
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to retrieve the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
			   enum efx_filter_priority priority,
			   u32 filter_id, struct efx_filter_spec *spec)
{
	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
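
/* Example (illustrative only): tear down a filter using an ID supplied
 * by userland; the _safe variants range-check the ID rather than trust it.
 *
 *	int rc = efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
 *					   user_filter_id);
 */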

static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
					   enum efx_filter_priority priority)
{
	return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
					enum efx_filter_priority priority,
					u32 *buf, u32 size)
{
	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
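
/* Example (illustrative only, similar to the walk the ethtool code does):
 * count the installed RX filters, fetch their IDs, then read back each spec.
 *
 *	u32 count = efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
 *	u32 *ids = kcalloc(count, sizeof(*ids), GFP_KERNEL);
 *	s32 n_ids = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
 *					  ids, count);
 *	for (i = 0; i < n_ids; i++)
 *		efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
 *					   ids[i], &spec);
 *	kfree(ids);
 */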

/* RSS contexts */
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops;

/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive);

/* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);

/* MTD */
#ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
	return efx->type->mtd_probe(efx);
}
void efx_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif

#ifdef CONFIG_SFC_SRIOV
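/* Number of VIs allotted to each VF (vi_scale is the log2 of this count) */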
static inline unsigned int efx_vf_size(struct efx_nic *efx)
{
	return 1 << efx->vi_scale;
}
#endif

static inline void efx_device_detach_sync(struct efx_nic *efx)
{
	struct net_device *dev = efx->net_dev;

	/* Lock/freeze all TX queues so that we can be sure the
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock_bh(dev);
}

static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
	if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
		netif_device_attach(efx->net_dev);
}

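/* Assert that sem is held for writing: if a read-trylock succeeds then no
 * writer held it, so drop the read lock, warn and report failure.
 */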
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem))) {
		up_read(sem);
		return false;
	}
	return true;
}

int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush);

#endif /* EFX_EFX_H */