Skip to content

Commit 8880fc6

Browse files
sukadevkuba-moo
authored and committed
ibmvnic: rename local variable index to bufidx
The local variable 'index' is heavily used in some functions and is confusing with the presence of other "index" fields like pool->index, ->consumer_index, etc. Rename it to bufidx to better reflect that it's the index of a buffer in the pool. Signed-off-by: Sukadev Bhattiprolu <[email protected]> Signed-off-by: Dany Madden <[email protected]> Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 7b05c54 commit 8880fc6

File tree

1 file changed

+22
-22
lines changed

1 file changed

+22
-22
lines changed

drivers/net/ethernet/ibm/ibmvnic.c

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -369,7 +369,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
369369
dma_addr_t dma_addr;
370370
unsigned char *dst;
371371
int shift = 0;
372-
int index;
372+
int bufidx;
373373
int i;
374374

375375
if (!pool->active)
@@ -385,14 +385,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
385385
* be 0.
386386
*/
387387
for (i = ind_bufp->index; i < count; ++i) {
388-
index = pool->free_map[pool->next_free];
388+
bufidx = pool->free_map[pool->next_free];
389389

390390
/* We maybe reusing the skb from earlier resets. Allocate
391391
* only if necessary. But since the LTB may have changed
392392
* during reset (see init_rx_pools()), update LTB below
393393
* even if reusing skb.
394394
*/
395-
skb = pool->rx_buff[index].skb;
395+
skb = pool->rx_buff[bufidx].skb;
396396
if (!skb) {
397397
skb = netdev_alloc_skb(adapter->netdev,
398398
pool->buff_size);
@@ -407,24 +407,24 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
407407
pool->next_free = (pool->next_free + 1) % pool->size;
408408

409409
/* Copy the skb to the long term mapped DMA buffer */
410-
offset = index * pool->buff_size;
410+
offset = bufidx * pool->buff_size;
411411
dst = pool->long_term_buff.buff + offset;
412412
memset(dst, 0, pool->buff_size);
413413
dma_addr = pool->long_term_buff.addr + offset;
414414

415415
/* add the skb to an rx_buff in the pool */
416-
pool->rx_buff[index].data = dst;
417-
pool->rx_buff[index].dma = dma_addr;
418-
pool->rx_buff[index].skb = skb;
419-
pool->rx_buff[index].pool_index = pool->index;
420-
pool->rx_buff[index].size = pool->buff_size;
416+
pool->rx_buff[bufidx].data = dst;
417+
pool->rx_buff[bufidx].dma = dma_addr;
418+
pool->rx_buff[bufidx].skb = skb;
419+
pool->rx_buff[bufidx].pool_index = pool->index;
420+
pool->rx_buff[bufidx].size = pool->buff_size;
421421

422422
/* queue the rx_buff for the next send_subcrq_indirect */
423423
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
424424
memset(sub_crq, 0, sizeof(*sub_crq));
425425
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
426426
sub_crq->rx_add.correlator =
427-
cpu_to_be64((u64)&pool->rx_buff[index]);
427+
cpu_to_be64((u64)&pool->rx_buff[bufidx]);
428428
sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
429429
sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
430430

@@ -466,10 +466,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
466466
sub_crq = &ind_bufp->indir_arr[i];
467467
rx_buff = (struct ibmvnic_rx_buff *)
468468
be64_to_cpu(sub_crq->rx_add.correlator);
469-
index = (int)(rx_buff - pool->rx_buff);
470-
pool->free_map[pool->next_free] = index;
471-
dev_kfree_skb_any(pool->rx_buff[index].skb);
472-
pool->rx_buff[index].skb = NULL;
469+
bufidx = (int)(rx_buff - pool->rx_buff);
470+
pool->free_map[pool->next_free] = bufidx;
471+
dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
472+
pool->rx_buff[bufidx].skb = NULL;
473473
}
474474
adapter->replenish_add_buff_failure += ind_bufp->index;
475475
atomic_add(buffers_added, &pool->available);
@@ -1926,7 +1926,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
19261926
unsigned int offset;
19271927
int num_entries = 1;
19281928
unsigned char *dst;
1929-
int index = 0;
1929+
int bufidx = 0;
19301930
u8 proto = 0;
19311931

19321932
/* If a reset is in progress, drop the packet since
@@ -1960,9 +1960,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
19601960
else
19611961
tx_pool = &adapter->tx_pool[queue_num];
19621962

1963-
index = tx_pool->free_map[tx_pool->consumer_index];
1963+
bufidx = tx_pool->free_map[tx_pool->consumer_index];
19641964

1965-
if (index == IBMVNIC_INVALID_MAP) {
1965+
if (bufidx == IBMVNIC_INVALID_MAP) {
19661966
dev_kfree_skb_any(skb);
19671967
tx_send_failed++;
19681968
tx_dropped++;
@@ -1973,7 +1973,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
19731973

19741974
tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
19751975

1976-
offset = index * tx_pool->buf_size;
1976+
offset = bufidx * tx_pool->buf_size;
19771977
dst = tx_pool->long_term_buff.buff + offset;
19781978
memset(dst, 0, tx_pool->buf_size);
19791979
data_dma_addr = tx_pool->long_term_buff.addr + offset;
@@ -2003,9 +2003,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
20032003
tx_pool->consumer_index =
20042004
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
20052005

2006-
tx_buff = &tx_pool->tx_buff[index];
2006+
tx_buff = &tx_pool->tx_buff[bufidx];
20072007
tx_buff->skb = skb;
2008-
tx_buff->index = index;
2008+
tx_buff->index = bufidx;
20092009
tx_buff->pool_index = queue_num;
20102010

20112011
memset(&tx_crq, 0, sizeof(tx_crq));
@@ -2017,9 +2017,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
20172017

20182018
if (skb_is_gso(skb))
20192019
tx_crq.v1.correlator =
2020-
cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
2020+
cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
20212021
else
2022-
tx_crq.v1.correlator = cpu_to_be32(index);
2022+
tx_crq.v1.correlator = cpu_to_be32(bufidx);
20232023
tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
20242024
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
20252025
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

0 commit comments

Comments
 (0)