@@ -369,7 +369,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	dma_addr_t dma_addr;
 	unsigned char *dst;
 	int shift = 0;
-	int index;
+	int bufidx;
 	int i;
 
 	if (!pool->active)
@@ -385,14 +385,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	 * be 0.
 	 */
 	for (i = ind_bufp->index; i < count; ++i) {
-		index = pool->free_map[pool->next_free];
+		bufidx = pool->free_map[pool->next_free];
 
 		/* We maybe reusing the skb from earlier resets. Allocate
 		 * only if necessary. But since the LTB may have changed
 		 * during reset (see init_rx_pools()), update LTB below
 		 * even if reusing skb.
 		 */
-		skb = pool->rx_buff[index].skb;
+		skb = pool->rx_buff[bufidx].skb;
 		if (!skb) {
 			skb = netdev_alloc_skb(adapter->netdev,
 					       pool->buff_size);
@@ -407,24 +407,24 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		pool->next_free = (pool->next_free + 1) % pool->size;
 
 		/* Copy the skb to the long term mapped DMA buffer */
-		offset = index * pool->buff_size;
+		offset = bufidx * pool->buff_size;
 		dst = pool->long_term_buff.buff + offset;
 		memset(dst, 0, pool->buff_size);
 		dma_addr = pool->long_term_buff.addr + offset;
 
 		/* add the skb to an rx_buff in the pool */
-		pool->rx_buff[index].data = dst;
-		pool->rx_buff[index].dma = dma_addr;
-		pool->rx_buff[index].skb = skb;
-		pool->rx_buff[index].pool_index = pool->index;
-		pool->rx_buff[index].size = pool->buff_size;
+		pool->rx_buff[bufidx].data = dst;
+		pool->rx_buff[bufidx].dma = dma_addr;
+		pool->rx_buff[bufidx].skb = skb;
+		pool->rx_buff[bufidx].pool_index = pool->index;
+		pool->rx_buff[bufidx].size = pool->buff_size;
 
 		/* queue the rx_buff for the next send_subcrq_indirect */
 		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 		memset(sub_crq, 0, sizeof(*sub_crq));
 		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
 		sub_crq->rx_add.correlator =
-			cpu_to_be64((u64)&pool->rx_buff[index]);
+			cpu_to_be64((u64)&pool->rx_buff[bufidx]);
 		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
 		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
 
@@ -466,10 +466,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		sub_crq = &ind_bufp->indir_arr[i];
 		rx_buff = (struct ibmvnic_rx_buff *)
 				be64_to_cpu(sub_crq->rx_add.correlator);
-		index = (int)(rx_buff - pool->rx_buff);
-		pool->free_map[pool->next_free] = index;
-		dev_kfree_skb_any(pool->rx_buff[index].skb);
-		pool->rx_buff[index].skb = NULL;
+		bufidx = (int)(rx_buff - pool->rx_buff);
+		pool->free_map[pool->next_free] = bufidx;
+		dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+		pool->rx_buff[bufidx].skb = NULL;
 	}
 	adapter->replenish_add_buff_failure += ind_bufp->index;
 	atomic_add(buffers_added, &pool->available);
@@ -1926,7 +1926,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int offset;
 	int num_entries = 1;
 	unsigned char *dst;
-	int index = 0;
+	int bufidx = 0;
 	u8 proto = 0;
 
 	/* If a reset is in progress, drop the packet since
@@ -1960,9 +1960,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	else
 		tx_pool = &adapter->tx_pool[queue_num];
 
-	index = tx_pool->free_map[tx_pool->consumer_index];
+	bufidx = tx_pool->free_map[tx_pool->consumer_index];
 
-	if (index == IBMVNIC_INVALID_MAP) {
+	if (bufidx == IBMVNIC_INVALID_MAP) {
 		dev_kfree_skb_any(skb);
 		tx_send_failed++;
 		tx_dropped++;
@@ -1973,7 +1973,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
 
-	offset = index * tx_pool->buf_size;
+	offset = bufidx * tx_pool->buf_size;
 	dst = tx_pool->long_term_buff.buff + offset;
 	memset(dst, 0, tx_pool->buf_size);
 	data_dma_addr = tx_pool->long_term_buff.addr + offset;
@@ -2003,9 +2003,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_pool->consumer_index =
 		(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
 
-	tx_buff = &tx_pool->tx_buff[index];
+	tx_buff = &tx_pool->tx_buff[bufidx];
 	tx_buff->skb = skb;
-	tx_buff->index = index;
+	tx_buff->index = bufidx;
 	tx_buff->pool_index = queue_num;
 
 	memset(&tx_crq, 0, sizeof(tx_crq));
@@ -2017,9 +2017,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (skb_is_gso(skb))
 		tx_crq.v1.correlator =
-			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+			cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
 	else
-		tx_crq.v1.correlator = cpu_to_be32(index);
+		tx_crq.v1.correlator = cpu_to_be32(bufidx);
 	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
 	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);