Skip to content

Commit c4bc44c

Browse files
haokexin authored
and davem330 committed
net: fec: fix the race between xmit and bdp reclaiming path
When we transmit a fragmented skb, we may run into a race like the following scenario (assume txq->cur_tx is next to txq->dirty_tx):

    cpu 0:
        fec_enet_txq_submit_skb
            reserve a bdp for the first fragment
            fec_enet_txq_submit_frag_skb
                update the bdp for the other fragments
                update txq->cur_tx

    cpu 1 (concurrently):
        fec_enet_tx_queue
            bdp = fec_enet_get_nextdesc(txq->dirty_tx, fep, queue_id);
            This bdp is the bdp reserved for the first segment. Given that
            this bdp's BD_ENET_TX_READY bit is not set and txq->cur_tx
            already points to a bdp beyond this one, we think this is a
            completed bdp and try to reclaim it.

    cpu 0 (continuing):
            update the bdp for the first segment
            update txq->cur_tx

So we shouldn't update txq->cur_tx until all the updates to the bdps used for fragments are performed. Also add the corresponding memory barriers to guarantee that the updates to the bdps, dirty_tx and cur_tx are performed in the proper order. Signed-off-by: Kevin Hao <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 2cf1b5c commit c4bc44c

File tree

1 file changed

+20
-15
lines changed

1 file changed

+20
-15
lines changed

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 20 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -364,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
364364
return 0;
365365
}
366366

367-
static int
367+
static struct bufdesc *
368368
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
369369
struct sk_buff *skb,
370370
struct net_device *ndev)
@@ -439,18 +439,15 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
439439
bdp->cbd_sc = status;
440440
}
441441

442-
txq->cur_tx = bdp;
443-
444-
return 0;
445-
442+
return bdp;
446443
dma_mapping_error:
447444
bdp = txq->cur_tx;
448445
for (i = 0; i < frag; i++) {
449446
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
450447
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
451448
bdp->cbd_datlen, DMA_TO_DEVICE);
452449
}
453-
return NETDEV_TX_OK;
450+
return ERR_PTR(-ENOMEM);
454451
}
455452

456453
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
@@ -467,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
467464
unsigned int estatus = 0;
468465
unsigned int index;
469466
int entries_free;
470-
int ret;
471467

472468
entries_free = fec_enet_get_free_txdesc_num(fep, txq);
473469
if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -485,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
485481

486482
/* Fill in a Tx ring entry */
487483
bdp = txq->cur_tx;
484+
last_bdp = bdp;
488485
status = bdp->cbd_sc;
489486
status &= ~BD_ENET_TX_STATS;
490487

@@ -513,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
513510
}
514511

515512
if (nr_frags) {
516-
ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
517-
if (ret)
518-
return ret;
513+
last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
514+
if (IS_ERR(last_bdp))
515+
return NETDEV_TX_OK;
519516
} else {
520517
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
521518
if (fep->bufdesc_ex) {
@@ -544,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
544541
ebdp->cbd_esc = estatus;
545542
}
546543

547-
last_bdp = txq->cur_tx;
548544
index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
549545
/* Save skb pointer */
550546
txq->tx_skbuff[index] = skb;
@@ -563,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
563559

564560
skb_tx_timestamp(skb);
565561

562+
/* Make sure the update to bdp and tx_skbuff are performed before
563+
* cur_tx.
564+
*/
565+
wmb();
566566
txq->cur_tx = bdp;
567567

568568
/* Trigger transmission start */
@@ -1218,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
12181218
/* get next bdp of dirty_tx */
12191219
bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
12201220

1221-
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
1222-
1223-
/* current queue is empty */
1224-
if (bdp == txq->cur_tx)
1221+
while (bdp != READ_ONCE(txq->cur_tx)) {
1222+
/* Order the load of cur_tx and cbd_sc */
1223+
rmb();
1224+
status = READ_ONCE(bdp->cbd_sc);
1225+
if (status & BD_ENET_TX_READY)
12251226
break;
12261227

12271228
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1275,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
12751276
/* Free the sk buffer associated with this last transmit */
12761277
dev_kfree_skb_any(skb);
12771278

1279+
/* Make sure the update to bdp and tx_skbuff are performed
1280+
* before dirty_tx
1281+
*/
1282+
wmb();
12781283
txq->dirty_tx = bdp;
12791284

12801285
/* Update pointer to next buffer descriptor to be transmitted */

0 commit comments

Comments (0)