
Commit 8256e0c

axelf4 authored and marckleinebudde committed
can: kvaser_pciefd: Fix echo_skb race
The functions kvaser_pciefd_start_xmit() and
kvaser_pciefd_handle_ack_packet() raced to stop/wake TX queues and
get/put echo skbs, as kvaser_pciefd_can->echo_lock was only ever taken
when transmitting and KCAN_TX_NR_PACKETS_CURRENT gets decremented prior
to handling of ACKs. E.g., this caused the following error:

    can_put_echo_skb: BUG! echo_skb 5 is occupied!

Instead, use the synchronization helpers in netdev_queues.h. As those
piggyback on BQL barriers, start updating in-flight packets and bytes
counts as well.

Cc: [email protected]
Signed-off-by: Axel Forsman <[email protected]>
Tested-by: Jimmy Assarsson <[email protected]>
Reviewed-by: Jimmy Assarsson <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Marc Kleine-Budde <[email protected]>
1 parent 9176bd2 commit 8256e0c
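For context, the patch swaps the driver's ad-hoc echo_lock/netif_stop_queue handling for the lockless stop/wake helpers in <net/netdev_queues.h>. The sketch below illustrates that producer/consumer pattern in isolation, mirroring how the patch calls the helpers; it is not the driver's code, and the struct and function names (my_ring, my_tx_avail, my_xmit, my_tx_complete_one, my_tx_done) are hypothetical.

    /* Minimal sketch of the netdev_queues.h stop/wake pattern; names are
     * hypothetical, only the helper calls mirror the patch.
     */
    #include <linux/netdevice.h>
    #include <net/netdev_queues.h>

    struct my_ring {
            unsigned int max;        /* ring capacity */
            unsigned int head;       /* producer index, bumped in ndo_start_xmit */
            unsigned int tail;       /* consumer index, bumped on TX completion */
            unsigned int done_pkts;  /* completions not yet reported to BQL */
            unsigned int done_bytes;
    };

    static unsigned int my_tx_avail(const struct my_ring *r)
    {
            /* Unsigned head - tail stays correct across wraparound,
             * same idea as kvaser_pciefd_tx_avail() in the patch.
             */
            return r->max - (READ_ONCE(r->head) - READ_ONCE(r->tail));
    }

    /* Producer side, called from ndo_start_xmit */
    static netdev_tx_t my_xmit(struct my_ring *r, struct sk_buff *skb,
                               struct net_device *dev)
    {
            unsigned int len = skb->len;

            /* Stop queue 0 and back off if no slot is free (as the patch does) */
            if (!netif_subqueue_maybe_stop(dev, 0, my_tx_avail(r), 1, 1))
                    return NETDEV_TX_BUSY;

            /* ... post the frame to hardware here ... */
            netdev_sent_queue(dev, len);            /* BQL: bytes now in flight */
            WRITE_ONCE(r->head, r->head + 1);
            return NETDEV_TX_OK;
    }

    /* Consumer side: called once per completed frame from the IRQ/NAPI path */
    static void my_tx_complete_one(struct my_ring *r, unsigned int bytes)
    {
            /* Release pairs with the barriers inside the stop/wake helpers */
            smp_store_release(&r->tail, r->tail + 1);
            r->done_pkts++;
            r->done_bytes += bytes;
    }

    /* Consumer side: report the batch to BQL and wake queue 0 if room opened */
    static void my_tx_done(struct my_ring *r, struct net_device *dev)
    {
            if (!r->done_pkts)
                    return;
            netif_subqueue_completed_wake(dev, 0, r->done_pkts, r->done_bytes,
                                          my_tx_avail(r), 1);
            r->done_pkts = 0;
            r->done_bytes = 0;
    }

Batching the completed_wake call per interrupt, as the patch does once per DMA buffer in kvaser_pciefd_read_buffer(), keeps BQL's byte-limit estimates meaningful.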

File tree

1 file changed (+59 −34 lines)


drivers/net/can/kvaser_pciefd.c

Lines changed: 59 additions & 34 deletions
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/timer.h>
+#include <net/netdev_queues.h>
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Kvaser AB <[email protected]>");
@@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
 	void __iomem *reg_base;
 	struct can_berr_counter bec;
 	u8 cmd_seq;
+	u8 tx_max_count;
+	u8 tx_idx;
+	u8 ack_idx;
 	int err_rep_cnt;
-	int echo_idx;
+	unsigned int completed_tx_pkts;
+	unsigned int completed_tx_bytes;
 	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
-	spinlock_t echo_lock; /* Locks the message echo buffer */
 	struct timer_list bec_poll_timer;
 	struct completion start_comp, flush_comp;
 };
@@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
 	int ret;
 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
 
+	can->tx_idx = 0;
+	can->ack_idx = 0;
+
 	ret = open_candev(netdev);
 	if (ret)
 		return ret;
@@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
 		timer_delete(&can->bec_poll_timer);
 	}
 	can->can.state = CAN_STATE_STOPPED;
+	netdev_reset_queue(netdev);
 	close_candev(netdev);
 
 	return ret;
 }
 
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+	return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
+
 static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
-					   struct kvaser_pciefd_can *can,
+					   struct can_priv *can, u8 seq,
 					   struct sk_buff *skb)
 {
 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
 	int packet_size;
-	int seq = can->echo_idx;
 
 	memset(p, 0, sizeof(*p));
-	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+	if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
 		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
 
 	if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
 	} else {
 		p->header[1] |=
 			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
-				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
+				   can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
 	}
 
 	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
 					    struct net_device *netdev)
 {
 	struct kvaser_pciefd_can *can = netdev_priv(netdev);
-	unsigned long irq_flags;
 	struct kvaser_pciefd_tx_packet packet;
+	unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+	unsigned int frame_len;
 	int nr_words;
-	u8 count;
 
 	if (can_dev_dropped_skb(netdev, skb))
 		return NETDEV_TX_OK;
+	if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+		return NETDEV_TX_BUSY;
 
-	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
 
-	spin_lock_irqsave(&can->echo_lock, irq_flags);
 	/* Prepare and save echo skb in internal slot */
-	can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
-	/* Move echo index to the next slot */
-	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+	WRITE_ONCE(can->can.echo_skb[seq], NULL);
+	frame_len = can_skb_get_frame_len(skb);
+	can_put_echo_skb(skb, netdev, seq, frame_len);
+	netdev_sent_queue(netdev, frame_len);
+	WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
 
 	/* Write header to fifo */
 	iowrite32(packet.header[0],
@@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
 			  KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
 	}
 
-	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
-			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
-	/* No room for a new message, stop the queue until at least one
-	 * successful transmit
-	 */
-	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
-		netif_stop_queue(netdev);
-	spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+	netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
 
 	return NETDEV_TX_OK;
 }
@@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		can->kv_pcie = pcie;
 		can->cmd_seq = 0;
 		can->err_rep_cnt = 0;
+		can->completed_tx_pkts = 0;
+		can->completed_tx_bytes = 0;
 		can->bec.txerr = 0;
 		can->bec.rxerr = 0;
 
@@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		tx_nr_packets_max =
 			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
 				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+		can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
 
 		can->can.clock.freq = pcie->freq;
-		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
-		can->echo_idx = 0;
-		spin_lock_init(&can->echo_lock);
+		can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
 		spin_lock_init(&can->lock);
 
 		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
@@ -1510,19 +1518,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
 		netdev_dbg(can->can.dev, "Packet was flushed\n");
 	} else {
 		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
-		int len;
-		u8 count;
+		unsigned int len, frame_len = 0;
 		struct sk_buff *skb;
 
+		if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+			return 0;
 		skb = can->can.echo_skb[echo_idx];
-		if (skb)
-			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
-		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
-		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
-				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+		if (!skb)
+			return 0;
+		kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+		len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
 
-		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
-			netif_wake_queue(can->can.dev);
+		/* Pairs with barrier in kvaser_pciefd_start_xmit() */
+		smp_store_release(&can->ack_idx, can->ack_idx + 1);
+		can->completed_tx_pkts++;
+		can->completed_tx_bytes += frame_len;
 
 		if (!one_shot_fail) {
 			can->can.dev->stats.tx_bytes += len;
@@ -1638,11 +1648,26 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
 {
 	int pos = 0;
 	int res = 0;
+	unsigned int i;
 
 	do {
 		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
 	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
 
+	/* Report ACKs in this buffer to BQL en masse for correct periods */
+	for (i = 0; i < pcie->nr_channels; ++i) {
+		struct kvaser_pciefd_can *can = pcie->can[i];
+
+		if (!can->completed_tx_pkts)
+			continue;
+		netif_subqueue_completed_wake(can->can.dev, 0,
+					      can->completed_tx_pkts,
+					      can->completed_tx_bytes,
+					      kvaser_pciefd_tx_avail(can), 1);
+		can->completed_tx_pkts = 0;
+		can->completed_tx_bytes = 0;
+	}
+
 	return res;
 }
 