
Commit 9bd2702

Lincoln Ramsay authored and kuba-moo committed
aquantia: Remove the build_skb path
When performing IPv6 forwarding, there is an expectation that SKBs will have some headroom. When forwarding a packet from the aquantia driver, this does not always happen, triggering a kernel warning.

aq_ring.c has this code (edited slightly for brevity):

    if (buff->is_eop &&
        buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
            skb = build_skb(aq_buf_vaddr(&buff->rxdata),
                            AQ_CFG_RX_FRAME_MAX);
    } else {
            skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);

There is a significant difference between the SKB produced by these two code paths. When napi_alloc_skb creates an SKB, there is a certain amount of headroom reserved. However, this is not done in the build_skb code path.

As the hardware buffer that build_skb is built around does not handle the presence of the SKB header, this code path is being removed and the napi_alloc_skb path will always be used. This code path does have to copy the packet header into the SKB, but it adds the packet data as a frag.

Fixes: 018423e ("net: ethernet: aquantia: Add ring support code")
Signed-off-by: Lincoln Ramsay <[email protected]>
Link: https://lore.kernel.org/r/MWHPR1001MB23184F3EAFA413E0D1910EC9E8FC0@MWHPR1001MB2318.namprd10.prod.outlook.com
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent d549699 commit 9bd2702
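For context on the headroom point in the commit message, the sketch below contrasts the two allocators. It is illustrative only and not part of the patch: RX_HDR_SIZE and RX_FRAME_MAX are stand-in constants for the driver's AQ_CFG_RX_HDR_SIZE and AQ_CFG_RX_FRAME_MAX, and both helper functions are hypothetical.

/* Illustrative sketch only; not code from this patch. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_HDR_SIZE	256U	/* stand-in for AQ_CFG_RX_HDR_SIZE */
#define RX_FRAME_MAX	2048U	/* stand-in for AQ_CFG_RX_FRAME_MAX */

/* napi_alloc_skb() reserves headroom (typically NET_SKB_PAD + NET_IP_ALIGN)
 * in front of skb->data, so a forwarding path can later push an outgoing
 * link-layer header without reallocating the buffer.
 */
static struct sk_buff *rx_skb_with_headroom(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_alloc_skb(napi, RX_HDR_SIZE);

	if (skb)
		pr_debug("napi_alloc_skb headroom: %u\n", skb_headroom(skb));
	return skb;
}

/* build_skb() wraps the given buffer as-is: skb->data starts at the buffer,
 * so there is no reserved headroom unless the hardware left space in front
 * of the received frame.
 */
static struct sk_buff *rx_skb_wrapping_buffer(void *rx_buf)
{
	struct sk_buff *skb = build_skb(rx_buf, RX_FRAME_MAX);

	if (skb)
		pr_debug("build_skb headroom: %u\n", skb_headroom(skb));
	return skb;
}

The retained napi_alloc_skb path pays for that headroom with a small memcpy of the packet header, while the payload is attached as a page frag, which is the trade-off the diff below implements.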

File tree
1 file changed: +52 -74 lines
  • drivers/net/ethernet/aquantia/atlantic/aq_ring.c

drivers/net/ethernet/aquantia/atlantic/aq_ring.c

Lines changed: 52 additions & 74 deletions
@@ -413,85 +413,63 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 					      buff->rxdata.pg_off,
 					      buff->len, DMA_FROM_DEVICE);
 
-		/* for single fragment packets use build_skb() */
-		if (buff->is_eop &&
-		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
-			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
+		skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
+		if (unlikely(!skb)) {
+			u64_stats_update_begin(&self->stats.rx.syncp);
+			self->stats.rx.skb_alloc_fails++;
+			u64_stats_update_end(&self->stats.rx.syncp);
+			err = -ENOMEM;
+			goto err_exit;
+		}
+		if (is_ptp_ring)
+			buff->len -=
+				aq_ptp_extract_ts(self->aq_nic, skb,
+						  aq_buf_vaddr(&buff->rxdata),
+						  buff->len);
+
+		hdr_len = buff->len;
+		if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+			hdr_len = eth_get_headlen(skb->dev,
+						  aq_buf_vaddr(&buff->rxdata),
+						  AQ_CFG_RX_HDR_SIZE);
+
+		memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+		       ALIGN(hdr_len, sizeof(long)));
+
+		if (buff->len - hdr_len > 0) {
+			skb_add_rx_frag(skb, 0, buff->rxdata.page,
+					buff->rxdata.pg_off + hdr_len,
+					buff->len - hdr_len,
 					AQ_CFG_RX_FRAME_MAX);
-			if (unlikely(!skb)) {
-				u64_stats_update_begin(&self->stats.rx.syncp);
-				self->stats.rx.skb_alloc_fails++;
-				u64_stats_update_end(&self->stats.rx.syncp);
-				err = -ENOMEM;
-				goto err_exit;
-			}
-			if (is_ptp_ring)
-				buff->len -=
-					aq_ptp_extract_ts(self->aq_nic, skb,
-							  aq_buf_vaddr(&buff->rxdata),
-							  buff->len);
-			skb_put(skb, buff->len);
 			page_ref_inc(buff->rxdata.page);
-		} else {
-			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
-			if (unlikely(!skb)) {
-				u64_stats_update_begin(&self->stats.rx.syncp);
-				self->stats.rx.skb_alloc_fails++;
-				u64_stats_update_end(&self->stats.rx.syncp);
-				err = -ENOMEM;
-				goto err_exit;
-			}
-			if (is_ptp_ring)
-				buff->len -=
-					aq_ptp_extract_ts(self->aq_nic, skb,
-							  aq_buf_vaddr(&buff->rxdata),
-							  buff->len);
-
-			hdr_len = buff->len;
-			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
-				hdr_len = eth_get_headlen(skb->dev,
-							  aq_buf_vaddr(&buff->rxdata),
-							  AQ_CFG_RX_HDR_SIZE);
-
-			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
-			       ALIGN(hdr_len, sizeof(long)));
-
-			if (buff->len - hdr_len > 0) {
-				skb_add_rx_frag(skb, 0, buff->rxdata.page,
-						buff->rxdata.pg_off + hdr_len,
-						buff->len - hdr_len,
-						AQ_CFG_RX_FRAME_MAX);
-				page_ref_inc(buff->rxdata.page);
-			}
+		}
 
-			if (!buff->is_eop) {
-				buff_ = buff;
-				i = 1U;
-				do {
-					next_ = buff_->next,
-					buff_ = &self->buff_ring[next_];
+		if (!buff->is_eop) {
+			buff_ = buff;
+			i = 1U;
+			do {
+				next_ = buff_->next;
+				buff_ = &self->buff_ring[next_];
 
-					dma_sync_single_range_for_cpu(
-							aq_nic_get_dev(self->aq_nic),
-							buff_->rxdata.daddr,
-							buff_->rxdata.pg_off,
-							buff_->len,
-							DMA_FROM_DEVICE);
-					skb_add_rx_frag(skb, i++,
-							buff_->rxdata.page,
-							buff_->rxdata.pg_off,
-							buff_->len,
-							AQ_CFG_RX_FRAME_MAX);
-					page_ref_inc(buff_->rxdata.page);
-					buff_->is_cleaned = 1;
-
-					buff->is_ip_cso &= buff_->is_ip_cso;
-					buff->is_udp_cso &= buff_->is_udp_cso;
-					buff->is_tcp_cso &= buff_->is_tcp_cso;
-					buff->is_cso_err |= buff_->is_cso_err;
+				dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+							      buff_->rxdata.daddr,
+							      buff_->rxdata.pg_off,
+							      buff_->len,
+							      DMA_FROM_DEVICE);
+				skb_add_rx_frag(skb, i++,
+						buff_->rxdata.page,
+						buff_->rxdata.pg_off,
+						buff_->len,
+						AQ_CFG_RX_FRAME_MAX);
+				page_ref_inc(buff_->rxdata.page);
+				buff_->is_cleaned = 1;
 
-				} while (!buff_->is_eop);
-			}
+				buff->is_ip_cso &= buff_->is_ip_cso;
+				buff->is_udp_cso &= buff_->is_udp_cso;
+				buff->is_tcp_cso &= buff_->is_tcp_cso;
+				buff->is_cso_err |= buff_->is_cso_err;
+
+			} while (!buff_->is_eop);
 		}
 
 		if (buff->is_vlan)
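As background for the warning the commit message mentions: when a packet is forwarded, the stack needs room in front of skb->data for the outgoing device's link-layer header, and an SKB received with no headroom forces extra work to create it. The sketch below states that requirement in a generic, hedged form; it is not the kernel code path that actually triggers the warning, and push_l2_header() is a hypothetical helper.

/* Illustrative sketch only: a generic statement of the headroom
 * requirement a forwarding path places on an SKB, not the exact code
 * that produced the warning referenced above.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int push_l2_header(struct sk_buff *skb, struct net_device *out_dev)
{
	unsigned int needed = LL_RESERVED_SPACE(out_dev);

	/* If the RX driver left no headroom (as the removed build_skb
	 * path could), the header area has to be regrown here, costing an
	 * allocation and a copy in the hot path.
	 */
	if (skb_headroom(skb) < needed &&
	    pskb_expand_head(skb, needed - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;

	skb_push(skb, out_dev->hard_header_len);
	return 0;
}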
