
Commit 1344e75

DavMila authored and davem330 committed
gve: Add RX context.
This refactor moves the skb_head and skb_tail fields into a new gve_rx_ctx struct. This new struct will contain information about the current packet being processed. This is in preparation for multi-descriptor RX packets.

Signed-off-by: David Awogbemila <[email protected]>
Signed-off-by: Jeroen de Borst <[email protected]>
Reviewed-by: Catherine Sullivan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 233cdfb commit 1344e75
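
The change in a nutshell (a condensed sketch of the two structs, with field comments and unrelated ring fields elided; see the full diff below):

/* Before: per-packet skb chain state sat directly on the RX ring. */
struct gve_rx_ring {
	/* ... */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
};

/* After: the same state is grouped into a context struct, giving
 * future multi-descriptor per-packet fields one obvious home.
 */
struct gve_rx_ctx {
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
};

struct gve_rx_ring {
	/* ... */
	struct gve_rx_ctx ctx;
};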

2 files changed: 44 additions & 37 deletions

drivers/net/ethernet/google/gve/gve.h

Lines changed: 10 additions & 3 deletions
@@ -142,6 +142,15 @@ struct gve_index_list {
 	s16 tail;
 };
 
+/* A single received packet split across multiple buffers may be
+ * reconstructed using the information in this structure.
+ */
+struct gve_rx_ctx {
+	/* head and tail of skb chain for the current packet or NULL if none */
+	struct sk_buff *skb_head;
+	struct sk_buff *skb_tail;
+};
+
 /* Contains datapath state used to represent an RX queue. */
 struct gve_rx_ring {
 	struct gve_priv *gve;
@@ -206,9 +215,7 @@ struct gve_rx_ring {
 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
 
-	/* head and tail of skb chain for the current packet or NULL if none */
-	struct sk_buff *skb_head;
-	struct sk_buff *skb_tail;
+	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
 };
 
 /* A TX desc ring entry */
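
Every call site that touched rx->skb_head and rx->skb_tail now goes through rx->ctx, and the driver still resets both pointers by hand in several places (ring alloc, gve_rx_free_skb(), and the end-of-packet path in gve_rx_poll_dqo()). A helper like the following, hypothetical and not part of this commit, shows the kind of consolidation the new struct makes possible:

/* Hypothetical helper (not in this commit): with per-packet state in
 * one struct, resetting it between packets becomes a single call.
 */
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->skb_head = NULL;
	ctx->skb_tail = NULL;
}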

drivers/net/ethernet/google/gve/gve_rx_dqo.c

Lines changed: 34 additions & 34 deletions
@@ -240,8 +240,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	rx->dqo.bufq.mask = buffer_queue_slots - 1;
 	rx->dqo.complq.num_free_slots = completion_queue_slots;
 	rx->dqo.complq.mask = completion_queue_slots - 1;
-	rx->skb_head = NULL;
-	rx->skb_tail = NULL;
+	rx->ctx.skb_head = NULL;
+	rx->ctx.skb_tail = NULL;
 
 	rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
 	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -467,12 +467,12 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
 
 static void gve_rx_free_skb(struct gve_rx_ring *rx)
 {
-	if (!rx->skb_head)
+	if (!rx->ctx.skb_head)
 		return;
 
-	dev_kfree_skb_any(rx->skb_head);
-	rx->skb_head = NULL;
-	rx->skb_tail = NULL;
+	dev_kfree_skb_any(rx->ctx.skb_head);
+	rx->ctx.skb_head = NULL;
+	rx->ctx.skb_tail = NULL;
 }
 
 /* Chains multi skbs for single rx packet.
@@ -483,7 +483,7 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 			       u16 buf_len, struct gve_rx_ring *rx,
 			       struct gve_priv *priv)
 {
-	int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;
+	int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
 
 	if (unlikely(num_frags == MAX_SKB_FRAGS)) {
 		struct sk_buff *skb;
@@ -492,17 +492,17 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 		if (!skb)
 			return -1;
 
-		skb_shinfo(rx->skb_tail)->frag_list = skb;
-		rx->skb_tail = skb;
+		skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+		rx->ctx.skb_tail = skb;
 		num_frags = 0;
 	}
-	if (rx->skb_tail != rx->skb_head) {
-		rx->skb_head->len += buf_len;
-		rx->skb_head->data_len += buf_len;
-		rx->skb_head->truesize += priv->data_buffer_size_dqo;
+	if (rx->ctx.skb_tail != rx->ctx.skb_head) {
+		rx->ctx.skb_head->len += buf_len;
+		rx->ctx.skb_head->data_len += buf_len;
+		rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
 	}
 
-	skb_add_rx_frag(rx->skb_tail, num_frags,
+	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
 			buf_state->page_info.page,
 			buf_state->page_info.page_offset,
 			buf_len, priv->data_buffer_size_dqo);
@@ -556,7 +556,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 				      buf_len, DMA_FROM_DEVICE);
 
 	/* Append to current skb if one exists. */
-	if (rx->skb_head) {
+	if (rx->ctx.skb_head) {
 		if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
 						 priv)) != 0) {
 			goto error;
@@ -567,11 +567,11 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	}
 
 	if (eop && buf_len <= priv->rx_copybreak) {
-		rx->skb_head = gve_rx_copy(priv->dev, napi,
-					   &buf_state->page_info, buf_len, 0);
-		if (unlikely(!rx->skb_head))
+		rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
+					       &buf_state->page_info, buf_len, 0);
+		if (unlikely(!rx->ctx.skb_head))
 			goto error;
-		rx->skb_tail = rx->skb_head;
+		rx->ctx.skb_tail = rx->ctx.skb_head;
 
 		u64_stats_update_begin(&rx->statss);
 		rx->rx_copied_pkt++;
@@ -583,12 +583,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		return 0;
 	}
 
-	rx->skb_head = napi_get_frags(napi);
-	if (unlikely(!rx->skb_head))
+	rx->ctx.skb_head = napi_get_frags(napi);
+	if (unlikely(!rx->ctx.skb_head))
 		goto error;
-	rx->skb_tail = rx->skb_head;
+	rx->ctx.skb_tail = rx->ctx.skb_head;
 
-	skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
+	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
 			buf_state->page_info.page_offset, buf_len,
 			priv->data_buffer_size_dqo);
 	gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -635,27 +635,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
 		rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
 	int err;
 
-	skb_record_rx_queue(rx->skb_head, rx->q_num);
+	skb_record_rx_queue(rx->ctx.skb_head, rx->q_num);
 
 	if (feat & NETIF_F_RXHASH)
-		gve_rx_skb_hash(rx->skb_head, desc, ptype);
+		gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype);
 
 	if (feat & NETIF_F_RXCSUM)
-		gve_rx_skb_csum(rx->skb_head, desc, ptype);
+		gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
 
 	/* RSC packets must set gso_size otherwise the TCP stack will complain
 	 * that packets are larger than MTU.
 	 */
 	if (desc->rsc) {
-		err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
+		err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype);
 		if (err < 0)
 			return err;
 	}
 
-	if (skb_headlen(rx->skb_head) == 0)
+	if (skb_headlen(rx->ctx.skb_head) == 0)
 		napi_gro_frags(napi);
 	else
-		napi_gro_receive(napi, rx->skb_head);
+		napi_gro_receive(napi, rx->ctx.skb_head);
 
 	return 0;
 }
@@ -717,18 +717,18 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 		/* Free running counter of completed descriptors */
 		rx->cnt++;
 
-		if (!rx->skb_head)
+		if (!rx->ctx.skb_head)
 			continue;
 
 		if (!compl_desc->end_of_packet)
 			continue;
 
 		work_done++;
-		pkt_bytes = rx->skb_head->len;
+		pkt_bytes = rx->ctx.skb_head->len;
 		/* The ethernet header (first ETH_HLEN bytes) is snipped off
 		 * by eth_type_trans.
 		 */
-		if (skb_headlen(rx->skb_head))
+		if (skb_headlen(rx->ctx.skb_head))
 			pkt_bytes += ETH_HLEN;
 
 		/* gve_rx_complete_skb() will consume skb if successful */
@@ -741,8 +741,8 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 		}
 
 		bytes += pkt_bytes;
-		rx->skb_head = NULL;
-		rx->skb_tail = NULL;
+		rx->ctx.skb_head = NULL;
+		rx->ctx.skb_tail = NULL;
 	}
 
 	gve_rx_post_buffers_dqo(rx);
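
For orientation, the multi-buffer chaining that now operates on rx->ctx, paraphrased from gve_rx_append_frags() in the diff above (a simplified sketch, not a drop-in replacement; the page/offset/truesize parameters stand in for the buf_state and priv fields the driver actually uses):

/* Simplified paraphrase of gve_rx_append_frags(): append one RX buffer
 * to the packet currently under construction in rx->ctx.
 */
static int append_frag_sketch(struct napi_struct *napi, struct gve_rx_ring *rx,
			      struct page *page, u32 off, u16 buf_len,
			      u32 truesize)
{
	int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;

	/* Tail skb is out of frag slots: start a fresh skb and link it
	 * onto the chain via frag_list, then make it the new tail.
	 */
	if (num_frags == MAX_SKB_FRAGS) {
		struct sk_buff *skb = napi_alloc_skb(napi, 0);

		if (!skb)
			return -1;
		skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
		rx->ctx.skb_tail = skb;
		num_frags = 0;
	}

	/* Frags added to a frag_list skb don't update the head skb's
	 * byte accounting, so bump it explicitly.
	 */
	if (rx->ctx.skb_tail != rx->ctx.skb_head) {
		rx->ctx.skb_head->len += buf_len;
		rx->ctx.skb_head->data_len += buf_len;
		rx->ctx.skb_head->truesize += truesize;
	}

	skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, off,
			buf_len, truesize);
	return 0;
}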
