Skip to content

Commit 80f6215

Browse files
haiyangz authored and davem330 committed
net: mana: Add support for jumbo frame
During probe, get the hardware-allowed max MTU by querying the device configuration. Users can select MTU up to the device limit. When XDP is in use, limit MTU settings so the buffer size is within one page. And, when MTU is set to a too large value, XDP is not allowed to run. Also, to prevent changing MTU fails, and leaves the NIC in a bad state, pre-allocate all buffers before starting the change. So in low memory condition, it will return error, without affecting the NIC. Signed-off-by: Haiyang Zhang <[email protected]> Reviewed-by: Jesse Brandeburg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 2fbbd71 commit 80f6215

File tree

4 files changed

+233
-24
lines changed

4 files changed

+233
-24
lines changed

drivers/net/ethernet/microsoft/mana/mana_bpf.c

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -133,12 +133,6 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
133133
return act;
134134
}
135135

136-
static unsigned int mana_xdp_fraglen(unsigned int len)
137-
{
138-
return SKB_DATA_ALIGN(len) +
139-
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
140-
}
141-
142136
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
143137
{
144138
ASSERT_RTNL();
@@ -179,17 +173,18 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
179173
{
180174
struct mana_port_context *apc = netdev_priv(ndev);
181175
struct bpf_prog *old_prog;
182-
int buf_max;
176+
struct gdma_context *gc;
177+
178+
gc = apc->ac->gdma_dev->gdma_context;
183179

184180
old_prog = mana_xdp_get(apc);
185181

186182
if (!old_prog && !prog)
187183
return 0;
188184

189-
buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
190-
if (prog && buf_max > PAGE_SIZE) {
191-
netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
192-
ndev->mtu, buf_max);
185+
if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
186+
netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
187+
ndev->mtu, MANA_XDP_MTU_MAX);
193188
NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
194189

195190
return -EOPNOTSUPP;
@@ -206,6 +201,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
206201
if (apc->port_is_up)
207202
mana_chn_setxdp(apc, prog);
208203

204+
if (prog)
205+
ndev->max_mtu = MANA_XDP_MTU_MAX;
206+
else
207+
ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
208+
209209
return 0;
210210
}
211211

drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 204 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -427,6 +427,192 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
427427
return txq;
428428
}
429429

430+
/* Release pre-allocated RX buffers.
 *
 * Tears down whatever subset of the pre-allocation state exists: buffers
 * that were DMA-mapped are unmapped and their pages released, then the
 * tracking arrays are freed.  Safe to call at any stage of a partially
 * completed mana_pre_alloc_rxbufs() (it is the error path for it), hence
 * the staged goto labels.  Always resets the cached size/headroom fields.
 */
static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
	struct device *dev;
	int i;

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	/* No buffer array allocated yet: only clear the cached sizes */
	if (!mpc->rxbufs_pre)
		goto out1;

	/* Buffer array exists but DMA-address array does not: no buffer
	 * was mapped yet, so only the pointer array needs freeing.
	 */
	if (!mpc->das_pre)
		goto out2;

	/* Unmap and release every buffer that was successfully mapped;
	 * rxbpre_total counts exactly those.  Unmap before releasing the
	 * page so the device can no longer DMA into freed memory.
	 */
	while (mpc->rxbpre_total) {
		i = --mpc->rxbpre_total;
		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
				 DMA_FROM_DEVICE);
		/* virt_to_head_page() handles both the page and the
		 * page-frag allocation paths used at alloc time.
		 */
		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
	}

	kfree(mpc->das_pre);
	mpc->das_pre = NULL;

out2:
	kfree(mpc->rxbufs_pre);
	mpc->rxbufs_pre = NULL;

out1:
	/* Invalidate the cached config so a stale pre-allocation cannot be
	 * matched against a future RX queue (see mana_get_rxbuf_pre()).
	 */
	mpc->rxbpre_datasize = 0;
	mpc->rxbpre_alloc_size = 0;
	mpc->rxbpre_headroom = 0;
}
463+
464+
/* Get a buffer from the pre-allocated RX buffers.
 *
 * Hands out one buffer (virtual address returned, DMA address stored in
 * *da) from the pool built by mana_pre_alloc_rxbufs().  The pool's cached
 * datasize/alloc_size/headroom must match the target RX queue's values;
 * a mismatch indicates a coding error and yields NULL.  When the last
 * buffer is taken the tracking arrays free themselves.
 *
 * Returns the buffer's virtual address, or NULL on exhaustion/mismatch.
 */
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
{
	struct net_device *ndev = rxq->ndev;
	struct mana_port_context *mpc;
	void *va;

	mpc = netdev_priv(ndev);

	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
		netdev_err(ndev, "No RX pre-allocated bufs\n");
		return NULL;
	}

	/* Check sizes to catch unexpected coding error */
	if (mpc->rxbpre_datasize != rxq->datasize) {
		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
			   mpc->rxbpre_datasize, rxq->datasize);
		return NULL;
	}

	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
			   mpc->rxbpre_alloc_size, rxq->alloc_size);
		return NULL;
	}

	if (mpc->rxbpre_headroom != rxq->headroom) {
		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
			   mpc->rxbpre_headroom, rxq->headroom);
		return NULL;
	}

	/* Pop the last entry; ownership of the buffer (and its DMA
	 * mapping) transfers to the caller's RX queue.
	 */
	mpc->rxbpre_total--;

	*da = mpc->das_pre[mpc->rxbpre_total];
	va = mpc->rxbufs_pre[mpc->rxbpre_total];
	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;

	/* Deallocate the array after all buffers are gone */
	if (!mpc->rxbpre_total)
		mana_pre_dealloc_rxbufs(mpc);

	return va;
}
509+
510+
/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
511+
static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
512+
u32 *headroom)
513+
{
514+
if (mtu > MANA_XDP_MTU_MAX)
515+
*headroom = 0; /* no support for XDP */
516+
else
517+
*headroom = XDP_PACKET_HEADROOM;
518+
519+
*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
520+
521+
*datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
522+
}
523+
524+
/* Pre-allocate and DMA-map all RX buffers needed for @new_mtu.
 *
 * Called before an MTU change so that, under memory pressure, the change
 * fails here cleanly instead of leaving the NIC detached with no buffers.
 * On any failure everything allocated so far is released via
 * mana_pre_dealloc_rxbufs().
 *
 * Returns 0 on success, -ENOMEM on any allocation/mapping failure.
 */
static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
{
	struct device *dev;
	struct page *page;
	dma_addr_t da;
	int num_rxb;
	void *va;
	int i;

	/* Cache the buffer geometry for this MTU; mana_get_rxbuf_pre()
	 * later checks these against each RX queue's values.
	 */
	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	/* One full set of buffers for every RX queue */
	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;

	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
	if (!mpc->rxbufs_pre)
		goto error;

	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
	if (!mpc->das_pre)
		goto error;

	mpc->rxbpre_total = 0;

	for (i = 0; i < num_rxb; i++) {
		/* Jumbo-sized buffers come from the page-frag allocator;
		 * buffers that fit in a page use a whole page.
		 */
		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
			if (!va)
				goto error;
		} else {
			page = dev_alloc_page();
			if (!page)
				goto error;

			va = page_to_virt(page);
		}

		/* Map only the data area, past the (possible) XDP headroom */
		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, da)) {
			/* works for both the page and page-frag paths */
			put_page(virt_to_head_page(va));
			goto error;
		}

		mpc->rxbufs_pre[i] = va;
		mpc->das_pre[i] = da;
		/* keep the count exact so the error path unwinds only
		 * successfully mapped buffers
		 */
		mpc->rxbpre_total = i + 1;
	}

	return 0;

error:
	mana_pre_dealloc_rxbufs(mpc);
	return -ENOMEM;
}
583+
584+
static int mana_change_mtu(struct net_device *ndev, int new_mtu)
585+
{
586+
struct mana_port_context *mpc = netdev_priv(ndev);
587+
unsigned int old_mtu = ndev->mtu;
588+
int err;
589+
590+
/* Pre-allocate buffers to prevent failure in mana_attach later */
591+
err = mana_pre_alloc_rxbufs(mpc, new_mtu);
592+
if (err) {
593+
netdev_err(ndev, "Insufficient memory for new MTU\n");
594+
return err;
595+
}
596+
597+
err = mana_detach(ndev, false);
598+
if (err) {
599+
netdev_err(ndev, "mana_detach failed: %d\n", err);
600+
goto out;
601+
}
602+
603+
ndev->mtu = new_mtu;
604+
605+
err = mana_attach(ndev);
606+
if (err) {
607+
netdev_err(ndev, "mana_attach failed: %d\n", err);
608+
ndev->mtu = old_mtu;
609+
}
610+
611+
out:
612+
mana_pre_dealloc_rxbufs(mpc);
613+
return err;
614+
}
615+
430616
static const struct net_device_ops mana_devops = {
431617
.ndo_open = mana_open,
432618
.ndo_stop = mana_close,
@@ -436,6 +622,7 @@ static const struct net_device_ops mana_devops = {
436622
.ndo_get_stats64 = mana_get_stats64,
437623
.ndo_bpf = mana_bpf,
438624
.ndo_xdp_xmit = mana_xdp_xmit,
625+
.ndo_change_mtu = mana_change_mtu,
439626
};
440627

441628
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -625,6 +812,9 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
625812

626813
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
627814
sizeof(req), sizeof(resp));
815+
816+
req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
817+
628818
req.proto_major_ver = proto_major_ver;
629819
req.proto_minor_ver = proto_minor_ver;
630820
req.proto_micro_ver = proto_micro_ver;
@@ -647,6 +837,11 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
647837

648838
*max_num_vports = resp.max_num_vports;
649839

840+
if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
841+
gc->adapter_mtu = resp.adapter_mtu;
842+
else
843+
gc->adapter_mtu = ETH_FRAME_LEN;
844+
650845
return 0;
651846
}
652847

@@ -1712,10 +1907,14 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
17121907
static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
17131908
struct mana_rxq *rxq, struct device *dev)
17141909
{
1910+
struct mana_port_context *mpc = netdev_priv(rxq->ndev);
17151911
dma_addr_t da;
17161912
void *va;
17171913

1718-
va = mana_get_rxfrag(rxq, dev, &da, false);
1914+
if (mpc->rxbufs_pre)
1915+
va = mana_get_rxbuf_pre(rxq, &da);
1916+
else
1917+
va = mana_get_rxfrag(rxq, dev, &da, false);
17191918

17201919
if (!va)
17211920
return -ENOMEM;
@@ -1797,7 +1996,6 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
17971996
struct gdma_dev *gd = apc->ac->gdma_dev;
17981997
struct mana_obj_spec wq_spec;
17991998
struct mana_obj_spec cq_spec;
1800-
unsigned int mtu = ndev->mtu;
18011999
struct gdma_queue_spec spec;
18022000
struct mana_cq *cq = NULL;
18032001
struct gdma_context *gc;
@@ -1817,15 +2015,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
18172015
rxq->rxq_idx = rxq_idx;
18182016
rxq->rxobj = INVALID_MANA_HANDLE;
18192017

1820-
rxq->datasize = ALIGN(mtu + ETH_HLEN, 64);
1821-
1822-
if (mtu > MANA_XDP_MTU_MAX) {
1823-
rxq->alloc_size = mtu + MANA_RXBUF_PAD;
1824-
rxq->headroom = 0;
1825-
} else {
1826-
rxq->alloc_size = mtu + MANA_RXBUF_PAD + XDP_PACKET_HEADROOM;
1827-
rxq->headroom = XDP_PACKET_HEADROOM;
1828-
}
2018+
mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2019+
&rxq->headroom);
18292020

18302021
err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
18312022
if (err)
@@ -2238,8 +2429,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
22382429
ndev->netdev_ops = &mana_devops;
22392430
ndev->ethtool_ops = &mana_ethtool_ops;
22402431
ndev->mtu = ETH_DATA_LEN;
2241-
ndev->max_mtu = ndev->mtu;
2242-
ndev->min_mtu = ndev->mtu;
2432+
ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
2433+
ndev->min_mtu = ETH_MIN_MTU;
22432434
ndev->needed_headroom = MANA_HEADROOM;
22442435
ndev->dev_port = port_idx;
22452436
SET_NETDEV_DEV(ndev, gc->dev);

include/net/mana/gdma.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,7 @@ struct gdma_general_req {
145145
}; /* HW DATA */
146146

147147
#define GDMA_MESSAGE_V1 1
148+
#define GDMA_MESSAGE_V2 2
148149

149150
struct gdma_general_resp {
150151
struct gdma_resp_hdr hdr;
@@ -354,6 +355,9 @@ struct gdma_context {
354355
struct gdma_resource msix_resource;
355356
struct gdma_irq_context *irq_contexts;
356357

358+
/* L2 MTU */
359+
u16 adapter_mtu;
360+
357361
/* This maps a CQ index to the queue structure. */
358362
unsigned int max_num_cqs;
359363
struct gdma_queue **cq_table;

include/net/mana/mana.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ enum TRI_STATE {
3737
#define COMP_ENTRY_SIZE 64
3838

3939
#define RX_BUFFERS_PER_QUEUE 512
40+
#define MANA_RX_DATA_ALIGN 64
4041

4142
#define MAX_SEND_BUFFERS_PER_QUEUE 256
4243

@@ -390,6 +391,14 @@ struct mana_port_context {
390391
/* This points to an array of num_queues of RQ pointers. */
391392
struct mana_rxq **rxqs;
392393

394+
/* pre-allocated rx buffer array */
395+
void **rxbufs_pre;
396+
dma_addr_t *das_pre;
397+
int rxbpre_total;
398+
u32 rxbpre_datasize;
399+
u32 rxbpre_alloc_size;
400+
u32 rxbpre_headroom;
401+
393402
struct bpf_prog *bpf_prog;
394403

395404
/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
@@ -489,6 +498,11 @@ struct mana_query_device_cfg_resp {
489498
u16 max_num_vports;
490499
u16 reserved;
491500
u32 max_num_eqs;
501+
502+
/* response v2: */
503+
u16 adapter_mtu;
504+
u16 reserved2;
505+
u32 reserved3;
492506
}; /* HW DATA */
493507

494508
/* Query vPort Configuration */

0 commit comments

Comments
 (0)