@@ -427,6 +427,192 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return txq;
 }
 
+/* Release pre-allocated RX buffers */
+static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+{
+	struct device *dev;
+	int i;
+
+	dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+	if (!mpc->rxbufs_pre)
+		goto out1;
+
+	if (!mpc->das_pre)
+		goto out2;
+
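+	/* Unmap and free the buffers from the tail of the arrays */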
+	while (mpc->rxbpre_total) {
+		i = --mpc->rxbpre_total;
+		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
+				 DMA_FROM_DEVICE);
+		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
+	}
+
+	kfree(mpc->das_pre);
+	mpc->das_pre = NULL;
+
+out2:
+	kfree(mpc->rxbufs_pre);
+	mpc->rxbufs_pre = NULL;
+
+out1:
+	mpc->rxbpre_datasize = 0;
+	mpc->rxbpre_alloc_size = 0;
+	mpc->rxbpre_headroom = 0;
+}
+
+/* Get a buffer from the pre-allocated RX buffers */
+static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
+{
+	struct net_device *ndev = rxq->ndev;
+	struct mana_port_context *mpc;
+	void *va;
+
+	mpc = netdev_priv(ndev);
+
+	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
+		netdev_err(ndev, "No RX pre-allocated bufs\n");
+		return NULL;
+	}
+
+	/* Check sizes to catch unexpected coding error */
+	if (mpc->rxbpre_datasize != rxq->datasize) {
+		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
+			   mpc->rxbpre_datasize, rxq->datasize);
+		return NULL;
+	}
+
+	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
+		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
+			   mpc->rxbpre_alloc_size, rxq->alloc_size);
+		return NULL;
+	}
+
+	if (mpc->rxbpre_headroom != rxq->headroom) {
+		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
+			   mpc->rxbpre_headroom, rxq->headroom);
+		return NULL;
+	}
+
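+	/* Pop the next buffer from the end of the arrays */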
+	mpc->rxbpre_total--;
+
+	*da = mpc->das_pre[mpc->rxbpre_total];
+	va = mpc->rxbufs_pre[mpc->rxbpre_total];
+	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
+
+	/* Deallocate the array after all buffers are gone */
+	if (!mpc->rxbpre_total)
+		mana_pre_dealloc_rxbufs(mpc);
+
+	return va;
+}
+
+/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
+static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
+			       u32 *headroom)
+{
+	if (mtu > MANA_XDP_MTU_MAX)
+		*headroom = 0; /* no support for XDP */
+	else
+		*headroom = XDP_PACKET_HEADROOM;
+
+	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+
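+	/* Data area covers the frame plus the Ethernet header, kept 64-byte
+	 * aligned (assumed to be a device DMA requirement)
+	 */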
+	*datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+}
+
+static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+{
+	struct device *dev;
+	struct page *page;
+	dma_addr_t da;
+	int num_rxb;
+	void *va;
+	int i;
+
+	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
+			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+
+	dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+
+	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
+	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
+	if (!mpc->rxbufs_pre)
+		goto error;
+
+	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
+	if (!mpc->das_pre)
+		goto error;
+
+	mpc->rxbpre_total = 0;
+
+	for (i = 0; i < num_rxb; i++) {
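+		/* Use a page frag for buffers larger than a page, else a
+		 * whole page
+		 */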
+		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
+			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
+			if (!va)
+				goto error;
+		} else {
+			page = dev_alloc_page();
+			if (!page)
+				goto error;
+
+			va = page_to_virt(page);
+		}
+
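+		/* Map only the data area, past the XDP headroom */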
+		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
+				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
+
+		if (dma_mapping_error(dev, da)) {
+			put_page(virt_to_head_page(va));
+			goto error;
+		}
+
+		mpc->rxbufs_pre[i] = va;
+		mpc->das_pre[i] = da;
+		mpc->rxbpre_total = i + 1;
+	}
+
+	return 0;
+
+error:
+	mana_pre_dealloc_rxbufs(mpc);
+	return -ENOMEM;
+}
+
+static int mana_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct mana_port_context *mpc = netdev_priv(ndev);
+	unsigned int old_mtu = ndev->mtu;
+	int err;
+
+	/* Pre-allocate buffers to prevent failure in mana_attach later */
+	err = mana_pre_alloc_rxbufs(mpc, new_mtu);
+	if (err) {
+		netdev_err(ndev, "Insufficient memory for new MTU\n");
+		return err;
+	}
+
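+	/* Re-create the queues at the new size: detach, set the MTU, then
+	 * re-attach
+	 */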
+	err = mana_detach(ndev, false);
+	if (err) {
+		netdev_err(ndev, "mana_detach failed: %d\n", err);
+		goto out;
+	}
+
+	ndev->mtu = new_mtu;
+
+	err = mana_attach(ndev);
+	if (err) {
+		netdev_err(ndev, "mana_attach failed: %d\n", err);
+		ndev->mtu = old_mtu;
+	}
+
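+	/* mana_attach consumes the pre-allocated buffers on success; free
+	 * any that remain
+	 */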
+out:
+	mana_pre_dealloc_rxbufs(mpc);
+	return err;
+}
+
 static const struct net_device_ops mana_devops = {
 	.ndo_open		= mana_open,
 	.ndo_stop		= mana_close,
@@ -436,6 +622,7 @@ static const struct net_device_ops mana_devops = {
 	.ndo_get_stats64	= mana_get_stats64,
 	.ndo_bpf		= mana_bpf,
 	.ndo_xdp_xmit		= mana_xdp_xmit,
+	.ndo_change_mtu		= mana_change_mtu,
 };
 
 static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -625,6 +812,9 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 			     sizeof(req), sizeof(resp));
+
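+	/* Ask for the v2 response, which carries the adapter's max MTU */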
+	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
 	req.proto_major_ver = proto_major_ver;
 	req.proto_minor_ver = proto_minor_ver;
 	req.proto_micro_ver = proto_micro_ver;
@@ -647,6 +837,11 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 
 	*max_num_vports = resp.max_num_vports;
 
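+	/* Hosts that only speak v1 don't report an MTU; assume standard
+	 * Ethernet
+	 */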
+	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+		gc->adapter_mtu = resp.adapter_mtu;
+	else
+		gc->adapter_mtu = ETH_FRAME_LEN;
+
 	return 0;
 }
 
@@ -1712,10 +1907,14 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
 			    struct mana_rxq *rxq, struct device *dev)
 {
+	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
 	dma_addr_t da;
 	void *va;
 
-	va = mana_get_rxfrag(rxq, dev, &da, false);
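+	/* Prefer buffers pre-allocated for an in-flight MTU change */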
+	if (mpc->rxbufs_pre)
+		va = mana_get_rxbuf_pre(rxq, &da);
+	else
+		va = mana_get_rxfrag(rxq, dev, &da, false);
 
 	if (!va)
 		return -ENOMEM;
@@ -1797,7 +1996,6 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	struct gdma_dev *gd = apc->ac->gdma_dev;
 	struct mana_obj_spec wq_spec;
 	struct mana_obj_spec cq_spec;
-	unsigned int mtu = ndev->mtu;
 	struct gdma_queue_spec spec;
 	struct mana_cq *cq = NULL;
 	struct gdma_context *gc;
@@ -1817,15 +2015,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	rxq->rxq_idx = rxq_idx;
 	rxq->rxobj = INVALID_MANA_HANDLE;
 
-	rxq->datasize = ALIGN(mtu + ETH_HLEN, 64);
-
-	if (mtu > MANA_XDP_MTU_MAX) {
-		rxq->alloc_size = mtu + MANA_RXBUF_PAD;
-		rxq->headroom = 0;
-	} else {
-		rxq->alloc_size = mtu + MANA_RXBUF_PAD + XDP_PACKET_HEADROOM;
-		rxq->headroom = XDP_PACKET_HEADROOM;
-	}
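+	/* Size the queue buffers with the same rules as the pre-allocation
+	 * path
+	 */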
+	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+			   &rxq->headroom);
 
 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
 	if (err)
@@ -2238,8 +2429,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
 	ndev->netdev_ops = &mana_devops;
 	ndev->ethtool_ops = &mana_ethtool_ops;
 	ndev->mtu = ETH_DATA_LEN;
-	ndev->max_mtu = ndev->mtu;
-	ndev->min_mtu = ndev->mtu;
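+	/* adapter_mtu is a max frame size, so the netdev MTU excludes the
+	 * Ethernet header
+	 */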
+	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+	ndev->min_mtu = ETH_MIN_MTU;
 	ndev->needed_headroom = MANA_HEADROOM;
 	ndev->dev_port = port_idx;
 	SET_NETDEV_DEV(ndev, gc->dev);