@@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
 			    struct prueth_rx_chn *rx_chn,
 			    int max_rflows)
 {
+	if (rx_chn->pg_pool) {
+		page_pool_destroy(rx_chn->pg_pool);
+		rx_chn->pg_pool = NULL;
+	}
+
 	if (rx_chn->desc_pool)
 		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
 
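The pg_pool member torn down here is introduced by this patch; the struct change itself is outside this excerpt. It amounts to one extra field on struct prueth_rx_chn, shown as a sketch (the header location, likely icssg_prueth.h, is an assumption; the field name comes from this hunk):

	struct page_pool *pg_pool;	/* new: backing pool for RX buffers */

The NULL check is defensive: page_pool_destroy() already returns early for a NULL pool, but clearing the pointer keeps an earlier prueth_reset_rx_chan() (see the final hunk) and this cleanup from destroying the same pool twice.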
@@ -461,43 +466,36 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
 }
 EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
 
-int prueth_dma_rx_push(struct prueth_emac *emac,
-		       struct sk_buff *skb,
-		       struct prueth_rx_chn *rx_chn)
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+			      struct prueth_rx_chn *rx_chn,
+			      struct page *page, u32 buf_len)
 {
 	struct net_device *ndev = emac->ndev;
 	struct cppi5_host_desc_t *desc_rx;
-	u32 pkt_len = skb_tailroom(skb);
 	dma_addr_t desc_dma;
 	dma_addr_t buf_dma;
 	void **swdata;
 
+	buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
 	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
 	if (!desc_rx) {
 		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
 		return -ENOMEM;
 	}
 	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
 
-	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
-		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
-		return -EINVAL;
-	}
-
 	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
 			 PRUETH_NAV_PS_DATA_SIZE);
 	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
-	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
 
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	*swdata = skb;
+	*swdata = page;
 
-	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
 					desc_rx, desc_dma);
 }
-EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
 
 u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
 {
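The rename to prueth_dma_rx_push_mapped() captures the contract change: the caller now supplies a page the pool has already DMA-mapped (PP_FLAG_DMA_MAP, set where the pool is created below), so the dma_map_single()/dma_mapping_error() pair disappears and the device address is simply the pool's stored mapping plus PRUETH_HEADROOM. The push also targets PRUETH_RX_FLOW_DATA instead of a hard-coded flow 0. A minimal caller sketch, built only from calls that appear elsewhere in this patch:

	struct page *page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
	int ret;

	if (!page)
		return -ENOMEM;

	/* page arrives pre-mapped; on failure hand it straight back */
	ret = prueth_dma_rx_push_mapped(emac, rx_chn, page,
					PRUETH_MAX_PKT_SIZE);
	if (ret < 0)
		page_pool_recycle_direct(rx_chn->pg_pool, page);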
@@ -541,12 +539,16 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
 	u32 buf_dma_len, pkt_len, port_id = 0;
 	struct net_device *ndev = emac->ndev;
 	struct cppi5_host_desc_t *desc_rx;
-	struct sk_buff *skb, *new_skb;
 	dma_addr_t desc_dma, buf_dma;
+	struct page *page, *new_page;
+	struct page_pool *pool;
+	struct sk_buff *skb;
 	void **swdata;
 	u32 *psdata;
+	void *pa;
 	int ret;
 
+	pool = rx_chn->pg_pool;
 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
 	if (ret) {
 		if (ret != -ENODATA)
@@ -558,48 +560,61 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
 		return 0;
 
 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	skb = *swdata;
-
-	psdata = cppi5_hdesc_get_psdata(desc_rx);
-	/* RX HW timestamp */
-	if (emac->rx_ts_enabled)
-		emac_rx_timestamp(emac, skb, psdata);
-
+	page = *swdata;
+	page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
 	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
 	/* firmware adds 4 CRC bytes, strip them */
 	pkt_len -= 4;
 	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
 
-	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
-	skb->dev = ndev;
-	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
 	/* if allocation fails we drop the packet but push the
-	 * descriptor back to the ring with old skb to prevent a stall
+	 * descriptor back to the ring with old page to prevent a stall
 	 */
-	if (!new_skb) {
+	new_page = page_pool_dev_alloc_pages(pool);
+	if (unlikely(!new_page)) {
+		new_page = page;
 		ndev->stats.rx_dropped++;
-		new_skb = skb;
-	} else {
-		/* send the filled skb up the n/w stack */
-		skb_put(skb, pkt_len);
-		if (emac->prueth->is_switch_mode)
-			skb->offload_fwd_mark = emac->offload_fwd_mark;
-		skb->protocol = eth_type_trans(skb, ndev);
-		napi_gro_receive(&emac->napi_rx, skb);
-		ndev->stats.rx_bytes += pkt_len;
-		ndev->stats.rx_packets++;
+		goto requeue;
+	}
+
+	/* prepare skb and send to n/w stack */
+	pa = page_address(page);
+	skb = napi_build_skb(pa, PAGE_SIZE);
+	if (!skb) {
+		ndev->stats.rx_dropped++;
+		page_pool_recycle_direct(pool, page);
+		goto requeue;
 	}
 
+	skb_reserve(skb, PRUETH_HEADROOM);
+	skb_put(skb, pkt_len);
+	skb->dev = ndev;
+
+	psdata = cppi5_hdesc_get_psdata(desc_rx);
+	/* RX HW timestamp */
+	if (emac->rx_ts_enabled)
+		emac_rx_timestamp(emac, skb, psdata);
+
+	if (emac->prueth->is_switch_mode)
+		skb->offload_fwd_mark = emac->offload_fwd_mark;
+	skb->protocol = eth_type_trans(skb, ndev);
+
+	skb_mark_for_recycle(skb);
+	napi_gro_receive(&emac->napi_rx, skb);
+	ndev->stats.rx_bytes += pkt_len;
+	ndev->stats.rx_packets++;
+
+requeue:
 	/* queue another RX DMA */
-	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+	ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+					PRUETH_MAX_PKT_SIZE);
 	if (WARN_ON(ret < 0)) {
-		dev_kfree_skb_any(new_skb);
+		page_pool_recycle_direct(pool, new_page);
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_dropped++;
 	}
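Two page_pool conventions drive this hunk. With PP_FLAG_DMA_SYNC_DEV the pool re-syncs each buffer for the device when it is recycled, so the RX path only needs the single page_pool_dma_sync_for_cpu() call before touching the frame. And napi_build_skb() wraps the page with no copy: it carves struct skb_shared_info out of the buffer tail, which is why the full PAGE_SIZE is passed while only pkt_len bytes are exposed through skb_put(); skb_mark_for_recycle() then routes the page back to the pool when the stack frees the skb. A sketch of one page's layout (the computation uses the stock SKB_WITH_OVERHEAD() helper; the variable name is illustrative):

	/*
	 * |<-PRUETH_HEADROOM->|<--- frame (pkt_len) --->|<- tail + shinfo ->|
	 * ^ page_address(page) ^ skb->data after skb_reserve()
	 */
	unsigned int max_payload = SKB_WITH_OVERHEAD(PAGE_SIZE - PRUETH_HEADROOM);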
@@ -611,22 +626,16 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
 	struct prueth_rx_chn *rx_chn = data;
 	struct cppi5_host_desc_t *desc_rx;
-	struct sk_buff *skb;
-	dma_addr_t buf_dma;
-	u32 buf_dma_len;
+	struct page_pool *pool;
+	struct page *page;
 	void **swdata;
 
+	pool = rx_chn->pg_pool;
 	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
-	skb = *swdata;
-	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
-	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
-	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
-			 DMA_FROM_DEVICE);
+	page = *swdata;
+	page_pool_recycle_direct(pool, page);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
-	dev_kfree_skb_any(skb);
 }
 
 static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
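prueth_rx_cleanup() is the per-descriptor callback the K3 UDMA glue layer invokes for every buffer still queued when a channel is reset (the registration is visible in the prueth_reset_rx_chan() hunk at the end of this patch). With page_pool there is nothing to unmap by hand; the leftover page is simply recycled. Roughly how it gets called, as a sketch with an assumed loop-bound name:

	for (i = 0; i < max_rflows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup, !!i);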
@@ -907,29 +916,71 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
 }
 EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
 
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+						 struct device *dma_dev,
+						 int size)
+{
+	struct page_pool_params pp_params = { 0 };
+	struct page_pool *pool;
+
+	pp_params.order = 0;
+	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+	pp_params.pool_size = size;
+	pp_params.nid = dev_to_node(emac->prueth->dev);
+	pp_params.dma_dir = DMA_BIDIRECTIONAL;
+	pp_params.dev = dma_dev;
+	pp_params.napi = &emac->napi_rx;
+	pp_params.max_len = PAGE_SIZE;
+
+	pool = page_pool_create(&pp_params);
+	if (IS_ERR(pool))
+		netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+	return pool;
+}
+
 int prueth_prepare_rx_chan(struct prueth_emac *emac,
 			   struct prueth_rx_chn *chn,
 			   int buf_size)
 {
-	struct sk_buff *skb;
+	struct page_pool *pool;
+	struct page *page;
 	int i, ret;
 
+	pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
+
+	chn->pg_pool = pool;
+
 	for (i = 0; i < chn->descs_num; i++) {
-		skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
-		if (!skb)
-			return -ENOMEM;
+		/* NOTE: we're not using memory efficiently here.
+		 * 1 full page (4KB?) used here instead of
+		 * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+		 */
+		page = page_pool_dev_alloc_pages(pool);
+		if (!page) {
+			netdev_err(emac->ndev, "couldn't allocate rx page\n");
+			ret = -ENOMEM;
+			goto recycle_alloc_pg;
+		}
 
-		ret = prueth_dma_rx_push(emac, skb, chn);
+		ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
 		if (ret < 0) {
 			netdev_err(emac->ndev,
-				   "cannot submit skb for rx chan %s ret %d\n",
+				   "cannot submit page for rx chan %s ret %d\n",
 				   chn->name, ret);
-			kfree_skb(skb);
-			return ret;
+			page_pool_recycle_direct(pool, page);
+			goto recycle_alloc_pg;
 		}
 	}
 
 	return 0;
+
+recycle_alloc_pg:
+	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
 
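The pool is sized to chn->descs_num so one page can be posted per RX descriptor, and on any failure the new recycle_alloc_pg label reuses prueth_reset_rx_chan() to recycle already-pushed pages and destroy the pool. The NOTE comment above flags the main cost of this conversion: a full order-0 page per roughly 1.5 KiB frame. One possible follow-up, sketched below and not part of this patch, is the page_pool fragment API, which packs several buffers into one page; PRUETH_RX_BUF_SIZE is a hypothetical per-buffer size, and kernels predating the automatic-fragment rework also need PP_FLAG_PAGE_FRAG in pp_params.flags:

	unsigned int offset;
	struct page *page;

	page = page_pool_dev_alloc_frag(pool, &offset,
					PRUETH_HEADROOM + PRUETH_RX_BUF_SIZE);
	if (page)
		buf_dma = page_pool_get_dma_addr(page) + offset +
			  PRUETH_HEADROOM;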
@@ -958,6 +1009,9 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
 					  prueth_rx_cleanup, !!i);
 	if (disable)
 		k3_udma_glue_disable_rx_chn(chn->rx_chn);
+
+	page_pool_destroy(chn->pg_pool);
+	chn->pg_pool = NULL;
 }
 EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
 
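Destroying the pool here makes prueth_reset_rx_chan() the inverse of prueth_prepare_rx_chan(), which created it, and the NULL assignment lets the check added in the first hunk skip a second destroy. A sketch of the assumed teardown order in the driver's stop path:

	/* recycle in-flight pages and destroy the pool ... */
	prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, true);
	/* ... then release the descriptor pool and channel resources */
	prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);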