@@ -38,6 +38,7 @@
 #include <net/ipv6.h>
 #include <net/tso.h>
 #include <net/page_pool.h>
+#include <linux/bpf_trace.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
@@ -323,8 +324,10 @@
 	      ETH_HLEN + ETH_FCS_LEN,			     \
 	      cache_line_size())
 
+#define MVNETA_SKB_HEADROOM	(max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+				 NET_IP_ALIGN)
 #define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
-			 NET_SKB_PAD))
+			 MVNETA_SKB_HEADROOM))
 #define MVNETA_SKB_SIZE(len)	(SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
 #define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)
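Note on the sizing above: XDP programs expect XDP_PACKET_HEADROOM (256 bytes) of headroom in front of the frame, while NET_SKB_PAD is typically max(32, L1_CACHE_BYTES), i.e. 64 on most arm platforms, so the max() usually resolves to 256 and MVNETA_SKB_HEADROOM becomes 256 + NET_IP_ALIGN. MVNETA_MAX_RX_BUF_SIZE shrinks accordingly, since headroom, frame data, and the trailing struct skb_shared_info must all fit in a single page.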
@@ -352,6 +355,11 @@ struct mvneta_statistic {
 #define T_REG_64	64
 #define T_SW		1
 
+#define MVNETA_XDP_PASS		BIT(0)
+#define MVNETA_XDP_DROPPED	BIT(1)
+#define MVNETA_XDP_TX		BIT(2)
+#define MVNETA_XDP_REDIR	BIT(3)
+
 static const struct mvneta_statistic mvneta_statistics[] = {
 	{ 0x3000, T_REG_64, "good_octets_received", },
 	{ 0x3010, T_REG_32, "good_frames_received", },
@@ -431,6 +439,8 @@ struct mvneta_port {
 	u32 cause_rx_tx;
 	struct napi_struct napi;
 
+	struct bpf_prog *xdp_prog;
+
 	/* Core clock */
 	struct clk *clk;
 	/* AXI clock */
@@ -1951,11 +1961,51 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 	return i;
 }
 
+static int
+mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+	       struct bpf_prog *prog, struct xdp_buff *xdp)
+{
+	u32 ret, act = bpf_prog_run_xdp(prog, xdp);
+
+	switch (act) {
+	case XDP_PASS:
+		ret = MVNETA_XDP_PASS;
+		break;
+	case XDP_REDIRECT: {
+		int err;
+
+		err = xdp_do_redirect(pp->dev, xdp, prog);
+		if (err) {
+			ret = MVNETA_XDP_DROPPED;
+			xdp_return_buff(xdp);
+		} else {
+			ret = MVNETA_XDP_REDIR;
+		}
+		break;
+	}
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		/* fall through */
+	case XDP_ABORTED:
+		trace_xdp_exception(pp->dev, prog, act);
+		/* fall through */
+	case XDP_DROP:
+		page_pool_recycle_direct(rxq->page_pool,
+					 virt_to_head_page(xdp->data));
+		ret = MVNETA_XDP_DROPPED;
+		break;
+	}
+
+	return ret;
+}
+
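For context, the sketch below is a minimal XDP program whose verdicts map onto the MVNETA_XDP_* codes handled by mvneta_run_xdp() above; it is a hypothetical example, not part of this commit. Note that this revision of the driver has no XDP_TX case yet, so an XDP_TX verdict falls into the bpf_warn_invalid_xdp_action() default path and the frame is dropped.

/* Hypothetical example (not from this commit): minimal XDP program
 * exercising the verdicts mvneta_run_xdp() handles.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int drop_multicast(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	unsigned char *eth = data;

	if (data + ETH_HLEN > data_end)	/* bounds check, keeps the verifier happy */
		return XDP_ABORTED;	/* -> trace_xdp_exception(), buffer recycled */
	if (eth[0] & 1)			/* multicast/broadcast destination MAC */
		return XDP_DROP;	/* -> page_pool_recycle_direct() */
	return XDP_PASS;		/* -> driver builds an skb as usual */
}

char _license[] SEC("license") = "GPL";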
 static int
 mvneta_swbm_rx_frame(struct mvneta_port *pp,
 		     struct mvneta_rx_desc *rx_desc,
 		     struct mvneta_rx_queue *rxq,
-		     struct page *page)
+		     struct xdp_buff *xdp,
+		     struct bpf_prog *xdp_prog,
+		     struct page *page, u32 *xdp_ret)
 {
 	unsigned char *data = page_address(page);
 	int data_len = -MVNETA_MH_SIZE, len;
@@ -1975,7 +2025,26 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 					rx_desc->buf_phys_addr,
 					len, dma_dir);
 
-	rxq->skb = build_skb(data, PAGE_SIZE);
+	xdp->data_hard_start = data;
+	xdp->data = data + MVNETA_SKB_HEADROOM + MVNETA_MH_SIZE;
+	xdp->data_end = xdp->data + data_len;
+	xdp_set_data_meta_invalid(xdp);
+
+	if (xdp_prog) {
+		u32 ret;
+
+		ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
+		if (ret != MVNETA_XDP_PASS) {
+			mvneta_update_stats(pp, 1,
+					    xdp->data_end - xdp->data,
+					    false);
+			rx_desc->buf_phys_addr = 0;
+			*xdp_ret |= ret;
+			return ret;
+		}
+	}
+
+	rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
 	if (unlikely(!rxq->skb)) {
 		netdev_err(dev,
 			   "Can't allocate skb on queue %d\n",
@@ -1986,8 +2055,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	}
 	page_pool_release_page(rxq->page_pool, page);
 
-	skb_reserve(rxq->skb, MVNETA_MH_SIZE + NET_SKB_PAD);
-	skb_put(rxq->skb, data_len);
+	skb_reserve(rxq->skb,
+		    xdp->data - xdp->data_hard_start);
+	skb_put(rxq->skb, xdp->data_end - xdp->data);
 	mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
 
 	rxq->left_size = rx_desc->data_size - len;
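Building the skb from xdp->data_hard_start and reserving xdp->data - xdp->data_hard_start (rather than the old fixed MVNETA_MH_SIZE + NET_SKB_PAD) matters because a program may have moved the frame boundaries, e.g. via bpf_xdp_adjust_head(); the skb now honors whatever layout the XDP run left behind.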
@@ -2021,7 +2091,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 		/* refill descriptor with new buffer later */
 		skb_add_rx_frag(rxq->skb,
 				skb_shinfo(rxq->skb)->nr_frags,
-				page, NET_SKB_PAD, data_len,
+				page, MVNETA_SKB_HEADROOM, data_len,
 				PAGE_SIZE);
 	}
 	page_pool_release_page(rxq->page_pool, page);
@@ -2036,11 +2106,18 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 {
 	int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
 	struct net_device *dev = pp->dev;
+	struct bpf_prog *xdp_prog;
+	struct xdp_buff xdp_buf;
 	int rx_todo, refill;
+	u32 xdp_ret = 0;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
 
+	rcu_read_lock();
+	xdp_prog = READ_ONCE(pp->xdp_prog);
+	xdp_buf.rxq = &rxq->xdp_rxq;
+
 	/* Fairness NAPI loop */
 	while (rx_proc < budget && rx_proc < rx_todo) {
 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
@@ -2069,7 +2146,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 				continue;
 			}
 
-			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, page);
+			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
+						   xdp_prog, page, &xdp_ret);
 			if (err)
 				continue;
 		} else {
@@ -2104,6 +2182,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		/* clean incomplete skb pointer in queue */
 		rxq->skb = NULL;
 	}
+	rcu_read_unlock();
+
+	if (xdp_ret & MVNETA_XDP_REDIR)
+		xdp_do_flush_map();
 
 	if (rcvd_pkts)
 		mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
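xdp_do_redirect() only queues a frame toward its target (a devmap/cpumap entry or another netdev); the MVNETA_XDP_REDIR bit accumulated in xdp_ret lets the driver issue a single xdp_do_flush_map() per NAPI poll instead of flushing after every packet, the usual batching pattern for XDP redirect.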
@@ -2847,13 +2929,14 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 static int mvneta_create_page_pool(struct mvneta_port *pp,
 				   struct mvneta_rx_queue *rxq, int size)
 {
+	struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
 	struct page_pool_params pp_params = {
 		.order = 0,
 		.flags = PP_FLAG_DMA_MAP,
 		.pool_size = size,
 		.nid = cpu_to_node(0),
 		.dev = pp->dev->dev.parent,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
 	};
 	int err;
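Switching the pool to DMA_BIDIRECTIONAL whenever a program is attached lets the same pages be mapped for both device-to-memory and memory-to-device transfers; presumably this is groundwork for XDP_TX out of page_pool pages, since this commit itself only implements PASS/DROP/ABORTED/REDIRECT.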
@@ -3320,6 +3403,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
 	}
 
+	if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
+		netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+		return -EINVAL;
+	}
+
 	dev->mtu = mtu;
 
 	if (!netif_running(dev)) {
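This check and the one in mvneta_xdp_setup() further down enforce the same invariant from both directions: an XDP buffer must fit in one page, so an MTU above MVNETA_MAX_RX_BUF_SIZE is rejected while a program is loaded, and loading a program is rejected while such an MTU is set.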
@@ -3989,6 +4077,47 @@ static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
 }
 
+static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+			    struct netlink_ext_ack *extack)
+{
+	bool need_update, running = netif_running(dev);
+	struct mvneta_port *pp = netdev_priv(dev);
+	struct bpf_prog *old_prog;
+
+	if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+		return -EOPNOTSUPP;
+	}
+
+	need_update = !!pp->xdp_prog != !!prog;
+	if (running && need_update)
+		mvneta_stop(dev);
+
+	old_prog = xchg(&pp->xdp_prog, prog);
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (running && need_update)
+		return mvneta_open(dev);
+
+	return 0;
+}
+
+static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
+	case XDP_QUERY_PROG:
+		xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
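With .ndo_bpf wired up (see the netdev_ops hunk below), programs attach through the standard tooling, e.g. ip link set dev eth0 xdp obj prog.o sec xdp (device and object file names are placeholders).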
 /* Ethtool methods */
 
 /* Set link ksettings (phy address, speed) for ethtools */
@@ -4385,6 +4514,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_fix_features    = mvneta_fix_features,
 	.ndo_get_stats64     = mvneta_get_stats64,
 	.ndo_do_ioctl        = mvneta_ioctl,
+	.ndo_bpf             = mvneta_xdp,
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
@@ -4675,7 +4805,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	pp->id = global_port_id++;
-	pp->rx_offset_correction = NET_SKB_PAD;
+	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
 
 	/* Obtain access to BM resources if enabled and already initialized */
 	bm_node = of_parse_phandle(dn, "buffer-manager", 0);