@@ -1378,6 +1378,73 @@ static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
 	return received;
 }
 
+static int gem_poll(struct napi_struct *napi, int budget)
+{
+	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+	struct macb *bp = queue->bp;
+	int work_done;
+	u32 status;
+
+	status = macb_readl(bp, RSR);
+	/* Don't clear the BNA error bit yet, if any */
+	status &= ~MACB_BIT(BNA);
+	macb_writel(bp, RSR, status);
+
+
+	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+		    (unsigned long)status, budget);
+
+	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
+
+	if (work_done < budget) {
+		napi_complete_done(napi, work_done);
+
+		/* Packets received while interrupts were disabled or BNA error */
+		status = macb_readl(bp, RSR);
+		if (status) {
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				queue_writel(queue, ISR, MACB_BIT(RCOMP));
+
+			if (unlikely(status & MACB_BIT(BNA))) {
+				netdev_warn(bp->dev,
+					    "buffer not available for incoming packet\n");
+
+				/* disable Rx interrupts and possible HRESP error */
+				queue_writel(queue, IDR, bp->rx_intr_mask | MACB_BIT(HRESP));
+
+				/* free a slot to allow the refill */
+				queue->rx_tail++;
+				bp->dev->stats.rx_dropped++;
+				queue->stats.rx_dropped++;
+
+				/* clear the error before resolving it to avoid a race */
+				macb_writel(bp, RSR, MACB_BIT(BNA));
+
+				/* refill one slot to make it available for the DMA */
+				gem_rx_refill(queue);
+
+				/* enable Rx interrupts */
+				queue_writel(queue, IER, bp->rx_intr_mask | MACB_BIT(HRESP));
+			}
+
+			napi_reschedule(napi);
+		} else {
+			queue_writel(queue, IER, bp->rx_intr_mask);
+
+			/* Recheck for packets that arrived before IRQs were re-enabled */
+			status = macb_readl(bp, RSR);
+			if (status) {
+				queue_writel(queue, IDR, bp->rx_intr_mask);
+				napi_reschedule(napi);
+			}
+		}
+	}
+
+	/* TODO: Handle errors */
+
+	return work_done;
+}
+
 static int macb_poll(struct napi_struct *napi, int budget)
 {
 	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
@@ -3571,7 +3638,11 @@ static int macb_init(struct platform_device *pdev)
 
 		queue = &bp->queues[q];
 		queue->bp = bp;
-		netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
+		if (macb_is_gem(bp)) {
+			netif_napi_add(dev, &queue->napi, gem_poll, NAPI_POLL_WEIGHT);
+		} else {
+			netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
+		}
 		if (hw_q) {
 			queue->ISR  = GEM_ISR(hw_q - 1);
 			queue->IER  = GEM_IER(hw_q - 1);
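For reference, the poll routine added above follows the standard NAPI contract: process at most budget packets, complete NAPI once the ring is drained, re-enable the Rx interrupt, and reschedule if more work arrived in the meantime. Below is a minimal, driver-agnostic sketch of that contract; the example_* helpers and the example_queue structure are illustrative placeholders, not macb driver APIs.

#include <linux/netdevice.h>

/* Hypothetical per-queue state; a real driver also keeps ring pointers here. */
struct example_queue {
	struct napi_struct napi;
};

/* Hypothetical helpers standing in for driver-specific register accesses. */
int example_process_rx(struct example_queue *q, int budget);
bool example_rx_pending(struct example_queue *q);
void example_enable_rx_irq(struct example_queue *q);
void example_disable_rx_irq(struct example_queue *q);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_queue *q = container_of(napi, struct example_queue, napi);
	int work_done = example_process_rx(q, budget);	/* consume up to budget packets */

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* Ring drained: re-enable the Rx interrupt. */
		example_enable_rx_irq(q);

		/* A packet may have arrived between the last ring check and
		 * re-enabling the interrupt; poll again rather than lose it.
		 */
		if (example_rx_pending(q)) {
			example_disable_rx_irq(q);
			napi_schedule(napi);
		}
	}

	return work_done;
}

gem_poll() layers the BNA recovery on top of this pattern: with Rx interrupts masked, it frees one descriptor, clears the BNA bit, refills the slot via gem_rx_refill(), and only then re-enables interrupts, so the DMA always finds an available buffer.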