@@ -699,79 +699,74 @@ static inline __be16 sum16_as_be(__sum16 sum)
 	return (__force __be16)sum;
 }
 
-static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int tx_index;
 	struct tx_desc *desc;
 	u32 cmd_sts;
+	u16 l4i_chk;
 	int length;
 
 	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
-
-	tx_index = txq_alloc_desc_index(txq);
-	desc = &txq->tx_desc_area[tx_index];
-
-	if (nr_frags) {
-		txq_submit_frag_skb(txq, skb);
-		length = skb_headlen(skb);
-	} else {
-		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
-		length = skb->len;
-	}
-
-	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	l4i_chk = 0;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		int mac_hdr_len;
+		int tag_bytes;
 
 		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
 		       skb->protocol != htons(ETH_P_8021Q));
 
-		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
-			   GEN_IP_V4_CHECKSUM   |
-			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
+		if (unlikely(tag_bytes & ~12)) {
+			if (skb_checksum_help(skb) == 0)
+				goto no_csum;
+			kfree_skb(skb);
+			return 1;
+		}
 
-		mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
-		switch (mac_hdr_len - ETH_HLEN) {
-		case 0:
-			break;
-		case 4:
-			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-			break;
-		case 8:
-			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		case 12:
+		if (tag_bytes & 4)
 			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
+		if (tag_bytes & 8)
 			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		default:
-			if (net_ratelimit())
-				dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
-					   "mac header length is %d?!\n", mac_hdr_len);
-			break;
-		}
+
+		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
+			   GEN_IP_V4_CHECKSUM   |
+			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
 
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			cmd_sts |= UDP_FRAME;
-			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
+			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
 			break;
 		case IPPROTO_TCP:
-			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
+			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
 			break;
 		default:
 			BUG();
 		}
 	} else {
+no_csum:
 		/* Errata BTS #50, IHL must be 5 if no HW checksum */
 		cmd_sts |= 5 << TX_IHL_SHIFT;
-		desc->l4i_chk = 0;
 	}
 
+	tx_index = txq_alloc_desc_index(txq);
+	desc = &txq->tx_desc_area[tx_index];
+
+	if (nr_frags) {
+		txq_submit_frag_skb(txq, skb);
+		length = skb_headlen(skb);
+	} else {
+		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
+		length = skb->len;
+	}
+
+	desc->l4i_chk = l4i_chk;
+	desc->byte_cnt = length;
+	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
 	__skb_queue_tail(&txq->tx_skb, skb);
 
 	/* ensure all other descriptors are written before first cmd_sts */
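The rewritten length check works because the hardware can only describe 0, 4, 8, or 12 bytes of VLAN tagging between the Ethernet and IP headers, and those four values are exactly the ones whose only set bits are 4 and 8. A single test, `tag_bytes & ~12`, therefore rejects every unsupported length, and two bit tests replace the old switch statement. A standalone sketch of that logic (the flag bit values below are illustrative stand-ins, not the hardware's real bit assignments):

```c
/*
 * Standalone sketch (not driver code) of the tag-length test above.
 * MAC_HDR_EXTRA_* values here are illustrative stand-ins.
 */
#include <stdio.h>

#define MAC_HDR_EXTRA_4_BYTES	(1 << 3)	/* illustrative */
#define MAC_HDR_EXTRA_8_BYTES	(1 << 4)	/* illustrative */

int main(void)
{
	for (int tag_bytes = 0; tag_bytes <= 16; tag_bytes += 2) {
		if (tag_bytes & ~12) {
			/* 2, 6, 10, 14, 16: not expressible in hardware;
			 * the driver falls back to skb_checksum_help() */
			printf("%2d -> software checksum fallback\n",
			       tag_bytes);
			continue;
		}

		/* 0, 4, 8, 12: encodable with the two flag bits */
		unsigned int cmd_sts = 0;

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
		printf("%2d -> cmd_sts flags 0x%02x\n", tag_bytes, cmd_sts);
	}
	return 0;
}
```

Unsupported lengths now take the `skb_checksum_help()` fallback before any descriptor has been claimed, which is why the descriptor allocation moved below the checksum handling.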
@@ -786,6 +781,8 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 	txq_enable(txq);
 
 	txq->tx_desc_count += nr_frags + 1;
+
+	return 0;
 }
 
 static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
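For reference, the software fallback conceptually computes the RFC 1071 Internet checksum that the hardware would otherwise fill in; once `skb_checksum_help()` succeeds, the packet is no longer CHECKSUM_PARTIAL and the `no_csum` path applies the Errata BTS #50 IHL value instead. A minimal sketch of that ones'-complement sum (standalone C, not the kernel's implementation):

```c
/*
 * Conceptual sketch of the checksum the software fallback relies on:
 * the RFC 1071 ones'-complement sum over a byte buffer. This is a
 * standalone illustration, not the kernel's skb_checksum_help().
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t inet_csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {		/* sum 16-bit big-endian words */
		sum += (uint32_t)(data[0] << 8 | data[1]);
		data += 2;
		len -= 2;
	}
	if (len)			/* pad an odd trailing byte */
		sum += (uint32_t)(data[0] << 8);
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t words[] = { 0x45, 0x00, 0x00, 0x1c };

	printf("checksum: 0x%04x\n", inet_csum(words, sizeof(words)));
	return 0;
}
```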
@@ -794,7 +791,6 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	int queue;
 	struct tx_queue *txq;
 	struct netdev_queue *nq;
-	int entries_left;
 
 	queue = skb_get_queue_mapping(skb);
 	txq = mp->txq + queue;
@@ -815,14 +811,17 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	txq_submit_skb(txq, skb);
-	txq->tx_bytes += skb->len;
-	txq->tx_packets++;
-	dev->trans_start = jiffies;
+	if (!txq_submit_skb(txq, skb)) {
+		int entries_left;
+
+		txq->tx_bytes += skb->len;
+		txq->tx_packets++;
+		dev->trans_start = jiffies;
 
-	entries_left = txq->tx_ring_size - txq->tx_desc_count;
-	if (entries_left < MAX_SKB_FRAGS + 1)
-		netif_tx_stop_queue(nq);
+		entries_left = txq->tx_ring_size - txq->tx_desc_count;
+		if (entries_left < MAX_SKB_FRAGS + 1)
+			netif_tx_stop_queue(nq);
+	}
 
 	return NETDEV_TX_OK;
 }
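The caller-side change encodes the new ownership rule: on failure `txq_submit_skb()` has already freed the skb, so `mv643xx_eth_xmit()` must not touch it or account statistics, and it still returns NETDEV_TX_OK because the packet was consumed (NETDEV_TX_BUSY would ask the stack to requeue an skb that no longer exists). A minimal sketch of this consume-on-failure contract, using hypothetical stand-in names:

```c
/*
 * Standalone sketch (not driver code) of the consume-on-failure
 * contract: the callee frees the buffer when it fails, so the caller
 * may only use the buffer after a successful submit. All names are
 * hypothetical stand-ins for the driver's types.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	size_t len;
};

/* Returns 0 on success. On failure the buffer is freed here, mirroring
 * the kfree_skb()/return 1 path in txq_submit_skb(). */
static int submit(struct buf *b, int force_fail)
{
	if (force_fail) {
		free(b);		/* callee disposes of the buffer */
		return 1;
	}
	/* ... fill descriptor, hand buffer to hardware ... */
	return 0;
}

int main(void)
{
	size_t tx_bytes = 0;

	for (int fail = 0; fail <= 1; fail++) {
		struct buf *b = malloc(sizeof(*b));

		b->len = 100;
		if (!submit(b, fail)) {
			tx_bytes += b->len;	/* buffer still valid here */
			free(b);		/* reclaim after "transmit" */
		}
		/* on failure: no stats update, no further use of b */
	}
	printf("tx_bytes = %zu\n", tx_bytes);	/* prints 100 */
	return 0;
}
```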