@@ -816,27 +816,24 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 	if (ctx->mss) {	/* TSO */
 		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
-		ctx->l4_hdr_size = ((struct tcphdr *)
-				   skb_transport_header(skb))->doff * 4;
+		ctx->l4_hdr_size = tcp_hdrlen(skb);
 		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
 			if (ctx->ipv4) {
-				struct iphdr *iph = (struct iphdr *)
-						    skb_network_header(skb);
+				const struct iphdr *iph = ip_hdr(skb);
+
 				if (iph->protocol == IPPROTO_TCP)
-					ctx->l4_hdr_size = ((struct tcphdr *)
-					   skb_transport_header(skb))->doff * 4;
+					ctx->l4_hdr_size = tcp_hdrlen(skb);
 				else if (iph->protocol == IPPROTO_UDP)
 					/*
 					 * Use tcp header size so that bytes to
 					 * be copied are more than required by
 					 * the device.
 					 */
-					ctx->l4_hdr_size =
-							sizeof(struct tcphdr);
+					ctx->l4_hdr_size = sizeof(struct tcphdr);
 				else
 					ctx->l4_hdr_size = 0;
 			} else {
@@ -881,14 +878,17 @@ static void
 vmxnet3_prepare_tso(struct sk_buff *skb,
 		    struct vmxnet3_tx_ctx *ctx)
 {
-	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
+	struct tcphdr *tcph = tcp_hdr(skb);
+
 	if (ctx->ipv4) {
-		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
+		struct iphdr *iph = ip_hdr(skb);
+
 		iph->check = 0;
 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 						 IPPROTO_TCP, 0);
 	} else {
-		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
+		struct ipv6hdr *iph = ipv6_hdr(skb);
+
 		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
 					       IPPROTO_TCP, 0);
 	}
0 commit comments