 
 #include <crypto/aead.h>
 #include <net/xfrm.h>
+#include <net/esp.h>
 
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/ipsec.h"
@@ -48,17 +49,228 @@ struct mlx5e_ipsec_rx_metadata {
 	__be32 sa_handle;
 } __packed;
 
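+/* TX syndromes written towards the device; judging by their use further
+ * down, they select plain ESP offload vs. ESP offload combined with TCP
+ * LSO.
+ */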
+enum {
+	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
+	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
+};
+
+struct mlx5e_ipsec_tx_metadata {
+	__be16 mss_inv;        /* 1/MSS in 16bit fixed point, only for LSO */
+	__be16 seq;            /* LSBs of the first TCP seq, only for LSO */
+	u8 esp_next_proto;     /* Next protocol of ESP */
+} __packed;
+
 struct mlx5e_ipsec_metadata {
 	unsigned char syndrome;
 	union {
 		unsigned char raw[5];
 		/* from FPGA to host, on successful decrypt */
 		struct mlx5e_ipsec_rx_metadata rx;
+		/* from host to FPGA */
+		struct mlx5e_ipsec_tx_metadata tx;
 	} __packed content;
 	/* packet type ID field */
 	__be16 ethertype;
 } __packed;
 
+#define MAX_LSO_MSS 2048
+
+/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
+static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];
+
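+/* Note: the lookup below assumes gso_size < MAX_LSO_MSS; the driver is
+ * expected to cap the netdev's advertised GSO size so that this holds.
+ */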
+static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
+{
+	return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
+}
+
+static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
+{
+	struct mlx5e_ipsec_metadata *mdata;
+	struct ethhdr *eth;
+
+	if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
+		return ERR_PTR(-ENOMEM);
+
+	eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
+	skb->mac_header -= sizeof(*mdata);
+	mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);
+
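+	/* Move the MAC addresses to the front of the widened frame. Since
+	 * only 2 * ETH_ALEN bytes are moved, the original ethertype stays
+	 * where it was and now lines up exactly with mdata->ethertype, so
+	 * the frame becomes: DMAC, SMAC, metadata ethertype, syndrome,
+	 * raw[5], original ethertype.
+	 */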
+	memmove(skb->data, skb->data + sizeof(*mdata),
+		2 * ETH_ALEN);
+
+	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
+
+	memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
+	return mdata;
+}
+
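+/* Strip the ESP trailer (padding, pad length, next header) and ICV that
+ * the stack built, and fix up the IP length/checksum; the device is
+ * expected to regenerate the trailer as part of the offload.
+ */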
+static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
+{
+	unsigned int alen = crypto_aead_authsize(x->data);
+	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+	struct iphdr *ipv4hdr = ip_hdr(skb);
+	unsigned int trailer_len;
+	u8 plen;
+	int ret;
+
+	ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
+	if (unlikely(ret))
+		return ret;
+
+	trailer_len = alen + plen + 2;
+
+	ret = pskb_trim(skb, skb->len - trailer_len);
+	if (unlikely(ret))
+		return ret;
+	if (skb->protocol == htons(ETH_P_IP)) {
+		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
+		ip_send_check(ipv4hdr);
+	} else {
+		ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
+					     trailer_len);
+	}
+	return 0;
+}
+
+static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
+				struct mlx5_wqe_eth_seg *eseg, u8 mode,
+				struct xfrm_offload *xo)
+{
+	u8 proto;
+
+	/* Tunnel Mode:
+	 * SWP:      OutL3       InL3  InL4
+	 * Pkt: MAC  IP     ESP  IP    L4
+	 *
+	 * Transport Mode:
+	 * SWP:      OutL3       InL4
+	 *           InL3
+	 * Pkt: MAC  IP     ESP  L4
+	 *
+	 * Offsets are in 2-byte words, counting from start of frame
+	 */
+	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+	if (skb->protocol == htons(ETH_P_IPV6))
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+	if (mode == XFRM_MODE_TUNNEL) {
+		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+		if (xo->proto == IPPROTO_IPV6) {
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+			proto = inner_ipv6_hdr(skb)->nexthdr;
+		} else {
+			proto = inner_ip_hdr(skb)->protocol;
+		}
+	} else {
+		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+		if (skb->protocol == htons(ETH_P_IPV6))
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+		proto = xo->proto;
+	}
+	switch (proto) {
+	case IPPROTO_UDP:
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		/* Fall through */
+	case IPPROTO_TCP:
+		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+		break;
+	}
+}
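+/* Worked example (illustrative assumption, not taken from this patch):
+ * for IPv4 transport mode over plain Ethernet, the IP header starts at
+ * byte 14, so both L3 offsets are 14 / 2 = 7; with a 20-byte IP header,
+ * an 8-byte ESP header and an 8-byte IV, the TCP header starts at byte
+ * 50, giving swp_inner_l4_offset = 25.
+ */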
+
+static void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_offload *xo)
+{
+	int iv_offset;
+	__be64 seqno;
+
+	/* Place the SN in the IV field */
+	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
+	skb_store_bits(skb, iv_offset, &seqno, 8);
+}
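+/* The 8 bytes after the ESP header are the explicit IV slot of GCM-based
+ * ESP, which this offload targets; seeding it with the full 64-bit
+ * sequence number presumably lets the device derive a unique per-packet
+ * (and, for LSO, per-segment) IV.
+ */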
+
+static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
+				     struct mlx5e_ipsec_metadata *mdata,
+				     struct xfrm_offload *xo)
+{
+	struct ip_esp_hdr *esph;
+	struct tcphdr *tcph;
+
+	if (skb_is_gso(skb)) {
+		/* Add LSO metadata indication */
+		esph = ip_esp_hdr(skb);
+		tcph = inner_tcp_hdr(skb);
+		netdev_dbg(skb->dev, "Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
+			   skb->network_header,
+			   skb->transport_header,
+			   skb->inner_network_header,
+			   skb->inner_transport_header);
+		netdev_dbg(skb->dev, "Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
+			   skb->len, skb_shinfo(skb)->gso_size,
+			   ntohs(tcph->source), ntohs(tcph->dest),
+			   ntohl(tcph->seq), ntohl(esph->seq_no));
+		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
+		mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
+		mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
+	} else {
+		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
+	}
+	mdata->content.tx.esp_next_proto = xo->proto;
+
+	netdev_dbg(skb->dev, "TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
+		   mdata->syndrome, mdata->content.tx.esp_next_proto,
+		   ntohs(mdata->content.tx.mss_inv),
+		   ntohs(mdata->content.tx.seq));
+}
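+/* For LSO, 1/MSS and the 16 LSBs of the first TCP sequence number
+ * presumably let the device compute each segment's index, and from it the
+ * per-segment ESP sequence number, without parsing TCP itself.
+ */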
+
+struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+					  struct mlx5e_tx_wqe *wqe,
+					  struct sk_buff *skb)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	struct mlx5e_ipsec_metadata *mdata;
+	struct xfrm_state *x;
+
+	if (!xo)
+		return skb;
+
+	if (unlikely(skb->sp->len != 1)) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
+		goto drop;
+	}
+
+	x = xfrm_input_state(skb);
+	if (unlikely(!x)) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
+		goto drop;
+	}
+
+	if (unlikely(!x->xso.offload_handle ||
+		     (skb->protocol != htons(ETH_P_IP) &&
+		      skb->protocol != htons(ETH_P_IPV6)))) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
+		goto drop;
+	}
+
+	if (!skb_is_gso(skb)) {
+		if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
+			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
+			goto drop;
+		}
+	}
+
+	mdata = mlx5e_ipsec_add_metadata(skb);
+	if (IS_ERR(mdata)) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+		goto drop;
+	}
+	mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+	mlx5e_ipsec_set_iv(skb, xo);
+	mlx5e_ipsec_set_metadata(skb, mdata, xo);
+
+	return skb;
+
+drop:
+	kfree_skb(skb);
+	return NULL;
+}
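+/* A simplified sketch of the expected call site in the driver's xmit path
+ * (hypothetical, not part of this file):
+ *
+ *	if (skb->sp) {
+ *		skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
+ *		if (!skb)
+ *			return NETDEV_TX_OK;  // already freed on drop
+ *	}
+ */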
+
 static inline struct xfrm_state *
 mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
 		     struct mlx5e_ipsec_metadata *mdata)
@@ -133,3 +345,34 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
 
 	return skb;
 }
+
+bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+			       netdev_features_t features)
+{
+	struct xfrm_state *x;
+
+	if (skb->sp && skb->sp->len) {
+		x = skb->sp->xvec[0];
+		if (x && x->xso.offload_handle)
+			return true;
+	}
+	return false;
+}
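+/* The netdev and features arguments are currently unused: the helper only
+ * reports whether the first xfrm state on the skb carries an offload
+ * handle, so a caller (presumably the ndo_features_check hook) can decide
+ * which features to keep for this skb.
+ */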
+
+void mlx5e_ipsec_build_inverse_table(void)
+{
+	u16 mss_inv;
+	u32 mss;
+
+	/* Calculate 1/x inverse table for use in GSO data path.
+	 * Using this table, we provide the IPSec accelerator with the value
+	 * of 1/gso_size so that it can infer the position of each segment
+	 * inside the GSO, increment the ESP sequence number and generate
+	 * the IV.
+	 * The HW needs this value in Q0.16 fixed-point number format.
+	 */
+	mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
+	for (mss = 2; mss < MAX_LSO_MSS; mss++) {
+		mss_inv = ((1ULL << 32) / mss) >> 16;
+		mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
+	}
+}
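+/* Worked example: for mss = 1460, (1ULL << 32) / 1460 = 2941758, and
+ * 2941758 >> 16 = 44, so the table stores htons(44); 44 / 65536 is the
+ * Q0.16 truncation of 1/1460.
+ */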