@@ -3,6 +3,7 @@

 #ifdef CONFIG_CHELSIO_TLS_DEVICE
 #include "chcr_ktls.h"
+#include "clip_tbl.h"

 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
 /*
@@ -153,8 +154,10 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
                 /* FALLTHRU */
         case KTLS_CONN_SET_TCB_RPL:
                 /* Check if l2t state is valid, then move to ready state. */
-                if (cxgb4_check_l2t_valid(tx_info->l2te))
+                if (cxgb4_check_l2t_valid(tx_info->l2te)) {
                         tx_info->connection_state = KTLS_CONN_TX_READY;
+                        atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
+                }
                 break;

         case KTLS_CONN_TX_READY:
@@ -219,6 +222,56 @@ static int chcr_ktls_act_open_req(struct sock *sk,
         return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
 }

+/*
+ * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req6(struct sock *sk,
+                                   struct chcr_ktls_info *tx_info,
+                                   int atid)
+{
+        struct inet_sock *inet = inet_sk(sk);
+        struct cpl_t6_act_open_req6 *cpl6;
+        struct cpl_act_open_req6 *cpl;
+        struct sk_buff *skb;
+        unsigned int len;
+        int qid_atid;
+        u64 options;
+
+        len = sizeof(*cpl6);
+        skb = alloc_skb(len, GFP_KERNEL);
+        if (unlikely(!skb))
+                return -ENOMEM;
+        /* mark it a control pkt */
+        set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+        cpl6 = __skb_put_zero(skb, len);
+        cpl = (struct cpl_act_open_req6 *)cpl6;
+        INIT_TP_WR(cpl6, 0);
+        qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
+        OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
+        cpl->local_port = inet->inet_sport;
+        cpl->peer_port = inet->inet_dport;
+        cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
+        cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
+        cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
+        cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
+
+        /* first 64 bit option field. */
+        options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+                  SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+        cpl->opt0 = cpu_to_be64(options);
+        /* next 64 bit option field. */
+        options =
+                TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+        cpl->opt2 = htonl(options);
+
+        return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
+
 /*
  * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
  * @sk - tcp socket.
@@ -245,7 +298,13 @@ static int chcr_setup_connection(struct sock *sk,
                 ret = chcr_ktls_act_open_req(sk, tx_info, atid);
         } else {
                 tx_info->ip_family = AF_INET6;
-                ret = -EOPNOTSUPP;
+                ret =
+                        cxgb4_clip_get(tx_info->netdev,
+                                       (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
+                                       1);
+                if (ret)
+                        goto out;
+                ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
         }

         /* if return type is NET_XMIT_CN, msg will be sent but delayed, mark ret
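The cxgb4_clip_get() call above takes a reference on the IPv6 source address in the adapter's CLIP (Compressed Local IP) table; the matching cxgb4_clip_release() is done in chcr_ktls_dev_del() in the next hunk. A small sketch of the pairing, using only the exported clip_tbl.h helpers (the wrapper below is illustrative, not driver code):

/* Illustrative only: take or drop one CLIP table reference for an IPv6
 * address.  cxgb4_clip_get()/cxgb4_clip_release() are the exported
 * cxgb4 helpers; the final argument is 1 for IPv6, as in this patch.
 */
static int ktls_clip_ref(struct net_device *dev,
                         const struct in6_addr *addr, bool get)
{
        const u32 *lip = (const u32 *)addr;

        if (get)
                return cxgb4_clip_get(dev, lip, 1);

        cxgb4_clip_release(dev, lip, 1);
        return 0;
}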
@@ -322,23 +381,35 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
         struct chcr_ktls_ofld_ctx_tx *tx_ctx =
                                 chcr_get_ktls_tx_context(tls_ctx);
         struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+        struct sock *sk;

         if (!tx_info)
                 return;
+        sk = tx_info->sk;

         spin_lock(&tx_info->lock);
         tx_info->connection_state = KTLS_CONN_CLOSED;
         spin_unlock(&tx_info->lock);

+        /* clear l2t entry */
         if (tx_info->l2te)
                 cxgb4_l2t_release(tx_info->l2te);

+        /* clear clip entry */
+        if (tx_info->ip_family == AF_INET6)
+                cxgb4_clip_release(netdev,
+                                   (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+                                   1);
+
+        /* clear tid */
         if (tx_info->tid != -1) {
                 /* clear tcb state and then release tid */
                 chcr_ktls_mark_tcb_close(tx_info);
                 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                  tx_info->tid, tx_info->ip_family);
         }
+
+        atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
         kvfree(tx_info);
         tx_ctx->chcr_info = NULL;
 }
@@ -424,7 +495,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
              ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
                 memcpy(daaddr, &sk->sk_daddr, 4);
         } else {
-                goto out2;
+                memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
         }

         /* get the l2t index */
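Taken together with the surrounding context, the destination-address selection in chcr_ktls_dev_add() now covers both families: a v4 socket, or a v6 socket talking to a v4-mapped peer, contributes 4 bytes from sk_daddr, while a native v6 peer contributes all 16 bytes of sk_v6_daddr. A consolidated sketch of that logic, assuming the visible condition is the whole test (illustrative helper, not part of the patch):

#include <net/sock.h>
#include <net/ipv6.h>          /* ipv6_addr_type(), IPV6_ADDR_MAPPED */

/* Illustrative helper mirroring the address selection above. */
static void ktls_copy_daddr(const struct sock *sk, u8 *daddr)
{
        if (sk->sk_family == AF_INET ||
            (sk->sk_family == AF_INET6 &&
             ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED))
                memcpy(daddr, &sk->sk_daddr, 4);        /* IPv4, or v4-mapped v6 */
        else
                memcpy(daddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);      /* native IPv6 */
}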
@@ -458,10 +529,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
         if (ret)
                 goto out2;

+        atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
         return 0;
 out2:
         kvfree(tx_info);
 out:
+        atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
         return ret;
 }

@@ -729,6 +802,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
                                                  TCB_SND_UNA_RAW_V
                                                  (TCB_SND_UNA_RAW_M),
                                                  TCB_SND_UNA_RAW_V(0), 0);
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
                 cpl++;
         }
         /* update ack */
@@ -1152,6 +1226,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,

         chcr_txq_advance(&q->q, ndesc);
         cxgb4_ring_tx_db(adap, &q->q, ndesc);
+        atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);

         return 0;
 }
@@ -1562,6 +1637,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
         /* check if it is a complete record */
         if (tls_end_offset == record->len) {
                 nskb = skb;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
         } else {
                 dev_kfree_skb_any(skb);

@@ -1579,6 +1655,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
                  */
                 if (chcr_ktls_update_snd_una(tx_info, q))
                         goto out;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
         }

         if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
@@ -1649,6 +1726,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                 /* free the last trimmed portion */
                 dev_kfree_skb_any(skb);
                 skb = tmp_skb;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
         }
         data_len = skb->data_len;
         /* check if the middle record's start point is 16 byte aligned. CTR
@@ -1720,6 +1798,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                  */
                 if (chcr_ktls_update_snd_una(tx_info, q))
                         goto out;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
         } else {
                 /* Else means, its a partial first part of the record. Check if
                  * its only the header, don't need to send for encryption then.
@@ -1734,6 +1813,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                         }
                         return 0;
                 }
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
         }

         if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
@@ -1755,6 +1835,7 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         struct tcphdr *th = tcp_hdr(skb);
         int data_len, qidx, ret = 0, mss;
         struct tls_record_info *record;
+        struct chcr_stats_debug *stats;
         struct chcr_ktls_info *tx_info;
         u32 tls_end_offset, tcp_seq;
         struct tls_context *tls_ctx;
@@ -1800,6 +1881,8 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                 return NETDEV_TX_BUSY;

         adap = tx_info->adap;
+        stats = &adap->chcr_stats;
+
         qidx = skb->queue_mapping;
         q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
         cxgb4_reclaim_completed_tx(adap, &q->q, true);
@@ -1829,6 +1912,7 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
          * part of the record is received. Incase of partial end part of record,
          * we will send the complete record again.
          */
+
         do {
                 int i;

@@ -1843,11 +1927,13 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                  */
                 if (unlikely(!record)) {
                         spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+                        atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
                         goto out;
                 }

                 if (unlikely(tls_record_is_start_marker(record))) {
                         spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+                        atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
                         goto out;
                 }

@@ -1918,6 +2004,10 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         } while (data_len > 0);

         tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
+
+        atomic64_inc(&stats->ktls_tx_encrypted_packets);
+        atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+
         /* tcp finish is set, send a separate tcp msg including all the options
          * as well.
          */
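All of the counters added in this patch are atomic64_t members of adap->chcr_stats (struct chcr_stats_debug), bumped with atomic64_inc()/atomic64_add() on the transmit path. A reader, for instance a debugfs show routine, would sample them with atomic64_read(); a minimal sketch, assuming the usual seq_file pattern with the adapter in seq->private (function name illustrative, not part of this commit):

#include <linux/seq_file.h>

/* Illustrative only: dump a few of the new kTLS Tx counters. */
static int ktls_stats_show(struct seq_file *seq, void *v)
{
        struct adapter *adap = seq->private;
        struct chcr_stats_debug *stats = &adap->chcr_stats;

        seq_printf(seq, "Tx connections opened: %lld\n",
                   atomic64_read(&stats->ktls_tx_connection_open));
        seq_printf(seq, "Tx connections closed: %lld\n",
                   atomic64_read(&stats->ktls_tx_connection_close));
        seq_printf(seq, "Tx packets encrypted:  %lld\n",
                   atomic64_read(&stats->ktls_tx_encrypted_packets));
        seq_printf(seq, "Tx bytes encrypted:    %lld\n",
                   atomic64_read(&stats->ktls_tx_encrypted_bytes));
        return 0;
}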