@@ -361,9 +361,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 			      struct tls_context *tls_ctx,
 			      enum tls_offload_ctx_dir direction)
 {
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx =
-				chcr_get_ktls_tx_context(tls_ctx);
-	struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+	struct chcr_ktls_info *tx_info = chcr_get_ktls_tx_info(tls_ctx);
 	struct ch_ktls_port_stats_debug *port_stats;
 	struct chcr_ktls_uld_ctx *u_ctx;
 
@@ -396,7 +394,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 	port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
 	atomic64_inc(&port_stats->ktls_tx_connection_close);
 	kvfree(tx_info);
-	tx_ctx->chcr_info = NULL;
+	chcr_set_ktls_tx_info(tls_ctx, NULL);
 	/* release module refcount */
 	module_put(THIS_MODULE);
 }
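
Note: the chcr_get_ktls_tx_info()/chcr_set_ktls_tx_info() accessors used above are not defined in these hunks. A minimal sketch of how they are presumably implemented, assuming they stash the driver's per-connection pointer in the generic offload context's driver_state scratch area (driver_state and TLS_DRIVER_STATE_SIZE_TX appear in the final hunk below; the exact header placement and the BUILD_BUG_ON guard are assumptions):

/* Sketch only: assumed accessors built on the generic TLS offload
 * context (tls_offload_ctx_tx() is used later in this diff).
 */
static inline struct chcr_ktls_info *
chcr_get_ktls_tx_info(struct tls_context *tls_ctx)
{
	struct tls_offload_context_tx *octx = tls_offload_ctx_tx(tls_ctx);

	/* the stored pointer must fit in the driver_state area */
	BUILD_BUG_ON(sizeof(struct chcr_ktls_info *) >
		     TLS_DRIVER_STATE_SIZE_TX);
	return *(struct chcr_ktls_info **)octx->driver_state;
}

static inline void
chcr_set_ktls_tx_info(struct tls_context *tls_ctx,
		      struct chcr_ktls_info *tx_info)
{
	struct tls_offload_context_tx *octx = tls_offload_ctx_tx(tls_ctx);

	*(struct chcr_ktls_info **)octx->driver_state = tx_info;
}
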
@@ -417,7 +415,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct ch_ktls_port_stats_debug *port_stats;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
 	struct chcr_ktls_uld_ctx *u_ctx;
 	struct chcr_ktls_info *tx_info;
 	struct dst_entry *dst;
@@ -427,8 +424,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	u8 daaddr[16];
 	int ret = -1;
 
-	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
-
 	pi = netdev_priv(netdev);
 	adap = pi->adapter;
 	port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
@@ -440,7 +435,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 		goto out;
 	}
 
-	if (tx_ctx->chcr_info)
+	if (chcr_get_ktls_tx_info(tls_ctx))
 		goto out;
 
 	if (u_ctx && u_ctx->detach)
@@ -566,7 +561,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 		goto free_tid;
 
 	atomic64_inc(&port_stats->ktls_tx_ctx);
-	tx_ctx->chcr_info = tx_info;
+	chcr_set_ktls_tx_info(tls_ctx, tx_info);
 
 	return 0;
 
@@ -647,7 +642,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 {
 	const struct cpl_act_open_rpl *p = (void *)input;
 	struct chcr_ktls_info *tx_info = NULL;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+	struct tls_offload_context_tx *tx_ctx;
 	struct chcr_ktls_uld_ctx *u_ctx;
 	unsigned int atid, tid, status;
 	struct tls_context *tls_ctx;
@@ -686,7 +681,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 		cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
 		/* Adding tid */
 		tls_ctx = tls_get_ctx(tx_info->sk);
-		tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+		tx_ctx = tls_offload_ctx_tx(tls_ctx);
 		u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
 		if (u_ctx) {
 			ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
@@ -1926,7 +1921,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
 	struct ch_ktls_port_stats_debug *port_stats;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+	struct tls_offload_context_tx *tx_ctx;
 	struct ch_ktls_stats_debug *stats;
 	struct tcphdr *th = tcp_hdr(skb);
 	int data_len, qidx, ret = 0, mss;
@@ -1946,16 +1941,16 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
 
 	tls_ctx = tls_get_ctx(skb->sk);
+	tx_ctx = tls_offload_ctx_tx(tls_ctx);
 	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
 	/* Don't quit on NULL: if tls_device_down is running in parallel,
-	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
 	 * true. Rather continue processing this packet.
 	 */
 	if (unlikely(tls_netdev && tls_netdev != dev))
 		goto out;
 
-	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
-	tx_info = tx_ctx->chcr_info;
+	tx_info = chcr_get_ktls_tx_info(tls_ctx);
 
 	if (unlikely(!tx_info))
 		goto out;
@@ -1981,19 +1976,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * we will send the complete record again.
 	 */
 
-	spin_lock_irqsave(&tx_ctx->base.lock, flags);
+	spin_lock_irqsave(&tx_ctx->lock, flags);
 
 	do {
 
 		cxgb4_reclaim_completed_tx(adap, &q->q, true);
 		/* fetch the tls record */
-		record = tls_get_record(&tx_ctx->base, tcp_seq,
+		record = tls_get_record(tx_ctx, tcp_seq,
 					&tx_info->record_no);
 		/* By the time packet reached to us, ACK is received, and record
 		 * won't be found in that case, handle it gracefully.
 		 */
 		if (unlikely(!record)) {
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+			spin_unlock_irqrestore(&tx_ctx->lock, flags);
			atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
			goto out;
		}
@@ -2017,7 +2012,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
						    tls_end_offset !=
						    record->len);
			if (ret) {
-				spin_unlock_irqrestore(&tx_ctx->base.lock,
+				spin_unlock_irqrestore(&tx_ctx->lock,
						       flags);
				goto out;
			}
@@ -2048,7 +2043,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
			/* free the refcount taken earlier */
			if (tls_end_offset < data_len)
				dev_kfree_skb_any(skb);
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+			spin_unlock_irqrestore(&tx_ctx->lock, flags);
			goto out;
		}
 
@@ -2084,7 +2079,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
		/* if any failure, come out from the loop. */
		if (ret) {
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+			spin_unlock_irqrestore(&tx_ctx->lock, flags);
			if (th->fin)
				dev_kfree_skb_any(skb);
 
@@ -2099,7 +2094,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
	} while (data_len > 0);
 
-	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
 
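
The ".base" drops out of the lock and tls_get_record() calls above because tx_ctx now points at the generic struct tls_offload_context_tx itself instead of a driver wrapper that embedded it. Judging from the deleted tx_ctx->base.lock and tx_ctx->chcr_info accesses, the wrapper being retired presumably looked like this (assumed layout, inferred from this diff only):

/* Assumed layout of the removed wrapper struct. */
struct chcr_ktls_ofld_ctx_tx {
	struct tls_offload_context_tx base;	/* generic offload context */
	struct chcr_ktls_info *chcr_info;	/* per-connection driver state */
};
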
@@ -2187,17 +2182,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info)
 static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
 {
 	struct ch_ktls_port_stats_debug *port_stats;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+	struct tls_offload_context_tx *tx_ctx;
 	struct chcr_ktls_info *tx_info;
 	unsigned long index;
 
 	xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
-		tx_info = tx_ctx->chcr_info;
+		tx_info = __chcr_get_ktls_tx_info(tx_ctx);
 		clear_conn_resources(tx_info);
 		port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
 		atomic64_inc(&port_stats->ktls_tx_connection_close);
 		kvfree(tx_info);
-		tx_ctx->chcr_info = NULL;
+		memset(tx_ctx->driver_state, 0, TLS_DRIVER_STATE_SIZE_TX);
 		/* release module refcount */
 		module_put(THIS_MODULE);
 	}
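
In this xarray walk only the stored struct tls_offload_context_tx is at hand (there is no socket, hence no tls_context), so the lookup goes through a lower-level helper and teardown clears driver_state directly, which amounts to storing a NULL pointer. A hedged sketch of that helper, mirroring the assumed accessors shown after the first hunk:

/* Sketch only: assumed getter variant that takes the offload context
 * directly, for callers iterating the tid xarray.
 */
static inline struct chcr_ktls_info *
__chcr_get_ktls_tx_info(struct tls_offload_context_tx *octx)
{
	return *(struct chcr_ktls_info **)octx->driver_state;
}
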