 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/key.h>
 #include <linux/nvme-tcp.h>
+#include <linux/nvme-keyring.h>
 #include <net/sock.h>
 #include <net/tcp.h>
+#include <net/tls.h>
+#include <net/handshake.h>
 #include <linux/inet.h>
 #include <linux/llist.h>
 #include <crypto/hash.h>
@@ -66,6 +70,16 @@ device_param_cb(idle_poll_period_usecs, &set_param_ops,
 MODULE_PARM_DESC(idle_poll_period_usecs,
                 "nvmet tcp io_work poll till idle time period in usecs: Default 0");

+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+/*
+ * TLS handshake timeout
+ */
+static int tls_handshake_timeout = 10;
+module_param(tls_handshake_timeout, int, 0644);
+MODULE_PARM_DESC(tls_handshake_timeout,
+                 "nvme TLS handshake timeout in seconds (default 10)");
+#endif
+
 #define NVMET_TCP_RECV_BUDGET           8
 #define NVMET_TCP_SEND_BUDGET           8
 #define NVMET_TCP_IO_WORK_BUDGET        64
@@ -122,8 +136,10 @@ struct nvmet_tcp_cmd {

 enum nvmet_tcp_queue_state {
         NVMET_TCP_Q_CONNECTING,
+        NVMET_TCP_Q_TLS_HANDSHAKE,
         NVMET_TCP_Q_LIVE,
         NVMET_TCP_Q_DISCONNECTING,
+        NVMET_TCP_Q_FAILED,
 };

 struct nvmet_tcp_queue {
@@ -132,6 +148,7 @@ struct nvmet_tcp_queue {
         struct work_struct      io_work;
         struct nvmet_cq         nvme_cq;
         struct nvmet_sq         nvme_sq;
+        struct kref             kref;

         /* send state */
         struct nvmet_tcp_cmd    *cmds;
@@ -155,6 +172,10 @@ struct nvmet_tcp_queue {
         struct ahash_request    *snd_hash;
         struct ahash_request    *rcv_hash;

+        /* TLS state */
+        key_serial_t            tls_pskid;
+        struct delayed_work     tls_handshake_tmo_work;
+
         unsigned long           poll_end;

         spinlock_t              state_lock;
@@ -918,6 +939,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 free_crypto:
         if (queue->hdr_digest || queue->data_digest)
                 nvmet_tcp_free_crypto(queue);
+        queue->state = NVMET_TCP_Q_FAILED;
         return ret;
 }

@@ -1283,12 +1305,25 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
         return ret;
 }

+static void nvmet_tcp_release_queue(struct kref *kref)
+{
+        struct nvmet_tcp_queue *queue =
+                container_of(kref, struct nvmet_tcp_queue, kref);
+
+        WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
+        queue_work(nvmet_wq, &queue->release_work);
+}
+
 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
 {
         spin_lock(&queue->state_lock);
+        if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
+                /* Socket closed during handshake */
+                tls_handshake_cancel(queue->sock->sk);
+        }
         if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
                 queue->state = NVMET_TCP_Q_DISCONNECTING;
-                queue_work(nvmet_wq, &queue->release_work);
+                kref_put(&queue->kref, nvmet_tcp_release_queue);
         }
         spin_unlock(&queue->state_lock);
 }
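
The kref added here is what lets three different paths give up the queue safely: the queue is created holding one reference (kref_init() in the allocation hunk further down), nvmet_tcp_tls_handshake() takes a second one for the duration of the handshake upcall and drops it again from the done callback, the timeout worker or its own error path, and nvmet_tcp_schedule_release_queue() drops the initial reference, so release_work is queued exactly once by whichever kref_put() turns out to be the last. A rough userspace illustration of the pattern, with C11 atomics standing in for struct kref (every name in the sketch is made up, it is not the driver code):

#include <stdatomic.h>
#include <stdio.h>

struct queue {
        atomic_int ref;        /* plays the role of queue->kref        */
        int released;          /* stands in for queuing release_work   */
};

static void release_queue(struct queue *q)   /* ~nvmet_tcp_release_queue() */
{
        q->released = 1;
        printf("release_work queued exactly once\n");
}

static void queue_get(struct queue *q)       /* ~kref_get() */
{
        atomic_fetch_add(&q->ref, 1);
}

static void queue_put(struct queue *q)       /* ~kref_put() */
{
        if (atomic_fetch_sub(&q->ref, 1) == 1)
                release_queue(q);
}

int main(void)
{
        struct queue q = { .ref = 1 };  /* ~kref_init() at queue allocation */

        queue_get(&q);  /* taken before the handshake upcall is started  */
        queue_put(&q);  /* dropped by the handshake done/timeout handler */
        queue_put(&q);  /* dropped by nvmet_tcp_schedule_release_queue() */
        return q.released ? 0 : 1;
}
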
@@ -1485,6 +1520,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
         mutex_unlock(&nvmet_tcp_queue_mutex);

         nvmet_tcp_restore_socket_callbacks(queue);
+        cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
         cancel_work_sync(&queue->io_work);
         /* stop accepting incoming data */
         queue->rcv_state = NVMET_TCP_RECV_ERR;
@@ -1512,8 +1548,13 @@ static void nvmet_tcp_data_ready(struct sock *sk)

         read_lock_bh(&sk->sk_callback_lock);
         queue = sk->sk_user_data;
-        if (likely(queue))
-                queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+        if (likely(queue)) {
+                if (queue->data_ready)
+                        queue->data_ready(sk);
+                if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
+                        queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
+                                      &queue->io_work);
+        }
         read_unlock_bh(&sk->sk_callback_lock);
 }

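
While the handshake is running, the bytes arriving on the socket are TLS records meant for the userspace handshake agent, so the reworked nvmet_tcp_data_ready() above chains to the previously saved callback (queue->data_ready) and defers io_work until the queue has left NVMET_TCP_Q_TLS_HANDSHAKE. A small standalone C sketch of that callback-chaining idea (types and names are illustrative, not the driver's):

#include <stdio.h>

typedef void (*data_ready_fn)(void *sk);

struct fake_queue {
        data_ready_fn saved_data_ready;  /* ~queue->data_ready                  */
        int tls_handshaking;             /* ~state == NVMET_TCP_Q_TLS_HANDSHAKE */
};

static void original_data_ready(void *sk)
{
        printf("saved sk_data_ready runs, handshake agent makes progress\n");
}

static void chained_data_ready(struct fake_queue *q, void *sk)
{
        if (q->saved_data_ready)
                q->saved_data_ready(sk);   /* always let the saved callback run */
        if (!q->tls_handshaking)
                printf("queue io_work\n"); /* only once the handshake is over   */
}

int main(void)
{
        struct fake_queue q = { original_data_ready, 1 };

        chained_data_ready(&q, NULL);   /* during the handshake: no io_work */
        q.tls_handshaking = 0;
        chained_data_ready(&q, NULL);   /* afterwards: io_work is scheduled */
        return 0;
}
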
@@ -1621,6 +1662,87 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
         return ret;
 }

+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static void nvmet_tcp_tls_handshake_done(void *data, int status,
+                                         key_serial_t peerid)
+{
+        struct nvmet_tcp_queue *queue = data;
+
+        pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
+                 queue->idx, peerid, status);
+        spin_lock_bh(&queue->state_lock);
+        if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
+                spin_unlock_bh(&queue->state_lock);
+                return;
+        }
+        if (!status) {
+                queue->tls_pskid = peerid;
+                queue->state = NVMET_TCP_Q_CONNECTING;
+        } else
+                queue->state = NVMET_TCP_Q_FAILED;
+        spin_unlock_bh(&queue->state_lock);
+
+        cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
+        if (status)
+                nvmet_tcp_schedule_release_queue(queue);
+        else
+                nvmet_tcp_set_queue_sock(queue);
+        kref_put(&queue->kref, nvmet_tcp_release_queue);
+}
+
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
+{
+        struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
+                        struct nvmet_tcp_queue, tls_handshake_tmo_work);
+
+        pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
+        /*
+         * If tls_handshake_cancel() fails we've lost the race with
+         * nvmet_tcp_tls_handshake_done() */
+        if (!tls_handshake_cancel(queue->sock->sk))
+                return;
+        spin_lock_bh(&queue->state_lock);
+        if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
+                spin_unlock_bh(&queue->state_lock);
+                return;
+        }
+        queue->state = NVMET_TCP_Q_FAILED;
+        spin_unlock_bh(&queue->state_lock);
+        nvmet_tcp_schedule_release_queue(queue);
+        kref_put(&queue->kref, nvmet_tcp_release_queue);
+}
+
+static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
+{
+        int ret = -EOPNOTSUPP;
+        struct tls_handshake_args args;
+
+        if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
+                pr_warn("cannot start TLS in state %d\n", queue->state);
+                return -EINVAL;
+        }
+
+        kref_get(&queue->kref);
+        pr_debug("queue %d: TLS ServerHello\n", queue->idx);
+        memset(&args, 0, sizeof(args));
+        args.ta_sock = queue->sock;
+        args.ta_done = nvmet_tcp_tls_handshake_done;
+        args.ta_data = queue;
+        args.ta_keyring = key_serial(queue->port->nport->keyring);
+        args.ta_timeout_ms = tls_handshake_timeout * 1000;
+
+        ret = tls_server_hello_psk(&args, GFP_KERNEL);
+        if (ret) {
+                kref_put(&queue->kref, nvmet_tcp_release_queue);
+                pr_err("failed to start TLS, err=%d\n", ret);
+        } else {
+                queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
+                                   tls_handshake_timeout * HZ);
+        }
+        return ret;
+}
+#endif
+
 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
                 struct socket *newsock)
 {
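
nvmet_tcp_tls_handshake_done() and nvmet_tcp_tls_handshake_timeout() just above can fire for the same queue; the code lets the timeout worker continue only when tls_handshake_cancel() confirms the handshake was still pending, so exactly one of the two paths marks the queue NVMET_TCP_Q_FAILED and schedules release. A minimal userspace sketch of that single-owner idea (the atomic flag and the function names are stand-ins, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool handshake_pending = true;

/* Stand-in for tls_handshake_cancel(): only the caller that actually
 * cancelled a still-pending handshake sees 'true'. */
static bool cancel_handshake(void)
{
        return atomic_exchange(&handshake_pending, false);
}

static void done_callback(void)   /* ~nvmet_tcp_tls_handshake_done()    */
{
        if (!atomic_exchange(&handshake_pending, false))
                return;                 /* timeout path already claimed it  */
        printf("done callback owns the result\n");
}

static void timeout_worker(void)  /* ~nvmet_tcp_tls_handshake_timeout() */
{
        if (!cancel_handshake())
                return;                 /* lost the race, done path owns it */
        printf("timeout path marks the queue FAILED\n");
}

int main(void)
{
        timeout_worker();   /* whichever runs first claims the handshake */
        done_callback();    /* the other one backs off                   */
        return 0;
}
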
@@ -1636,11 +1758,16 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,

         INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
         INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
+        kref_init(&queue->kref);
         queue->sock = newsock;
         queue->port = port;
         queue->nr_cmds = 0;
         spin_lock_init(&queue->state_lock);
-        queue->state = NVMET_TCP_Q_CONNECTING;
+        if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
+            NVMF_TCP_SECTYPE_TLS13)
+                queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
+        else
+                queue->state = NVMET_TCP_Q_CONNECTING;
         INIT_LIST_HEAD(&queue->free_list);
         init_llist_head(&queue->resp_list);
         INIT_LIST_HEAD(&queue->resp_send_list);
@@ -1671,6 +1798,25 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
         list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
         mutex_unlock(&nvmet_tcp_queue_mutex);

+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+        INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
+                          nvmet_tcp_tls_handshake_timeout);
+        if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
+                struct sock *sk = queue->sock->sk;
+
+                /* Restore the default callbacks before starting upcall */
+                read_lock_bh(&sk->sk_callback_lock);
+                sk->sk_user_data = NULL;
+                sk->sk_data_ready = port->data_ready;
+                read_unlock_bh(&sk->sk_callback_lock);
+                if (!nvmet_tcp_tls_handshake(queue))
+                        return;
+
+                /* TLS handshake failed, terminate the connection */
+                goto out_destroy_sq;
+        }
+#endif
+
         ret = nvmet_tcp_set_queue_sock(queue);
         if (ret)
                 goto out_destroy_sq;