Skip to content

Commit 675b453

Browse files
hreinecke authored and keithbusch committed
nvmet-tcp: enable TLS handshake upcall
TLS handshake is handled in userspace with the netlink tls handshake protocol. The patch adds a function to start the TLS handshake upcall for any incoming network connections if the TCP TSAS sectype is set to 'tls1.3'. A config option NVME_TARGET_TCP_TLS selects whether the TLS handshake upcall should be compiled in. The patch also adds reference counting to struct nvmet_tcp_queue to ensure the queue is always valid when the TLS handshake completes. Signed-off-by: Hannes Reinecke <[email protected]> Reviewed-by: Sagi Grimberg <[email protected]> Signed-off-by: Keith Busch <[email protected]>
1 parent eb39881 commit 675b453

File tree

4 files changed

+187
-4
lines changed

4 files changed

+187
-4
lines changed

drivers/nvme/target/Kconfig

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,21 @@ config NVME_TARGET_TCP
8484

8585
If unsure, say N.
8686

87+
config NVME_TARGET_TCP_TLS
88+
bool "NVMe over Fabrics TCP target TLS encryption support"
89+
depends on NVME_TARGET_TCP
90+
select NVME_COMMON
91+
select NVME_KEYRING
92+
select NET_HANDSHAKE
93+
select KEYS
94+
help
95+
Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
96+
97+
The TLS handshake daemon is available at
98+
https://github.com/oracle/ktls-utils.
99+
100+
If unsure, say N.
101+
87102
config NVME_TARGET_AUTH
88103
bool "NVMe over Fabrics In-band Authentication support"
89104
depends on NVME_TARGET

drivers/nvme/target/configfs.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#ifdef CONFIG_NVME_TARGET_AUTH
1616
#include <linux/nvme-auth.h>
1717
#endif
18+
#include <linux/nvme-keyring.h>
1819
#include <crypto/hash.h>
1920
#include <crypto/kpp.h>
2021

@@ -396,6 +397,17 @@ static ssize_t nvmet_addr_tsas_store(struct config_item *item,
396397
return -EINVAL;
397398

398399
found:
400+
if (sectype == NVMF_TCP_SECTYPE_TLS13) {
401+
if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
402+
pr_err("TLS is not supported\n");
403+
return -EINVAL;
404+
}
405+
if (!port->keyring) {
406+
pr_err("TLS keyring not configured\n");
407+
return -EINVAL;
408+
}
409+
}
410+
399411
nvmet_port_init_tsas_tcp(port, sectype);
400412
/*
401413
* The TLS implementation currently does not support
@@ -1814,6 +1826,7 @@ static void nvmet_port_release(struct config_item *item)
18141826
flush_workqueue(nvmet_wq);
18151827
list_del(&port->global_entry);
18161828

1829+
key_put(port->keyring);
18171830
kfree(port->ana_state);
18181831
kfree(port);
18191832
}
@@ -1863,6 +1876,14 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
18631876
return ERR_PTR(-ENOMEM);
18641877
}
18651878

1879+
if (nvme_keyring_id()) {
1880+
port->keyring = key_lookup(nvme_keyring_id());
1881+
if (IS_ERR(port->keyring)) {
1882+
pr_warn("NVMe keyring not available, disabling TLS\n");
1883+
port->keyring = NULL;
1884+
}
1885+
}
1886+
18661887
for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
18671888
if (i == NVMET_DEFAULT_ANA_GRPID)
18681889
port->ana_state[1] = NVME_ANA_OPTIMIZED;

drivers/nvme/target/nvmet.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,7 @@ struct nvmet_port {
158158
struct config_group ana_groups_group;
159159
struct nvmet_ana_group ana_default_group;
160160
enum nvme_ana_state *ana_state;
161+
struct key *keyring;
161162
void *priv;
162163
bool enabled;
163164
int inline_data_size;

drivers/nvme/target/tcp.c

Lines changed: 150 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,13 @@
88
#include <linux/init.h>
99
#include <linux/slab.h>
1010
#include <linux/err.h>
11+
#include <linux/key.h>
1112
#include <linux/nvme-tcp.h>
13+
#include <linux/nvme-keyring.h>
1214
#include <net/sock.h>
1315
#include <net/tcp.h>
16+
#include <net/tls.h>
17+
#include <net/handshake.h>
1418
#include <linux/inet.h>
1519
#include <linux/llist.h>
1620
#include <crypto/hash.h>
@@ -66,6 +70,16 @@ device_param_cb(idle_poll_period_usecs, &set_param_ops,
6670
MODULE_PARM_DESC(idle_poll_period_usecs,
6771
"nvmet tcp io_work poll till idle time period in usecs: Default 0");
6872

73+
#ifdef CONFIG_NVME_TARGET_TCP_TLS
74+
/*
75+
* TLS handshake timeout
76+
*/
77+
static int tls_handshake_timeout = 10;
78+
module_param(tls_handshake_timeout, int, 0644);
79+
MODULE_PARM_DESC(tls_handshake_timeout,
80+
"nvme TLS handshake timeout in seconds (default 10)");
81+
#endif
82+
6983
#define NVMET_TCP_RECV_BUDGET 8
7084
#define NVMET_TCP_SEND_BUDGET 8
7185
#define NVMET_TCP_IO_WORK_BUDGET 64
@@ -122,8 +136,10 @@ struct nvmet_tcp_cmd {
122136

123137
enum nvmet_tcp_queue_state {
124138
NVMET_TCP_Q_CONNECTING,
139+
NVMET_TCP_Q_TLS_HANDSHAKE,
125140
NVMET_TCP_Q_LIVE,
126141
NVMET_TCP_Q_DISCONNECTING,
142+
NVMET_TCP_Q_FAILED,
127143
};
128144

129145
struct nvmet_tcp_queue {
@@ -132,6 +148,7 @@ struct nvmet_tcp_queue {
132148
struct work_struct io_work;
133149
struct nvmet_cq nvme_cq;
134150
struct nvmet_sq nvme_sq;
151+
struct kref kref;
135152

136153
/* send state */
137154
struct nvmet_tcp_cmd *cmds;
@@ -155,6 +172,10 @@ struct nvmet_tcp_queue {
155172
struct ahash_request *snd_hash;
156173
struct ahash_request *rcv_hash;
157174

175+
/* TLS state */
176+
key_serial_t tls_pskid;
177+
struct delayed_work tls_handshake_tmo_work;
178+
158179
unsigned long poll_end;
159180

160181
spinlock_t state_lock;
@@ -918,6 +939,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
918939
free_crypto:
919940
if (queue->hdr_digest || queue->data_digest)
920941
nvmet_tcp_free_crypto(queue);
942+
queue->state = NVMET_TCP_Q_FAILED;
921943
return ret;
922944
}
923945

@@ -1283,12 +1305,25 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
12831305
return ret;
12841306
}
12851307

1308+
static void nvmet_tcp_release_queue(struct kref *kref)
1309+
{
1310+
struct nvmet_tcp_queue *queue =
1311+
container_of(kref, struct nvmet_tcp_queue, kref);
1312+
1313+
WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
1314+
queue_work(nvmet_wq, &queue->release_work);
1315+
}
1316+
12861317
static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
12871318
{
12881319
spin_lock(&queue->state_lock);
1320+
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1321+
/* Socket closed during handshake */
1322+
tls_handshake_cancel(queue->sock->sk);
1323+
}
12891324
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
12901325
queue->state = NVMET_TCP_Q_DISCONNECTING;
1291-
queue_work(nvmet_wq, &queue->release_work);
1326+
kref_put(&queue->kref, nvmet_tcp_release_queue);
12921327
}
12931328
spin_unlock(&queue->state_lock);
12941329
}
@@ -1485,6 +1520,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
14851520
mutex_unlock(&nvmet_tcp_queue_mutex);
14861521

14871522
nvmet_tcp_restore_socket_callbacks(queue);
1523+
cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
14881524
cancel_work_sync(&queue->io_work);
14891525
/* stop accepting incoming data */
14901526
queue->rcv_state = NVMET_TCP_RECV_ERR;
@@ -1512,8 +1548,13 @@ static void nvmet_tcp_data_ready(struct sock *sk)
15121548

15131549
read_lock_bh(&sk->sk_callback_lock);
15141550
queue = sk->sk_user_data;
1515-
if (likely(queue))
1516-
queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1551+
if (likely(queue)) {
1552+
if (queue->data_ready)
1553+
queue->data_ready(sk);
1554+
if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
1555+
queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
1556+
&queue->io_work);
1557+
}
15171558
read_unlock_bh(&sk->sk_callback_lock);
15181559
}
15191560

@@ -1621,6 +1662,87 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
16211662
return ret;
16221663
}
16231664

1665+
#ifdef CONFIG_NVME_TARGET_TCP_TLS
1666+
static void nvmet_tcp_tls_handshake_done(void *data, int status,
1667+
key_serial_t peerid)
1668+
{
1669+
struct nvmet_tcp_queue *queue = data;
1670+
1671+
pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
1672+
queue->idx, peerid, status);
1673+
spin_lock_bh(&queue->state_lock);
1674+
if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1675+
spin_unlock_bh(&queue->state_lock);
1676+
return;
1677+
}
1678+
if (!status) {
1679+
queue->tls_pskid = peerid;
1680+
queue->state = NVMET_TCP_Q_CONNECTING;
1681+
} else
1682+
queue->state = NVMET_TCP_Q_FAILED;
1683+
spin_unlock_bh(&queue->state_lock);
1684+
1685+
cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1686+
if (status)
1687+
nvmet_tcp_schedule_release_queue(queue);
1688+
else
1689+
nvmet_tcp_set_queue_sock(queue);
1690+
kref_put(&queue->kref, nvmet_tcp_release_queue);
1691+
}
1692+
1693+
static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
1694+
{
1695+
struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
1696+
struct nvmet_tcp_queue, tls_handshake_tmo_work);
1697+
1698+
pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
1699+
/*
1700+
* If tls_handshake_cancel() fails we've lost the race with
1701+
* nvmet_tcp_tls_handshake_done() */
1702+
if (!tls_handshake_cancel(queue->sock->sk))
1703+
return;
1704+
spin_lock_bh(&queue->state_lock);
1705+
if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1706+
spin_unlock_bh(&queue->state_lock);
1707+
return;
1708+
}
1709+
queue->state = NVMET_TCP_Q_FAILED;
1710+
spin_unlock_bh(&queue->state_lock);
1711+
nvmet_tcp_schedule_release_queue(queue);
1712+
kref_put(&queue->kref, nvmet_tcp_release_queue);
1713+
}
1714+
1715+
static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
1716+
{
1717+
int ret = -EOPNOTSUPP;
1718+
struct tls_handshake_args args;
1719+
1720+
if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
1721+
pr_warn("cannot start TLS in state %d\n", queue->state);
1722+
return -EINVAL;
1723+
}
1724+
1725+
kref_get(&queue->kref);
1726+
pr_debug("queue %d: TLS ServerHello\n", queue->idx);
1727+
memset(&args, 0, sizeof(args));
1728+
args.ta_sock = queue->sock;
1729+
args.ta_done = nvmet_tcp_tls_handshake_done;
1730+
args.ta_data = queue;
1731+
args.ta_keyring = key_serial(queue->port->nport->keyring);
1732+
args.ta_timeout_ms = tls_handshake_timeout * 1000;
1733+
1734+
ret = tls_server_hello_psk(&args, GFP_KERNEL);
1735+
if (ret) {
1736+
kref_put(&queue->kref, nvmet_tcp_release_queue);
1737+
pr_err("failed to start TLS, err=%d\n", ret);
1738+
} else {
1739+
queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
1740+
tls_handshake_timeout * HZ);
1741+
}
1742+
return ret;
1743+
}
1744+
#endif
1745+
16241746
static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
16251747
struct socket *newsock)
16261748
{
@@ -1636,11 +1758,16 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
16361758

16371759
INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
16381760
INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1761+
kref_init(&queue->kref);
16391762
queue->sock = newsock;
16401763
queue->port = port;
16411764
queue->nr_cmds = 0;
16421765
spin_lock_init(&queue->state_lock);
1643-
queue->state = NVMET_TCP_Q_CONNECTING;
1766+
if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
1767+
NVMF_TCP_SECTYPE_TLS13)
1768+
queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
1769+
else
1770+
queue->state = NVMET_TCP_Q_CONNECTING;
16441771
INIT_LIST_HEAD(&queue->free_list);
16451772
init_llist_head(&queue->resp_list);
16461773
INIT_LIST_HEAD(&queue->resp_send_list);
@@ -1671,6 +1798,25 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
16711798
list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
16721799
mutex_unlock(&nvmet_tcp_queue_mutex);
16731800

1801+
#ifdef CONFIG_NVME_TARGET_TCP_TLS
1802+
INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
1803+
nvmet_tcp_tls_handshake_timeout);
1804+
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1805+
struct sock *sk = queue->sock->sk;
1806+
1807+
/* Restore the default callbacks before starting upcall */
1808+
read_lock_bh(&sk->sk_callback_lock);
1809+
sk->sk_user_data = NULL;
1810+
sk->sk_data_ready = port->data_ready;
1811+
read_unlock_bh(&sk->sk_callback_lock);
1812+
if (!nvmet_tcp_tls_handshake(queue))
1813+
return;
1814+
1815+
/* TLS handshake failed, terminate the connection */
1816+
goto out_destroy_sq;
1817+
}
1818+
#endif
1819+
16741820
ret = nvmet_tcp_set_queue_sock(queue);
16751821
if (ret)
16761822
goto out_destroy_sq;

0 commit comments

Comments (0)