Commit 45e5be8

dhowells authored and kuba-moo committed
tls/sw: Convert tls_sw_sendpage() to use MSG_SPLICE_PAGES
Convert tls_sw_sendpage() and tls_sw_sendpage_locked() to use sendmsg()
with MSG_SPLICE_PAGES rather than directly splicing in the pages itself.

[!] Note that tls_sw_sendpage_locked() appears to have the wrong locking
upstream. I think the caller will only hold the socket lock, but it
should hold tls_ctx->tx_lock too.

This allows ->sendpage() to be replaced by something that can handle
multiple multipage folios in a single transaction.

Signed-off-by: David Howells <[email protected]>
Reviewed-by: Jakub Kicinski <[email protected]>
cc: Chuck Lever <[email protected]>
cc: Boris Pismenny <[email protected]>
cc: John Fastabend <[email protected]>
cc: Jens Axboe <[email protected]>
cc: Matthew Wilcox <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
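The conversion pattern, for reference: describe the page with a bio_vec, point a BVEC iterator at it as a data source, set MSG_SPLICE_PAGES, and hand the message to the normal sendmsg path. A minimal sketch of that pattern, mirroring the patch below (the wrapper name example_sendpage() is hypothetical, for illustration only):

/* Minimal sketch of the sendpage-to-sendmsg conversion this commit
 * applies. example_sendpage() is a hypothetical name; the calls
 * mirror the real patch below.
 */
static int example_sendpage(struct sock *sk, struct page *page,
			    int offset, size_t size, int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

	/* Legacy sendpage callers signal "more data follows" with
	 * MSG_SENDPAGE_NOTLAST; the sendmsg path expects MSG_MORE.
	 */
	if (flags & MSG_SENDPAGE_NOTLAST)
		msg.msg_flags |= MSG_MORE;

	/* Describe the single page as a bio_vec and aim the message's
	 * iterator at it as a data source.
	 */
	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	return tls_sw_sendmsg(sk, &msg, size);
}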
1 parent fe1e81d commit 45e5be8

File tree

1 file changed: +35 −138 lines changed

net/tls/tls_sw.c

Lines changed: 35 additions & 138 deletions
@@ -960,7 +960,8 @@ static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
 	return 0;
 }
 
-int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+				 size_t size)
 {
 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -983,15 +984,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int ret = 0;
 	int pending;
 
-	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
-			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES))
-		return -EOPNOTSUPP;
-
-	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
-	if (ret)
-		return ret;
-	lock_sock(sk);
-
 	if (unlikely(msg->msg_controllen)) {
 		ret = tls_process_cmsg(sk, msg, &record_type);
 		if (ret) {
@@ -1192,10 +1184,27 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 send_end:
 	ret = sk_stream_error(sk, msg->msg_flags, ret);
+	return copied > 0 ? copied : ret;
+}
 
+int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	int ret;
+
+	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES |
+			       MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+		return -EOPNOTSUPP;
+
+	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+	if (ret)
+		return ret;
+	lock_sock(sk);
+	ret = tls_sw_sendmsg_locked(sk, msg, size);
 	release_sock(sk);
 	mutex_unlock(&tls_ctx->tx_lock);
-	return copied > 0 ? copied : ret;
+	return ret;
 }
 
 /*
@@ -1272,151 +1281,39 @@ void tls_sw_splice_eof(struct socket *sock)
 	mutex_unlock(&tls_ctx->tx_lock);
 }
 
-static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
-			      int offset, size_t size, int flags)
-{
-	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-	struct tls_prot_info *prot = &tls_ctx->prot_info;
-	unsigned char record_type = TLS_RECORD_TYPE_DATA;
-	struct sk_msg *msg_pl;
-	struct tls_rec *rec;
-	int num_async = 0;
-	ssize_t copied = 0;
-	bool full_record;
-	int record_room;
-	int ret = 0;
-	bool eor;
-
-	eor = !(flags & MSG_SENDPAGE_NOTLAST);
-	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-
-	/* Call the sk_stream functions to manage the sndbuf mem. */
-	while (size > 0) {
-		size_t copy, required_size;
-
-		if (sk->sk_err) {
-			ret = -sk->sk_err;
-			goto sendpage_end;
-		}
-
-		if (ctx->open_rec)
-			rec = ctx->open_rec;
-		else
-			rec = ctx->open_rec = tls_get_rec(sk);
-		if (!rec) {
-			ret = -ENOMEM;
-			goto sendpage_end;
-		}
-
-		msg_pl = &rec->msg_plaintext;
-
-		full_record = false;
-		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
-		copy = size;
-		if (copy >= record_room) {
-			copy = record_room;
-			full_record = true;
-		}
-
-		required_size = msg_pl->sg.size + copy + prot->overhead_size;
-
-		if (!sk_stream_memory_free(sk))
-			goto wait_for_sndbuf;
-alloc_payload:
-		ret = tls_alloc_encrypted_msg(sk, required_size);
-		if (ret) {
-			if (ret != -ENOSPC)
-				goto wait_for_memory;
-
-			/* Adjust copy according to the amount that was
-			 * actually allocated. The difference is due
-			 * to max sg elements limit
-			 */
-			copy -= required_size - msg_pl->sg.size;
-			full_record = true;
-		}
-
-		sk_msg_page_add(msg_pl, page, copy, offset);
-		sk_mem_charge(sk, copy);
-
-		offset += copy;
-		size -= copy;
-		copied += copy;
-
-		tls_ctx->pending_open_record_frags = true;
-		if (full_record || eor || sk_msg_full(msg_pl)) {
-			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
-						  record_type, &copied, flags);
-			if (ret) {
-				if (ret == -EINPROGRESS)
-					num_async++;
-				else if (ret == -ENOMEM)
-					goto wait_for_memory;
-				else if (ret != -EAGAIN) {
-					if (ret == -ENOSPC)
-						ret = 0;
-					goto sendpage_end;
-				}
-			}
-		}
-		continue;
-wait_for_sndbuf:
-		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-		ret = sk_stream_wait_memory(sk, &timeo);
-		if (ret) {
-			if (ctx->open_rec)
-				tls_trim_both_msgs(sk, msg_pl->sg.size);
-			goto sendpage_end;
-		}
-
-		if (ctx->open_rec)
-			goto alloc_payload;
-	}
-
-	if (num_async) {
-		/* Transmit if any encryptions have completed */
-		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
-			cancel_delayed_work(&ctx->tx_work.work);
-			tls_tx_records(sk, flags);
-		}
-	}
-sendpage_end:
-	ret = sk_stream_error(sk, flags, ret);
-	return copied > 0 ? copied : ret;
-}
-
 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
 			   int offset, size_t size, int flags)
 {
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
+
 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
 		      MSG_NO_SHARED_FRAGS))
 		return -EOPNOTSUPP;
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		msg.msg_flags |= MSG_MORE;
 
-	return tls_sw_do_sendpage(sk, page, offset, size, flags);
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return tls_sw_sendmsg_locked(sk, &msg, size);
 }
 
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags)
 {
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	int ret;
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
 
 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
 		return -EOPNOTSUPP;
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		msg.msg_flags |= MSG_MORE;
 
-	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
-	if (ret)
-		return ret;
-	lock_sock(sk);
-	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
-	release_sock(sk);
-	mutex_unlock(&tls_ctx->tx_lock);
-	return ret;
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return tls_sw_sendmsg(sk, &msg, size);
 }
 
 static int
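On the locking note in the commit message: under the assumption stated there that tls_sw_sendpage_locked() needs tls_ctx->tx_lock as well as the socket lock, a correct caller would take the locks in the order tls_sw_sendpage() takes them for itself. A hedged sketch (the caller name example_locked_caller() is hypothetical):

/* Hypothetical caller of tls_sw_sendpage_locked(), sketching the lock
 * ordering the commit message says the locked variant should be able
 * to rely on: tls_ctx->tx_lock first, then the socket lock, matching
 * what tls_sw_sendpage() does in the patch above.
 */
static int example_locked_caller(struct sock *sk, struct page *page,
				 int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendpage_locked(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}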
