@@ -960,7 +960,8 @@ static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
 	return 0;
 }
 
-int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+				 size_t size)
 {
 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -983,15 +984,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int ret = 0;
 	int pending;
 
-	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
-			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES))
-		return -EOPNOTSUPP;
-
-	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
-	if (ret)
-		return ret;
-	lock_sock(sk);
-
 	if (unlikely(msg->msg_controllen)) {
 		ret = tls_process_cmsg(sk, msg, &record_type);
 		if (ret) {
@@ -1192,10 +1184,27 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
 send_end:
 	ret = sk_stream_error(sk, msg->msg_flags, ret);
+	return copied > 0 ? copied : ret;
+}
 
+int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	int ret;
+
+	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES |
+			       MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+		return -EOPNOTSUPP;
+
+	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
+	if (ret)
+		return ret;
+	lock_sock(sk);
+	ret = tls_sw_sendmsg_locked(sk, msg, size);
 	release_sock(sk);
 	mutex_unlock(&tls_ctx->tx_lock);
-	return copied > 0 ? copied : ret;
+	return ret;
 }
 
 /*
@@ -1272,151 +1281,39 @@ void tls_sw_splice_eof(struct socket *sock)
 	mutex_unlock(&tls_ctx->tx_lock);
 }
 
-static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
-			      int offset, size_t size, int flags)
-{
-	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-	struct tls_prot_info *prot = &tls_ctx->prot_info;
-	unsigned char record_type = TLS_RECORD_TYPE_DATA;
-	struct sk_msg *msg_pl;
-	struct tls_rec *rec;
-	int num_async = 0;
-	ssize_t copied = 0;
-	bool full_record;
-	int record_room;
-	int ret = 0;
-	bool eor;
-
-	eor = !(flags & MSG_SENDPAGE_NOTLAST);
-	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-
-	/* Call the sk_stream functions to manage the sndbuf mem. */
-	while (size > 0) {
-		size_t copy, required_size;
-
-		if (sk->sk_err) {
-			ret = -sk->sk_err;
-			goto sendpage_end;
-		}
-
-		if (ctx->open_rec)
-			rec = ctx->open_rec;
-		else
-			rec = ctx->open_rec = tls_get_rec(sk);
-		if (!rec) {
-			ret = -ENOMEM;
-			goto sendpage_end;
-		}
-
-		msg_pl = &rec->msg_plaintext;
-
-		full_record = false;
-		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
-		copy = size;
-		if (copy >= record_room) {
-			copy = record_room;
-			full_record = true;
-		}
-
-		required_size = msg_pl->sg.size + copy + prot->overhead_size;
-
-		if (!sk_stream_memory_free(sk))
-			goto wait_for_sndbuf;
-alloc_payload:
-		ret = tls_alloc_encrypted_msg(sk, required_size);
-		if (ret) {
-			if (ret != -ENOSPC)
-				goto wait_for_memory;
-
-			/* Adjust copy according to the amount that was
-			 * actually allocated. The difference is due
-			 * to max sg elements limit
-			 */
-			copy -= required_size - msg_pl->sg.size;
-			full_record = true;
-		}
-
-		sk_msg_page_add(msg_pl, page, copy, offset);
-		sk_mem_charge(sk, copy);
-
-		offset += copy;
-		size -= copy;
-		copied += copy;
-
-		tls_ctx->pending_open_record_frags = true;
-		if (full_record || eor || sk_msg_full(msg_pl)) {
-			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
-						  record_type, &copied, flags);
-			if (ret) {
-				if (ret == -EINPROGRESS)
-					num_async++;
-				else if (ret == -ENOMEM)
-					goto wait_for_memory;
-				else if (ret != -EAGAIN) {
-					if (ret == -ENOSPC)
-						ret = 0;
-					goto sendpage_end;
-				}
-			}
-		}
-		continue;
-wait_for_sndbuf:
-		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-		ret = sk_stream_wait_memory(sk, &timeo);
-		if (ret) {
-			if (ctx->open_rec)
-				tls_trim_both_msgs(sk, msg_pl->sg.size);
-			goto sendpage_end;
-		}
-
-		if (ctx->open_rec)
-			goto alloc_payload;
-	}
-
-	if (num_async) {
-		/* Transmit if any encryptions have completed */
-		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
-			cancel_delayed_work(&ctx->tx_work.work);
-			tls_tx_records(sk, flags);
-		}
-	}
-sendpage_end:
-	ret = sk_stream_error(sk, flags, ret);
-	return copied > 0 ? copied : ret;
-}
-
 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
 			   int offset, size_t size, int flags)
 {
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
+
 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
 		      MSG_NO_SHARED_FRAGS))
 		return -EOPNOTSUPP;
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		msg.msg_flags |= MSG_MORE;
 
-	return tls_sw_do_sendpage(sk, page, offset, size, flags);
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return tls_sw_sendmsg_locked(sk, &msg, size);
 }
 
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags)
 {
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	int ret;
+	struct bio_vec bvec;
+	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
 
 	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
 		return -EOPNOTSUPP;
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		msg.msg_flags |= MSG_MORE;
 
-	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
-	if (ret)
-		return ret;
-	lock_sock(sk);
-	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
-	release_sock(sk);
-	mutex_unlock(&tls_ctx->tx_lock);
-	return ret;
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return tls_sw_sendmsg(sk, &msg, size);
}
 
 static int
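For reference, the idiom this patch converges on can be read in isolation: wrap a single page fragment in a bio_vec, point a BVEC iterator at it, and push it through the ordinary sendmsg path with MSG_SPLICE_PAGES set, so the TLS layer splices the page into the record instead of copying it. Below is a minimal standalone sketch of that bridging pattern, assuming the post-patch tls_sw_sendmsg() from this diff; the wrapper name example_sendpage_via_sendmsg is hypothetical, while bvec_set_page(), iov_iter_bvec(), ITER_SOURCE and MSG_SPLICE_PAGES are the real kernel APIs the patch itself uses.

#include <linux/bvec.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <net/tls.h>

/* Illustrative sketch, not part of the patch: express a legacy
 * sendpage-style call as a spliced sendmsg call.
 */
static int example_sendpage_via_sendmsg(struct sock *sk, struct page *page,
					int offset, size_t size, int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

	/* Legacy sendpage callers signal "more data follows" with
	 * MSG_SENDPAGE_NOTLAST; the sendmsg path expects MSG_MORE,
	 * so translate the flag before handing the message over.
	 */
	if (flags & MSG_SENDPAGE_NOTLAST)
		msg.msg_flags |= MSG_MORE;

	/* Describe the page fragment and wrap it in a BVEC iterator. */
	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
	return tls_sw_sendmsg(sk, &msg, size);
}

Translating MSG_SENDPAGE_NOTLAST into MSG_MORE up front is what lets tls_sw_sendmsg_locked() stay oblivious to sendpage-only flags, which is why tls_sw_sendmsg() only has to whitelist them rather than interpret them.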