Commit 4d55f23
io_uring: don't recycle provided buffer if punted to async worker
We only really need to recycle the buffer when going async for a file
type that has an indefinite response time (eg non-file/bdev). And for
files that will arm poll, the async worker will arm poll anyway and the
buffer will get recycled there. In that latter case, we're not holding
ctx->uring_lock. Ensure we take the issue_flags into account and acquire
it if we need to.

Fixes: b1c6264 ("io_uring: recycle provided buffers if request goes async")
Reported-by: Stefan Roesch <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent d89a4fa commit 4d55f23
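
The key mechanic in the fix is conditional locking: io_kbuf_recycle() now
receives the caller's issue_flags, and when IO_URING_F_UNLOCKED is set
(meaning the caller does not already hold ctx->uring_lock, as in the async
worker path) it acquires and releases the lock itself. Below is a minimal
userspace sketch of that pattern using pthreads; it is an illustration, not
kernel code, and the names recycle_buffer, CALLER_UNLOCKED, list_lock and
free_buffers are hypothetical stand-ins.

/*
 * Sketch of the conditional-locking pattern from the patch: the caller
 * passes a flag saying whether it already holds the lock, and the
 * function takes/drops the lock only when the caller does not.
 */
#include <pthread.h>
#include <stdio.h>

#define CALLER_UNLOCKED (1u << 0)	/* analogous to IO_URING_F_UNLOCKED */

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_buffers;		/* stand-in for the provided-buffer list */

static void recycle_buffer(unsigned issue_flags)
{
	/* Take the lock only if the caller does not already hold it. */
	if (issue_flags & CALLER_UNLOCKED)
		pthread_mutex_lock(&list_lock);

	free_buffers++;			/* the list_add() in the real patch */

	if (issue_flags & CALLER_UNLOCKED)
		pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	/* Submission-style path: lock already held by the caller. */
	pthread_mutex_lock(&list_lock);
	recycle_buffer(0);
	pthread_mutex_unlock(&list_lock);

	/* Async-worker-style path: lock not held, so pass the flag. */
	recycle_buffer(CALLER_UNLOCKED);

	printf("recycled %d buffers\n", free_buffers);
	return 0;
}
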

File tree: 1 file changed, 8 insertions(+), 3 deletions(-)

fs/io_uring.c

Lines changed: 8 additions & 3 deletions
@@ -1383,7 +1383,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-static void io_kbuf_recycle(struct io_kiocb *req)
+static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
@@ -1392,13 +1392,19 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
 		return;
 
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_lock(&ctx->uring_lock);
+
 	lockdep_assert_held(&ctx->uring_lock);
 
 	buf = req->kbuf;
 	bl = io_buffer_get_list(ctx, buf->bgid);
 	list_add(&buf->list, &bl->buf_list);
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	req->kbuf = NULL;
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_unlock(&ctx->uring_lock);
 }
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
@@ -6254,7 +6260,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
 
-	io_kbuf_recycle(req);
+	io_kbuf_recycle(req, issue_flags);
 
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
 	if (ret || ipt.error)
@@ -7504,7 +7510,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 		 * Queued up for async execution, worker will release
 		 * submit reference when the iocb is actually submitted.
 		 */
-		io_kbuf_recycle(req);
 		io_queue_async_work(req, NULL);
 		break;
 	case IO_APOLL_OK:
