Skip to content

Commit b1c6264

Browse files
committed
io_uring: recycle provided buffers if request goes async
If we are using provided buffers, it's less than useful to have a buffer selected and pinned if a request needs to go async or arms poll to be notified when we can process it. Recycle the buffer in those events, so we don't pin it for the duration of the request. Signed-off-by: Jens Axboe <[email protected]>
1 parent 2be2eb0 commit b1c6264

File tree

1 file changed

+36
-0
lines changed

1 file changed

+36
-0
lines changed

fs/io_uring.c

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -269,6 +269,7 @@ struct io_buffer {
269269
__u64 addr;
270270
__u32 len;
271271
__u16 bid;
272+
__u16 bgid;
272273
};
273274

274275
struct io_restriction {
@@ -1351,6 +1352,36 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req,
13511352
return cflags;
13521353
}
13531354

1355+
static void io_kbuf_recycle(struct io_kiocb *req)
1356+
{
1357+
struct io_ring_ctx *ctx = req->ctx;
1358+
struct io_buffer *head, *buf;
1359+
1360+
if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
1361+
return;
1362+
1363+
lockdep_assert_held(&ctx->uring_lock);
1364+
1365+
buf = req->kbuf;
1366+
1367+
head = xa_load(&ctx->io_buffers, buf->bgid);
1368+
if (head) {
1369+
list_add(&buf->list, &head->list);
1370+
} else {
1371+
int ret;
1372+
1373+
INIT_LIST_HEAD(&buf->list);
1374+
1375+
/* if we fail, just leave buffer attached */
1376+
ret = xa_insert(&ctx->io_buffers, buf->bgid, buf, GFP_KERNEL);
1377+
if (unlikely(ret < 0))
1378+
return;
1379+
}
1380+
1381+
req->flags &= ~REQ_F_BUFFER_SELECTED;
1382+
req->kbuf = NULL;
1383+
}
1384+
13541385
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
13551386
bool cancel_all)
13561387
__must_hold(&req->ctx->timeout_lock)
@@ -4763,6 +4794,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
47634794
buf->addr = addr;
47644795
buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
47654796
buf->bid = bid;
4797+
buf->bgid = pbuf->bgid;
47664798
addr += pbuf->len;
47674799
bid++;
47684800
if (!*head) {
@@ -7395,8 +7427,12 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
73957427
* Queued up for async execution, worker will release
73967428
* submit reference when the iocb is actually submitted.
73977429
*/
7430+
io_kbuf_recycle(req);
73987431
io_queue_async_work(req, NULL);
73997432
break;
7433+
case IO_APOLL_OK:
7434+
io_kbuf_recycle(req);
7435+
break;
74007436
}
74017437

74027438
if (linked_timeout)

0 commit comments

Comments
 (0)