Commit 9cae36a

io_uring: reinstate the inflight tracking
After some debugging, it was realized that we really do still need the old
inflight tracking for any file type that has io_uring_fops assigned. If we
don't, then trivial circular references will mean that we never get the ctx
cleaned up and hence it'll leak.

Just bring back the inflight tracking, which then also means we can eliminate
the conditional dropping of the file when task_work is queued.

Fixes: d536123 ("io_uring: drop the old style inflight file tracking")
Signed-off-by: Jens Axboe <[email protected]>
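
For illustration only (not part of the commit): the kind of trivial circular
reference the message refers to can be created from user space by making a
request target an io_uring file descriptor, for example polling a ring's own
fd. A minimal sketch using liburing, assuming the request is simply left
pending when the task exits:

/* Sketch only: submit a poll on the ring's own fd and exit without
 * reaping it. The pending request holds a reference to the ring's
 * file, while the ring's ctx holds the request; that is the cycle
 * the reinstated inflight tracking lets the cancellation path see.
 */
#include <liburing.h>
#include <poll.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
	io_uring_submit(&ring);

	return 0;	/* exit with the poll still in flight */
}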
1 parent 61c1b44 commit 9cae36a

1 file changed: 56 additions, 26 deletions

fs/io_uring.c
Lines changed: 56 additions & 26 deletions

@@ -112,7 +112,8 @@
 		IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
 
 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
-				REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
+				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
+				REQ_F_ASYNC_DATA)
 
 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
 				IO_REQ_CLEAN_FLAGS)
@@ -540,6 +541,7 @@ struct io_uring_task {
 	const struct io_ring_ctx *last;
 	struct io_wq		*io_wq;
 	struct percpu_counter	inflight;
+	atomic_t		inflight_tracked;
 	atomic_t		in_idle;
 
 	spinlock_t		task_lock;
@@ -1356,8 +1358,6 @@ static void io_clean_op(struct io_kiocb *req);
 static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 					     unsigned issue_flags);
 static struct file *io_file_get_normal(struct io_kiocb *req, int fd);
-static void io_drop_inflight_file(struct io_kiocb *req);
-static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
 static void io_queue_sqe(struct io_kiocb *req);
 static void io_rsrc_put_work(struct work_struct *work);
 
@@ -1760,9 +1760,29 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 			  bool cancel_all)
 	__must_hold(&req->ctx->timeout_lock)
 {
+	struct io_kiocb *req;
+
 	if (task && head->task != task)
 		return false;
-	return cancel_all;
+	if (cancel_all)
+		return true;
+
+	io_for_each_link(req, head) {
+		if (req->flags & REQ_F_INFLIGHT)
+			return true;
+	}
+	return false;
+}
+
+static bool io_match_linked(struct io_kiocb *head)
+{
+	struct io_kiocb *req;
+
+	io_for_each_link(req, head) {
+		if (req->flags & REQ_F_INFLIGHT)
+			return true;
+	}
+	return false;
 }
 
 /*
@@ -1772,9 +1792,24 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			       bool cancel_all)
 {
+	bool matched;
+
 	if (task && head->task != task)
 		return false;
-	return cancel_all;
+	if (cancel_all)
+		return true;
+
+	if (head->flags & REQ_F_LINK_TIMEOUT) {
+		struct io_ring_ctx *ctx = head->ctx;
+
+		/* protect against races with linked timeouts */
+		spin_lock_irq(&ctx->timeout_lock);
+		matched = io_match_linked(head);
+		spin_unlock_irq(&ctx->timeout_lock);
+	} else {
+		matched = io_match_linked(head);
+	}
+	return matched;
 }
 
 static inline bool req_has_async_data(struct io_kiocb *req)
@@ -1930,6 +1965,14 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 	return req->flags & REQ_F_FIXED_FILE;
 }
 
+static inline void io_req_track_inflight(struct io_kiocb *req)
+{
+	if (!(req->flags & REQ_F_INFLIGHT)) {
+		req->flags |= REQ_F_INFLIGHT;
+		atomic_inc(&current->io_uring->inflight_tracked);
+	}
+}
+
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
 	if (WARN_ON_ONCE(!req->link))
@@ -2991,8 +3034,6 @@ static void __io_req_task_work_add(struct io_kiocb *req,
 	unsigned long flags;
 	bool running;
 
-	io_drop_inflight_file(req);
-
 	spin_lock_irqsave(&tctx->task_lock, flags);
 	wq_list_add_tail(&req->io_task_work.node, list);
 	running = tctx->task_running;
@@ -6914,10 +6955,6 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 
 	if (!req->cqe.res) {
 		struct poll_table_struct pt = { ._key = req->apoll_events };
-		unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
-
-		if (unlikely(!io_assign_file(req, flags)))
-			return -EBADF;
 		req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
 	}
 
@@ -8325,6 +8362,11 @@ static void io_clean_op(struct io_kiocb *req)
 		kfree(req->apoll);
 		req->apoll = NULL;
 	}
+	if (req->flags & REQ_F_INFLIGHT) {
+		struct io_uring_task *tctx = req->task->io_uring;
+
+		atomic_dec(&tctx->inflight_tracked);
+	}
 	if (req->flags & REQ_F_CREDS)
 		put_cred(req->creds);
 	if (req->flags & REQ_F_ASYNC_DATA) {
@@ -8631,19 +8673,6 @@ static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 	return file;
 }
 
-/*
- * Drop the file for requeue operations. Only used of req->file is the
- * io_uring descriptor itself.
- */
-static void io_drop_inflight_file(struct io_kiocb *req)
-{
-	if (unlikely(req->flags & REQ_F_INFLIGHT)) {
-		fput(req->file);
-		req->file = NULL;
-		req->flags &= ~REQ_F_INFLIGHT;
-	}
-}
-
 static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 {
 	struct file *file = fget(fd);
@@ -8652,7 +8681,7 @@ static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 
 	/* we don't allow fixed io_uring files */
 	if (file && file->f_op == &io_uring_fops)
-		req->flags |= REQ_F_INFLIGHT;
+		io_req_track_inflight(req);
 	return file;
 }
 
@@ -10416,6 +10445,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
 	xa_init(&tctx->xa);
 	init_waitqueue_head(&tctx->wait);
 	atomic_set(&tctx->in_idle, 0);
+	atomic_set(&tctx->inflight_tracked, 0);
 	task->io_uring = tctx;
 	spin_lock_init(&tctx->task_lock);
 	INIT_WQ_LIST(&tctx->task_list);
@@ -11647,7 +11677,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
 {
 	if (tracked)
-		return 0;
+		return atomic_read(&tctx->inflight_tracked);
 	return percpu_counter_sum(&tctx->inflight);
 }
 