
Commit b21432b

io_uring: pass in struct io_cancel_data consistently
In preparation for being able to not only key cancel off the user_data,
pass in the io_cancel_data struct for the various functions that deal
with request cancelation.

Signed-off-by: Jens Axboe <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 98d3dcc commit b21432b
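The shape of the change, in brief: every cancelation helper that previously took a bare u64 user_data now takes a pointer to struct io_cancel_data, and callers build that struct on the stack. A condensed sketch of the old and new calling conventions, lifted from the io_async_cancel() hunk in the diff below (fragments only, not standalone code):

	/* Old convention: the cancel key travels as a bare u64. */
	u64 sqe_addr = req->cancel.addr;

	ret = io_try_cancel_userdata(req, sqe_addr);

	/*
	 * New convention: the key (and, where relevant, the ring context)
	 * is bundled into a stack-allocated struct io_cancel_data, so a
	 * later patch can key cancelation off more than just user_data.
	 */
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.data	= req->cancel.addr,
	};

	ret = io_try_cancel(req, &cd);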

fs/io_uring.c

Lines changed: 44 additions & 32 deletions
@@ -988,6 +988,11 @@ struct io_defer_entry {
 	u32 seq;
 };
 
+struct io_cancel_data {
+	struct io_ring_ctx *ctx;
+	u64 data;
+};
+
 struct io_op_def {
 	/* needs req->file assigned */
 	unsigned needs_file : 1;
@@ -6298,16 +6303,16 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 	return found;
 }
 
-static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
-				     bool poll_only)
+static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
+				     struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
 	struct hlist_head *list;
 	struct io_kiocb *req;
 
-	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
+	list = &ctx->cancel_hash[hash_long(cd->data, ctx->cancel_hash_bits)];
 	hlist_for_each_entry(req, list, hash_node) {
-		if (sqe_addr != req->cqe.user_data)
+		if (cd->data != req->cqe.user_data)
 			continue;
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
@@ -6326,10 +6331,10 @@ static bool io_poll_disarm(struct io_kiocb *req)
 	return true;
 }
 
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
+	struct io_kiocb *req = io_poll_find(ctx, false, cd);
 
 	if (!req)
 		return -ENOENT;
@@ -6421,13 +6426,14 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
 {
+	struct io_cancel_data cd = { .data = req->poll_update.old_user_data, };
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *preq;
 	int ret2, ret = 0;
 	bool locked;
 
 	spin_lock(&ctx->completion_lock);
-	preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
+	preq = io_poll_find(ctx, true, &cd);
 	if (!preq || !io_poll_disarm(preq)) {
 		spin_unlock(&ctx->completion_lock);
 		ret = preq ? -EALREADY : -ENOENT;
@@ -6487,15 +6493,15 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 }
 
 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
-					   __u64 user_data)
+					   struct io_cancel_data *cd)
 	__must_hold(&ctx->timeout_lock)
 {
 	struct io_timeout_data *io;
 	struct io_kiocb *req;
 	bool found = false;
 
 	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
-		found = user_data == req->cqe.user_data;
+		found = cd->data == req->cqe.user_data;
 		if (found)
 			break;
 	}
@@ -6509,13 +6515,13 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 	return req;
 }
 
-static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+static int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
 	struct io_kiocb *req;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	req = io_timeout_extract(ctx, user_data);
+	req = io_timeout_extract(ctx, cd);
 	spin_unlock_irq(&ctx->timeout_lock);
 
 	if (IS_ERR(req))
@@ -6569,7 +6575,8 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
 			     struct timespec64 *ts, enum hrtimer_mode mode)
 	__must_hold(&ctx->timeout_lock)
 {
-	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
+	struct io_cancel_data cd = { .data = user_data, };
+	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
 	struct io_timeout_data *data;
 
 	if (IS_ERR(req))
@@ -6634,8 +6641,10 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 
 	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
+		struct io_cancel_data cd = { .data = tr->addr, };
+
 		spin_lock(&ctx->completion_lock);
-		ret = io_timeout_cancel(ctx, tr->addr);
+		ret = io_timeout_cancel(ctx, &cd);
 		spin_unlock(&ctx->completion_lock);
 	} else {
 		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
@@ -6763,30 +6772,24 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
-struct io_cancel_data {
-	struct io_ring_ctx *ctx;
-	u64 user_data;
-};
-
 static bool io_cancel_cb(struct io_wq_work *work, void *data)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_cancel_data *cd = data;
 
-	return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
+	return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
 }
 
-static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
-			       struct io_ring_ctx *ctx)
+static int io_async_cancel_one(struct io_uring_task *tctx,
+			       struct io_cancel_data *cd)
 {
-	struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
 	enum io_wq_cancel cancel_ret;
 	int ret = 0;
 
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6802,14 +6805,14 @@ static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
 	return ret;
 }
 
-static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
+static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
 	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
 
-	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+	ret = io_async_cancel_one(req->task->io_uring, cd);
 	/*
 	 * Fall-through even for -EALREADY, as we may have poll armed
 	 * that need unarming.
@@ -6818,10 +6821,10 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 		return 0;
 
 	spin_lock(&ctx->completion_lock);
-	ret = io_poll_cancel(ctx, sqe_addr);
+	ret = io_poll_cancel(ctx, cd);
 	if (ret != -ENOENT)
 		goto out;
-	ret = io_timeout_cancel(ctx, sqe_addr);
+	ret = io_timeout_cancel(ctx, cd);
 out:
 	spin_unlock(&ctx->completion_lock);
 	return ret;
@@ -6845,11 +6848,14 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	u64 sqe_addr = req->cancel.addr;
+	struct io_cancel_data cd = {
+		.ctx = ctx,
+		.data = req->cancel.addr,
+	};
 	struct io_tctx_node *node;
 	int ret;
 
-	ret = io_try_cancel_userdata(req, sqe_addr);
+	ret = io_try_cancel(req, &cd);
 	if (ret != -ENOENT)
 		goto done;
 
@@ -6859,7 +6865,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+		ret = io_async_cancel_one(tctx, &cd);
 		if (ret != -ENOENT)
 			break;
 	}
@@ -7455,8 +7461,14 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!(req->task->flags & PF_EXITING))
-			ret = io_try_cancel_userdata(req, prev->cqe.user_data);
+		if (!(req->task->flags & PF_EXITING)) {
+			struct io_cancel_data cd = {
+				.ctx = req->ctx,
+				.data = prev->cqe.user_data,
+			};
+
+			ret = io_try_cancel(req, &cd);
+		}
 		io_req_complete_post(req, ret ?: -ETIME, 0);
 		io_put_req(prev);
 	} else {
