
Commit b69de28

io_uring: allow events and user_data update of running poll requests
This adds two new POLL_ADD flags, IORING_POLL_UPDATE_EVENTS and
IORING_POLL_UPDATE_USER_DATA. As with the other POLL_ADD flag, these are
masked into sqe->len.

If set, the POLL_ADD will have the following behavior:

- sqe->addr must contain the user_data of the poll request that needs
  to be modified. This field is otherwise invalid for a POLL_ADD
  command.

- If IORING_POLL_UPDATE_EVENTS is set, sqe->poll_events must contain
  the new mask for the existing poll request. There are no checks for
  whether these are identical or not; if a matching poll request is
  found, it is re-armed with the new mask.

- If IORING_POLL_UPDATE_USER_DATA is set, sqe->off must contain the new
  user_data for the existing poll request.

A POLL_ADD with any of these flags set may complete with any of the
following results:

1) 0, meaning the existing poll request was found and the re-arm
   procedure performed. Any error from that re-arm will be exposed as a
   completion event for the original poll request, not for the update
   request.
2) -ENOENT, if no existing poll request was found with the given
   user_data.
3) -EALREADY, if the existing poll request was already in the process
   of being removed/canceled/completed.
4) -EACCES, if an attempt was made to modify an internal poll request
   (i.e. one not originally issued as IORING_OP_POLL_ADD).

The usual -EINVAL cases apply as well, if any invalid fields are set in
the sqe for this command type.

Signed-off-by: Jens Axboe <[email protected]>
1 parent: b2cb805

2 files changed: +92, -8 lines changed


fs/io_uring.c

Lines changed: 87 additions & 8 deletions
@@ -486,7 +486,15 @@ struct io_poll_iocb {
 	__poll_t			events;
 	bool				done;
 	bool				canceled;
-	struct wait_queue_entry		wait;
+	bool				update_events;
+	bool				update_user_data;
+	union {
+		struct wait_queue_entry	wait;
+		struct {
+			u64		old_user_data;
+			u64		new_user_data;
+		};
+	};
 };
 
 struct io_poll_remove {
@@ -4911,8 +4919,9 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 	}
 	if (!error)
 		error = mangle_poll(mask);
-	if (!__io_cqring_fill_event(req, error, flags) ||
-	    (req->poll.events & EPOLLONESHOT)) {
+	if (req->poll.events & EPOLLONESHOT)
+		flags = 0;
+	if (!__io_cqring_fill_event(req, error, flags)) {
 		io_poll_remove_waitqs(req);
 		req->poll.done = true;
 		flags = 0;
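The hunk above clears the CQE flags (dropping IORING_CQE_F_MORE) when the poll is oneshot, before the event is filled. From userspace, that flag is the signal that more CQEs will follow from the same poll request; a minimal consumer sketch, with the dispatcher function being hypothetical:

#include <liburing.h>
#include <stdio.h>

/* hypothetical per-CQE dispatcher */
static void on_cqe(struct io_uring *ring, struct io_uring_cqe *cqe)
{
	if (cqe->flags & IORING_CQE_F_MORE)
		printf("poll %llu fired (res %d), still armed\n",
		       (unsigned long long)cqe->user_data, cqe->res);
	else
		printf("poll %llu done (res %d), no further CQEs\n",
		       (unsigned long long)cqe->user_data, cqe->res);
	io_uring_cqe_seen(ring, cqe);
}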
@@ -4992,6 +5001,7 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
 	poll->head = NULL;
 	poll->done = false;
 	poll->canceled = false;
+	poll->update_events = poll->update_user_data = false;
 #define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
 	/* mask in events that we always want/need */
 	poll->events = events | IO_POLL_UNMASK;
@@ -5370,24 +5380,36 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
-	if (sqe->addr || sqe->ioprio || sqe->off || sqe->buf_index)
+	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
 	flags = READ_ONCE(sqe->len);
-	if (flags & ~IORING_POLL_ADD_MULTI)
+	if (flags & ~(IORING_POLL_ADD_MULTI | IORING_POLL_UPDATE_EVENTS |
+			IORING_POLL_UPDATE_USER_DATA))
 		return -EINVAL;
-
 	events = READ_ONCE(sqe->poll32_events);
 #ifdef __BIG_ENDIAN
 	events = swahw32(events);
 #endif
-	if (!flags)
+	if (!(flags & IORING_POLL_ADD_MULTI))
 		events |= EPOLLONESHOT;
+	poll->update_events = poll->update_user_data = false;
+	if (flags & IORING_POLL_UPDATE_EVENTS) {
+		poll->update_events = true;
+		poll->old_user_data = READ_ONCE(sqe->addr);
+	}
+	if (flags & IORING_POLL_UPDATE_USER_DATA) {
+		poll->update_user_data = true;
+		poll->new_user_data = READ_ONCE(sqe->off);
+	}
+	if (!(poll->update_events || poll->update_user_data) &&
+	    (sqe->off || sqe->addr))
+		return -EINVAL;
 	poll->events = demangle_poll(events) |
 				(events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
 	return 0;
 }
 
-static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_poll_add(struct io_kiocb *req)
 {
 	struct io_poll_iocb *poll = &req->poll;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -5413,6 +5435,63 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 	return ipt.error;
 }
 
+static int io_poll_update(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_kiocb *preq;
+	int ret;
+
+	spin_lock_irq(&ctx->completion_lock);
+	preq = io_poll_find(ctx, req->poll.old_user_data);
+	if (!preq) {
+		ret = -ENOENT;
+		goto err;
+	} else if (preq->opcode != IORING_OP_POLL_ADD) {
+		/* don't allow internal poll updates */
+		ret = -EACCES;
+		goto err;
+	}
+	if (!__io_poll_remove_one(preq, &preq->poll)) {
+		/* in process of completing/removal */
+		ret = -EALREADY;
+		goto err;
+	}
+	/* we now have a detached poll request. reissue. */
+	ret = 0;
+err:
+	spin_unlock_irq(&ctx->completion_lock);
+	if (ret < 0) {
+		req_set_fail_links(req);
+		io_req_complete(req, ret);
+		return 0;
+	}
+	/* only mask one event flags, keep behavior flags */
+	if (req->poll.update_events) {
+		preq->poll.events &= ~0xffff;
+		preq->poll.events |= req->poll.events & 0xffff;
+		preq->poll.events |= IO_POLL_UNMASK;
+	}
+	if (req->poll.update_user_data)
+		preq->user_data = req->poll.new_user_data;
+
+	/* complete update request, we're done with it */
+	io_req_complete(req, ret);
+
+	ret = __io_poll_add(preq);
+	if (ret < 0) {
+		req_set_fail_links(preq);
+		io_req_complete(preq, ret);
+	}
+	return 0;
+}
+
+static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
+{
+	if (!req->poll.update_events && !req->poll.update_user_data)
+		return __io_poll_add(req);
+	return io_poll_update(req);
+}
+
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
 	struct io_timeout_data *data = container_of(timer,
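Pulling the completion rules together from the consumer side (a sketch; ring, UPDATE_TAG and NEW_POLL_TAG are illustrative): the update request always gets its own CQE with 0, -ENOENT, -EALREADY or -EACCES, while a failed re-arm from __io_poll_add() is reported against the original poll request's identity, which carries the new user_data if IORING_POLL_UPDATE_USER_DATA succeeded.

#include <liburing.h>
#include <stdio.h>

#define UPDATE_TAG	0xcafe	/* illustrative: tag of the update SQE */
#define NEW_POLL_TAG	0x5678	/* illustrative: user_data set via sqe->off */

/* drain CQEs and attribute them per the rules above */
static void reap(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	while (io_uring_wait_cqe(ring, &cqe) == 0) {
		if (cqe->user_data == UPDATE_TAG)
			/* the update itself: 0, -ENOENT, -EALREADY, -EACCES */
			fprintf(stderr, "update: %d\n", cqe->res);
		else if (cqe->user_data == NEW_POLL_TAG)
			/* re-armed poll: events mask, or a re-arm error */
			fprintf(stderr, "poll: %d\n", cqe->res);
		io_uring_cqe_seen(ring, cqe);
	}
}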

include/uapi/linux/io_uring.h

Lines changed: 5 additions & 0 deletions
@@ -166,8 +166,13 @@ enum {
  * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
  *				the poll handler will continue to report
  *				CQEs on behalf of the same SQE.
+ *
+ * IORING_POLL_UPDATE		Update existing poll request, matching
+ *				sqe->addr as the old user_data field.
  */
 #define IORING_POLL_ADD_MULTI	(1U << 0)
+#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
+#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
 
 /*
  * IO completion data structure (Completion Queue Entry)
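For userspace that builds against older kernel headers, the two new flag values can be mirrored locally until the updated uapi header is available; a common hedge, with the values taken from the hunk above:

#include <linux/io_uring.h>

#ifndef IORING_POLL_UPDATE_EVENTS
#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
#endif
#ifndef IORING_POLL_UPDATE_USER_DATA
#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
#endif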
