@@ -403,6 +403,7 @@ struct io_ring_ctx {
 	 */
 	struct io_rsrc_node		*rsrc_node;
 	int				rsrc_cached_refs;
+	atomic_t			cancel_seq;
 	struct io_file_table		file_table;
 	unsigned			nr_user_files;
 	unsigned			nr_user_bufs;
@@ -585,6 +586,7 @@ struct io_sync {
 struct io_cancel {
 	struct file			*file;
 	u64				addr;
+	u32				flags;
 };
 
 struct io_timeout {
@@ -991,6 +993,8 @@ struct io_defer_entry {
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
 	u64 data;
+	u32 flags;
+	int seq;
 };
 
 struct io_op_def {
@@ -1726,6 +1730,7 @@ static void io_prep_async_work(struct io_kiocb *req)
 
 	req->work.list.next = NULL;
 	req->work.flags = 0;
+	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	if (req->flags & REQ_F_FORCE_ASYNC)
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
 
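The hunk above (and the matching one in __io_arm_poll_handler() below) stamps each request's io_wq_work with the ring's current cancel_seq at prep time. A cancel-all pass later begins by atomically incrementing that same counter, so every live request starts out carrying a sequence value unequal to the pass's seq and can be matched at most once per pass. A standalone toy model of that protocol, with hypothetical toy_* names used purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_req { int cancel_seq; };

static atomic_int ring_cancel_seq;	/* models ctx->cancel_seq */

/* Models io_prep_async_work(): snapshot the counter at queue time. */
static void toy_prep(struct toy_req *req)
{
	req->cancel_seq = atomic_load(&ring_cancel_seq);
}

/* Models the start of one cancel-all pass: bump the counter so that no
 * live request can already carry the new pass's sequence value. */
static int toy_start_pass(void)
{
	return atomic_fetch_add(&ring_cancel_seq, 1) + 1;
}

/* Models the lookup-side check: match each request once per pass. */
static bool toy_match_once(struct toy_req *req, int pass_seq)
{
	if (req->cancel_seq == pass_seq)
		return false;	/* already matched in this pass */
	req->cancel_seq = pass_seq;
	return true;
}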
@@ -6159,6 +6164,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 	int v;
 
 	INIT_HLIST_NODE(&req->hash_node);
+	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask, io_poll_wake);
 	poll->file = req->file;
 
@@ -6316,6 +6322,11 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 			continue;
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
+		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
+			if (cd->seq == req->work.cancel_seq)
+				continue;
+			req->work.cancel_seq = cd->seq;
+		}
 		return req;
 	}
 	return NULL;
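The four-line match-once check introduced here recurs verbatim in io_timeout_extract() and io_cancel_cb() below. Were it worth factoring out, it could become a small helper along these lines (a sketch only, not part of this patch; the name io_cancel_match_seq is hypothetical):

static bool io_cancel_match_seq(struct io_kiocb *req, struct io_cancel_data *cd)
{
	/* Already matched by the pass identified by cd->seq? Skip it so
	 * a cancel-all scan terminates instead of rematching forever. */
	if (cd->seq == req->work.cancel_seq)
		return false;
	req->work.cancel_seq = cd->seq;
	return true;
}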
@@ -6501,9 +6512,15 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 	bool found = false;
 
 	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
-		found = cd->data == req->cqe.user_data;
-		if (found)
-			break;
+		if (cd->data != req->cqe.user_data)
+			continue;
+		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
+			if (cd->seq == req->work.cancel_seq)
+				continue;
+			req->work.cancel_seq = cd->seq;
+		}
+		found = true;
+		break;
 	}
 	if (!found)
 		return ERR_PTR(-ENOENT);
@@ -6777,7 +6794,16 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_cancel_data *cd = data;
 
-	return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
+	if (req->ctx != cd->ctx)
+		return false;
+	if (req->cqe.user_data != cd->data)
+		return false;
+	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
+		if (cd->seq == req->work.cancel_seq)
+			return false;
+		req->work.cancel_seq = cd->seq;
+	}
+	return true;
 }
 
 static int io_async_cancel_one(struct io_uring_task *tctx,
@@ -6789,7 +6815,8 @@ static int io_async_cancel_one(struct io_uring_task *tctx,
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd,
+				     cd->flags & IORING_ASYNC_CANCEL_ALL);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6837,40 +6864,62 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
-	    sqe->splice_fd_in)
+	if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;
 
 	req->cancel.addr = READ_ONCE(sqe->addr);
+	req->cancel.flags = READ_ONCE(sqe->cancel_flags);
+	if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL)
+		return -EINVAL;
+
 	return 0;
 }
 
-static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
+			     unsigned int issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_cancel_data cd = {
-		.ctx = ctx,
-		.data = req->cancel.addr,
-	};
+	bool cancel_all = cd->flags & IORING_ASYNC_CANCEL_ALL;
+	struct io_ring_ctx *ctx = cd->ctx;
 	struct io_tctx_node *node;
-	int ret;
+	int ret, nr = 0;
 
-	ret = io_try_cancel(req, &cd);
-	if (ret != -ENOENT)
-		goto done;
+	do {
+		ret = io_try_cancel(req, cd);
+		if (ret == -ENOENT)
+			break;
+		if (!cancel_all)
+			return ret;
+		nr++;
+	} while (1);
 
 	/* slow path, try all io-wq's */
 	io_ring_submit_lock(ctx, issue_flags);
 	ret = -ENOENT;
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, &cd);
-		if (ret != -ENOENT)
-			break;
+		ret = io_async_cancel_one(tctx, cd);
+		if (ret != -ENOENT) {
+			if (!cancel_all)
+				break;
+			nr++;
+		}
 	}
 	io_ring_submit_unlock(ctx, issue_flags);
-done:
+	return cancel_all ? nr : ret;
+}
+
+static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_cancel_data cd = {
+		.ctx = req->ctx,
+		.data = req->cancel.addr,
+		.flags = req->cancel.flags,
+		.seq = atomic_inc_return(&req->ctx->cancel_seq),
+	};
+	int ret;
+
+	ret = __io_async_cancel(&cd, req, issue_flags);
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_complete_post(req, ret, 0);
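Taken together, these changes let a single IORING_OP_ASYNC_CANCEL submission reap every pending request matching the given user_data rather than only the first, with the completion's res field reporting how many were cancelled. A minimal userspace sketch, assuming liburing 2.2+ for io_uring_prep_cancel64() and a kernel carrying this patch; the CANCEL_TAG constant and cancel_all_matching() helper are illustrative only:

#include <liburing.h>
#include <stdbool.h>

#define CANCEL_TAG	((__u64)-1)	/* marks the cancel SQE itself */

/* Cancel every pending request whose user_data equals @match.
 * Returns the number of requests cancelled, or a negative errno. */
static int cancel_all_matching(struct io_uring *ring, __u64 match)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret, res = 0;
	bool done;

	if (!sqe)
		return -EBUSY;
	io_uring_prep_cancel64(sqe, match, IORING_ASYNC_CANCEL_ALL);
	sqe->user_data = CANCEL_TAG;
	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;

	/* Cancelled requests post their own CQEs (res == -ECANCELED);
	 * reap until the cancel request's completion shows up. With
	 * IORING_ASYNC_CANCEL_ALL set, its res is the match count
	 * rather than 0 / -ENOENT. */
	do {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		done = (cqe->user_data == CANCEL_TAG);
		if (done)
			res = cqe->res;
		io_uring_cqe_seen(ring, cqe);
	} while (!done);
	return res;
}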