@@ -502,6 +502,7 @@ struct io_poll_update {
/*
 * Per-request state for IORING_OP_CLOSE.  Either a plain fd close or,
 * when file_slot is non-zero, removal of an entry from the ring's
 * fixed-file table.  fd and file_slot are mutually exclusive: prep
 * returns -EINVAL if both are set (see io_close_prep()).
 */
struct io_close {
	struct file			*file;
	int				fd;		/* fd to close; must be 0 when file_slot is used */
	u32				file_slot;	/* 1-based fixed-file index from sqe->file_index; 0 = none */
};
506507
507508struct io_timeout_data {
@@ -1098,6 +1099,8 @@ static int io_req_prep_async(struct io_kiocb *req);
10981099
10991100static int io_install_fixed_file (struct io_kiocb * req , struct file * file ,
11001101 unsigned int issue_flags , u32 slot_index );
1102+ static int io_close_fixed (struct io_kiocb * req , unsigned int issue_flags );
1103+
11011104static enum hrtimer_restart io_link_timeout_fn (struct hrtimer * timer );
11021105
11031106static struct kmem_cache * req_cachep ;
@@ -3605,7 +3608,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
36053608 iov_iter_save_state (iter , state );
36063609 }
36073610 req -> result = iov_iter_count (iter );
3608- ret2 = 0 ;
36093611
36103612 /* Ensure we clear previously set non-block flag */
36113613 if (!force_nonblock )
@@ -3670,8 +3672,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
36703672 } else {
36713673copy_iov :
36723674 iov_iter_restore (iter , state );
3673- if (ret2 > 0 )
3674- iov_iter_advance (iter , ret2 );
36753675 ret = io_setup_async_rw (req , iovec , inline_vecs , iter , false);
36763676 return ret ?: - EAGAIN ;
36773677 }
@@ -4387,7 +4387,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
43874387 int i , bid = pbuf -> bid ;
43884388
43894389 for (i = 0 ; i < pbuf -> nbufs ; i ++ ) {
4390- buf = kmalloc (sizeof (* buf ), GFP_KERNEL );
4390+ buf = kmalloc (sizeof (* buf ), GFP_KERNEL_ACCOUNT );
43914391 if (!buf )
43924392 break ;
43934393
@@ -4594,12 +4594,16 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
45944594 if (unlikely (req -> ctx -> flags & IORING_SETUP_IOPOLL ))
45954595 return - EINVAL ;
45964596 if (sqe -> ioprio || sqe -> off || sqe -> addr || sqe -> len ||
4597- sqe -> rw_flags || sqe -> buf_index || sqe -> splice_fd_in )
4597+ sqe -> rw_flags || sqe -> buf_index )
45984598 return - EINVAL ;
45994599 if (req -> flags & REQ_F_FIXED_FILE )
46004600 return - EBADF ;
46014601
46024602 req -> close .fd = READ_ONCE (sqe -> fd );
4603+ req -> close .file_slot = READ_ONCE (sqe -> file_index );
4604+ if (req -> close .file_slot && req -> close .fd )
4605+ return - EINVAL ;
4606+
46034607 return 0 ;
46044608}
46054609
@@ -4611,6 +4615,11 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
46114615 struct file * file = NULL ;
46124616 int ret = - EBADF ;
46134617
4618+ if (req -> close .file_slot ) {
4619+ ret = io_close_fixed (req , issue_flags );
4620+ goto err ;
4621+ }
4622+
46144623 spin_lock (& files -> file_lock );
46154624 fdt = files_fdtable (files );
46164625 if (close -> fd >= fdt -> max_fds ) {
@@ -5338,7 +5347,7 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
53385347 if (req -> poll .events & EPOLLONESHOT )
53395348 flags = 0 ;
53405349 if (!io_cqring_fill_event (ctx , req -> user_data , error , flags )) {
5341- req -> poll .done = true ;
5350+ req -> poll .events |= EPOLLONESHOT ;
53425351 flags = 0 ;
53435352 }
53445353 if (flags & IORING_CQE_F_MORE )
@@ -5367,10 +5376,15 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
53675376 } else {
53685377 bool done ;
53695378
5379+ if (req -> poll .done ) {
5380+ spin_unlock (& ctx -> completion_lock );
5381+ return ;
5382+ }
53705383 done = __io_poll_complete (req , req -> result );
53715384 if (done ) {
53725385 io_poll_remove_double (req );
53735386 hash_del (& req -> hash_node );
5387+ req -> poll .done = true;
53745388 } else {
53755389 req -> result = 0 ;
53765390 add_wait_queue (req -> poll .head , & req -> poll .wait );
@@ -5508,6 +5522,7 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
55085522
55095523 hash_del (& req -> hash_node );
55105524 io_poll_remove_double (req );
5525+ apoll -> poll .done = true;
55115526 spin_unlock (& ctx -> completion_lock );
55125527
55135528 if (!READ_ONCE (apoll -> poll .canceled ))
@@ -5828,6 +5843,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
58285843 struct io_ring_ctx * ctx = req -> ctx ;
58295844 struct io_poll_table ipt ;
58305845 __poll_t mask ;
5846+ bool done ;
58315847
58325848 ipt .pt ._qproc = io_poll_queue_proc ;
58335849
@@ -5836,13 +5852,13 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
58365852
58375853 if (mask ) { /* no async, we'd stolen it */
58385854 ipt .error = 0 ;
5839- io_poll_complete (req , mask );
5855+ done = io_poll_complete (req , mask );
58405856 }
58415857 spin_unlock (& ctx -> completion_lock );
58425858
58435859 if (mask ) {
58445860 io_cqring_ev_posted (ctx );
5845- if (poll -> events & EPOLLONESHOT )
5861+ if (done )
58465862 io_put_req (req );
58475863 }
58485864 return ipt .error ;
@@ -6333,19 +6349,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
63336349 struct io_uring_rsrc_update2 up ;
63346350 int ret ;
63356351
6336- if (issue_flags & IO_URING_F_NONBLOCK )
6337- return - EAGAIN ;
6338-
63396352 up .offset = req -> rsrc_update .offset ;
63406353 up .data = req -> rsrc_update .arg ;
63416354 up .nr = 0 ;
63426355 up .tags = 0 ;
63436356 up .resv = 0 ;
63446357
6345- mutex_lock ( & ctx -> uring_lock );
6358+ io_ring_submit_lock ( ctx , !( issue_flags & IO_URING_F_NONBLOCK ) );
63466359 ret = __io_register_rsrc_update (ctx , IORING_RSRC_FILE ,
63476360 & up , req -> rsrc_update .nr_args );
6348- mutex_unlock ( & ctx -> uring_lock );
6361+ io_ring_submit_unlock ( ctx , !( issue_flags & IO_URING_F_NONBLOCK ) );
63496362
63506363 if (ret < 0 )
63516364 req_set_fail (req );
@@ -8400,6 +8413,44 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
84008413 return ret ;
84018414}
84028415
/*
 * Handle IORING_OP_CLOSE on a fixed (registered) file, i.e. when the
 * request carries a non-zero ->close.file_slot.  Instead of closing a
 * process fd, queue removal of the file at that slot from the ring's
 * fixed-file table and clear the slot.
 *
 * Returns 0 on success, -ENXIO if no file table is registered,
 * -EINVAL for an out-of-range slot, -EBADF for an empty slot, or the
 * error from io_rsrc_node_switch_start()/io_queue_rsrc_removal().
 */
static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	/* ->file_slot is 1-based; callers only get here when it is non-zero */
	unsigned int offset = req->close.file_slot - 1;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_fixed_file *file_slot;
	struct file *file;
	int ret, i;

	/*
	 * NOTE(review): the second argument presumably means "take
	 * uring_lock"; on the nonblocking issue path the submitter appears
	 * to already hold it -- confirm against io_ring_submit_lock().
	 */
	io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
	ret = -ENXIO;
	if (unlikely(!ctx->file_data))		/* no fixed-file table registered */
		goto out;
	ret = -EINVAL;
	if (offset >= ctx->nr_user_files)	/* slot index out of range */
		goto out;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		goto out;

	/* clamp the just-bounds-checked index against speculative OOB access */
	i = array_index_nospec(offset, ctx->nr_user_files);
	file_slot = io_fixed_file_slot(&ctx->file_table, i);
	ret = -EBADF;
	if (!file_slot->file_ptr)		/* nothing installed in this slot */
		goto out;

	/* mask off the non-pointer flag bits to recover the struct file */
	file = (struct file *)(file_slot->file_ptr & FFS_MASK);
	ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
	if (ret)
		goto out;

	/*
	 * Empty the slot, then switch rsrc nodes -- presumably so the
	 * removal queued above gets flushed/processed; confirm against
	 * io_rsrc_node_switch().
	 */
	file_slot->file_ptr = 0;
	io_rsrc_node_switch(ctx, ctx->file_data);
	ret = 0;
out:
	io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
	return ret;
}
8453+
84038454static int __io_sqe_files_update (struct io_ring_ctx * ctx ,
84048455 struct io_uring_rsrc_update2 * up ,
84058456 unsigned nr_args )
@@ -9166,8 +9217,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
91669217 struct io_buffer * buf ;
91679218 unsigned long index ;
91689219
9169- xa_for_each (& ctx -> io_buffers , index , buf )
9220+ xa_for_each (& ctx -> io_buffers , index , buf ) {
91709221 __io_remove_buffers (ctx , buf , index , -1U );
9222+ cond_resched ();
9223+ }
91719224}
91729225
91739226static void io_req_cache_free (struct list_head * list )
@@ -9665,8 +9718,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
96659718 struct io_tctx_node * node ;
96669719 unsigned long index ;
96679720
9668- xa_for_each (& tctx -> xa , index , node )
9721+ xa_for_each (& tctx -> xa , index , node ) {
96699722 io_uring_del_tctx_node (index );
9723+ cond_resched ();
9724+ }
96709725 if (wq ) {
96719726 /*
96729727 * Must be after io_uring_del_task_file() (removes nodes under
0 commit comments