@@ -56,13 +56,6 @@ static struct kmem_cache *kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
-/* Used for rare fput completion. */
-static void aio_fput_routine(struct work_struct *);
-static DECLARE_WORK(fput_work, aio_fput_routine);
-
-static DEFINE_SPINLOCK(fput_lock);
-static LIST_HEAD(fput_head);
-
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
@@ -479,7 +472,6 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 {
 	unsigned short allocated, to_alloc;
 	long avail;
-	bool called_fput = false;
 	struct kiocb *req, *n;
 	struct aio_ring *ring;
 
@@ -495,28 +487,11 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
 	if (allocated == 0)
 		goto out;
 
-retry:
 	spin_lock_irq(&ctx->ctx_lock);
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
 
 	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
 	BUG_ON(avail < 0);
-	if (avail == 0 && !called_fput) {
-		/*
-		 * Handle a potential starvation case.  It is possible that
-		 * we hold the last reference on a struct file, causing us
-		 * to delay the final fput to non-irq context.  In this case,
-		 * ctx->reqs_active is artificially high.  Calling the fput
-		 * routine here may free up a slot in the event completion
-		 * ring, allowing this allocation to succeed.
-		 */
-		kunmap_atomic(ring);
-		spin_unlock_irq(&ctx->ctx_lock);
-		aio_fput_routine(NULL);
-		called_fput = true;
-		goto retry;
-	}
-
 	if (avail < allocated) {
 		/* Trim back the number of requests. */
 		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
@@ -570,36 +545,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 	wake_up_all(&ctx->wait);
 }
 
-static void aio_fput_routine(struct work_struct *data)
-{
-	spin_lock_irq(&fput_lock);
-	while (likely(!list_empty(&fput_head))) {
-		struct kiocb *req = list_kiocb(fput_head.next);
-		struct kioctx *ctx = req->ki_ctx;
-
-		list_del(&req->ki_list);
-		spin_unlock_irq(&fput_lock);
-
-		/* Complete the fput(s) */
-		if (req->ki_filp != NULL)
-			fput(req->ki_filp);
-
-		/* Link the iocb into the context's free list */
-		rcu_read_lock();
-		spin_lock_irq(&ctx->ctx_lock);
-		really_put_req(ctx, req);
-		/*
-		 * at that point ctx might've been killed, but actual
-		 * freeing is RCU'd
-		 */
-		spin_unlock_irq(&ctx->ctx_lock);
-		rcu_read_unlock();
-
-		spin_lock_irq(&fput_lock);
-	}
-	spin_unlock_irq(&fput_lock);
-}
-
 /* __aio_put_req
  * Returns true if this put was the last user of the request.
  */
@@ -618,21 +563,9 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	req->ki_cancel = NULL;
 	req->ki_retry = NULL;
 
-	/*
-	 * Try to optimize the aio and eventfd file* puts, by avoiding to
-	 * schedule work in case it is not final fput() time. In normal cases,
-	 * we would not be holding the last reference to the file*, so
-	 * this function will be executed w/out any aio kthread wakeup.
-	 */
-	if (unlikely(!fput_atomic(req->ki_filp))) {
-		spin_lock(&fput_lock);
-		list_add(&req->ki_list, &fput_head);
-		spin_unlock(&fput_lock);
-		schedule_work(&fput_work);
-	} else {
-		req->ki_filp = NULL;
-		really_put_req(ctx, req);
-	}
+	fput(req->ki_filp);
+	req->ki_filp = NULL;
+	really_put_req(ctx, req);
 	return 1;
 }
 
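The hunks above delete the manual delayed-fput machinery: __aio_put_req() used to park a kiocb on fput_head under fput_lock and schedule fput_work whenever fput_atomic() could not drop the last file reference, aio_fput_routine() drained that list in process context, and kiocb_batch_refill() carried a matching starvation workaround. With fput() safe to call directly from the completion path, all of that collapses into the three added lines. For reference, the fragment below is a minimal sketch of the deferred-cleanup idiom being removed; the identifiers (struct my_item, pending_lock, pending_head, cleanup_work, defer_release) are hypothetical and illustrative only, not part of this commit or of any kernel API beyond the list/spinlock/workqueue primitives it uses.

/*
 * Sketch of the deferred-cleanup idiom removed above: producers queue
 * items on a spinlock-protected list and kick a work item; the worker
 * drains the list later in process context, where the heavy release
 * work is allowed.  Names are illustrative only.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_item {
	struct list_head list;
	void (*release)(struct my_item *);	/* must run in process context */
};

static DEFINE_SPINLOCK(pending_lock);
static LIST_HEAD(pending_head);

static void cleanup_routine(struct work_struct *unused)
{
	spin_lock_irq(&pending_lock);
	while (!list_empty(&pending_head)) {
		struct my_item *item = list_first_entry(&pending_head,
							struct my_item, list);

		list_del(&item->list);
		/* Drop the lock around the potentially heavy release. */
		spin_unlock_irq(&pending_lock);

		item->release(item);

		spin_lock_irq(&pending_lock);
	}
	spin_unlock_irq(&pending_lock);
}

static DECLARE_WORK(cleanup_work, cleanup_routine);

/* Called from a context that must not perform the release itself. */
static void defer_release(struct my_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	list_add_tail(&item->list, &pending_head);
	spin_unlock_irqrestore(&pending_lock, flags);
	schedule_work(&cleanup_work);
}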