@@ -562,8 +562,8 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
 
 	/*
 	 * Mark all the cfids as closed, and move them to the cfids->dying list.
-	 * They'll be cleaned up later by cfids_invalidation_worker. Take
-	 * a reference to each cfid during this process.
+	 * They'll be cleaned up by laundromat. Take a reference to each cfid
+	 * during this process.
 	 */
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
@@ -580,12 +580,11 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
 		} else
 			kref_get(&cfid->refcount);
 	}
-	/*
-	 * Queue dropping of the dentries once locks have been dropped
-	 */
-	if (!list_empty(&cfids->dying))
-		queue_work(cfid_put_wq, &cfids->invalidation_work);
 	spin_unlock(&cfids->cfid_list_lock);
+
+	/* run laundromat unconditionally now as there might have been previously queued work */
+	mod_delayed_work(cfid_put_wq, &cfids->laundromat_work, 0);
+	flush_delayed_work(&cfids->laundromat_work);
 }
 
 static void
@@ -715,25 +714,6 @@ static void free_cached_dir(struct cached_fid *cfid)
 	kfree(cfid);
 }
 
-static void cfids_invalidation_worker(struct work_struct *work)
-{
-	struct cached_fids *cfids = container_of(work, struct cached_fids,
-						 invalidation_work);
-	struct cached_fid *cfid, *q;
-	LIST_HEAD(entry);
-
-	spin_lock(&cfids->cfid_list_lock);
-	/* move cfids->dying to the local list */
-	list_cut_before(&entry, &cfids->dying, &cfids->dying);
-	spin_unlock(&cfids->cfid_list_lock);
-
-	list_for_each_entry_safe(cfid, q, &entry, entry) {
-		list_del(&cfid->entry);
-		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
-		kref_put(&cfid->refcount, smb2_close_cached_fid);
-	}
-}
-
 static void cfids_laundromat_worker(struct work_struct *work)
 {
 	struct cached_fids *cfids;
@@ -743,6 +723,9 @@ static void cfids_laundromat_worker(struct work_struct *work)
 	cfids = container_of(work, struct cached_fids, laundromat_work.work);
 
 	spin_lock(&cfids->cfid_list_lock);
+	/* move cfids->dying to the local list */
+	list_cut_before(&entry, &cfids->dying, &cfids->dying);
+
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
 		if (cfid->last_access_time &&
 		    time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
@@ -796,7 +779,6 @@ struct cached_fids *init_cached_dirs(void)
 	INIT_LIST_HEAD(&cfids->entries);
 	INIT_LIST_HEAD(&cfids->dying);
 
-	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
 	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
 	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
 			   dir_cache_timeout * HZ);
@@ -820,7 +802,6 @@ void free_cached_dirs(struct cached_fids *cfids)
 		return;
 
 	cancel_delayed_work_sync(&cfids->laundromat_work);
-	cancel_work_sync(&cfids->invalidation_work);
 
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {