@@ -130,6 +130,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 #ifdef CONFIG_MEMCG_KMEM
 
 LIST_HEAD(slab_root_caches);
+static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
 
 void slab_init_memcg_params(struct kmem_cache *s)
 {
@@ -734,14 +735,22 @@ static void kmemcg_cache_deactivate(struct kmem_cache *s)
 
 	__kmemcg_cache_deactivate(s);
 
+	/*
+	 * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
+	 * flag and make sure that no new kmem_cache deactivation tasks
+	 * are queued (see flush_memcg_workqueue()).
+	 */
+	spin_lock_irq(&memcg_kmem_wq_lock);
 	if (s->memcg_params.root_cache->memcg_params.dying)
-		return;
+		goto unlock;
 
 	/* pin memcg so that @s doesn't get destroyed in the middle */
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.work_fn = __kmemcg_cache_deactivate_after_rcu;
 	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
+unlock:
+	spin_unlock_irq(&memcg_kmem_wq_lock);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
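
With this hunk, the dying check and the call_rcu() that queues deferred deactivation happen atomically with respect to flush_memcg_workqueue() setting the flag (next hunk); previously the check ran unlocked, so a deactivation could slip in between the flag being set and the workqueue being flushed. A standalone sketch of the two-sided pattern follows the final hunk below.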
@@ -851,9 +860,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 
 static void flush_memcg_workqueue(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
+	spin_lock_irq(&memcg_kmem_wq_lock);
 	s->memcg_params.dying = true;
-	mutex_unlock(&slab_mutex);
+	spin_unlock_irq(&memcg_kmem_wq_lock);
 
 	/*
 	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
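
Taken together, the last two hunks implement one pattern: the deactivation path checks memcg_params.dying and queues its deferred work under memcg_kmem_wq_lock, and flush_memcg_workqueue() sets the flag under the same lock, so once the flush path drops the lock it knows no new deactivation work can be queued behind it. Below is a minimal userspace sketch of that pattern, assuming POSIX threads; the names cache, deactivate_cache(), shutdown_cache() and deferred_deactivate() are hypothetical stand-ins for the kernel code, a pthread mutex stands in for the irq-safe spinlock, and joining a thread stands in for the RCU/workqueue flush that follows in the kernel function.

/* Sketch only: a pthread analogue of the dying-flag synchronization above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cache {
	pthread_mutex_t wq_lock;   /* plays the role of memcg_kmem_wq_lock */
	bool dying;                /* plays the role of memcg_params.dying */
	pthread_t worker;
	bool worker_queued;
};

static void *deferred_deactivate(void *arg)
{
	/* stands in for the deferred __kmemcg_cache_deactivate_after_rcu() */
	(void)arg;
	printf("deferred deactivation ran\n");
	return NULL;
}

/* Deactivation side: only queue deferred work if the cache is not dying.
 * The flag check and the queuing sit under the same lock that
 * shutdown_cache() takes to set the flag, so once shutdown_cache()
 * releases the lock, no *new* work can appear behind its flush. */
static void deactivate_cache(struct cache *c)
{
	pthread_mutex_lock(&c->wq_lock);
	if (!c->dying) {
		pthread_create(&c->worker, NULL, deferred_deactivate, c);
		c->worker_queued = true;
	}
	pthread_mutex_unlock(&c->wq_lock);
}

/* Shutdown side: set the dying flag under the lock, then drain whatever
 * was queued before the flag was set. Joining the worker plays the role
 * of the RCU/workqueue flush in flush_memcg_workqueue(). */
static void shutdown_cache(struct cache *c)
{
	pthread_mutex_lock(&c->wq_lock);
	c->dying = true;
	pthread_mutex_unlock(&c->wq_lock);

	if (c->worker_queued)
		pthread_join(c->worker, NULL);
}

int main(void)
{
	struct cache c = { .wq_lock = PTHREAD_MUTEX_INITIALIZER };

	deactivate_cache(&c);   /* may queue deferred work */
	shutdown_cache(&c);     /* after this, no new work can be queued */
	deactivate_cache(&c);   /* no-op: dying is already set */
	return 0;
}

The same reasoning explains the switch from slab_mutex to a dedicated spinlock: the check now sits directly in kmemcg_cache_deactivate(), next to the call_rcu() it guards, and a spinlock taken with spin_lock_irq() can be used in contexts where sleeping on the much broader slab_mutex would not be possible.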