@@ -325,6 +325,14 @@ int slab_unmergeable(struct kmem_cache *s)
 	if (s->refcount < 0)
 		return 1;
 
+#ifdef CONFIG_MEMCG_KMEM
+	/*
+	 * Skip the dying kmem_cache.
+	 */
+	if (s->memcg_params.dying)
+		return 1;
+#endif
+
 	return 0;
 }
 
@@ -885,12 +893,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 	return 0;
 }
 
-static void flush_memcg_workqueue(struct kmem_cache *s)
+static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
 {
 	spin_lock_irq(&memcg_kmem_wq_lock);
 	s->memcg_params.dying = true;
 	spin_unlock_irq(&memcg_kmem_wq_lock);
+}
 
+static void flush_memcg_workqueue(struct kmem_cache *s)
+{
 	/*
 	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
@@ -922,10 +933,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
 {
 	return 0;
 }
-
-static inline void flush_memcg_workqueue(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
 void slab_kmem_cache_release(struct kmem_cache *s)
@@ -943,8 +950,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (unlikely(!s))
 		return;
 
-	flush_memcg_workqueue(s);
-
 	get_online_cpus();
 	get_online_mems();
 
@@ -954,6 +959,22 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (s->refcount)
 		goto out_unlock;
 
+#ifdef CONFIG_MEMCG_KMEM
+	memcg_set_kmem_cache_dying(s);
+
+	mutex_unlock(&slab_mutex);
+
+	put_online_mems();
+	put_online_cpus();
+
+	flush_memcg_workqueue(s);
+
+	get_online_cpus();
+	get_online_mems();
+
+	mutex_lock(&slab_mutex);
+#endif
+
 	err = shutdown_memcg_caches(s);
 	if (!err)
 		err = shutdown_cache(s);
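For orientation, here is a condensed, hypothetical view of how kmem_cache_destroy() reads once the patch is applied, reconstructed only from the hunks above; the "..." elisions, the int err; declaration, and the explanatory comments are assumptions added for illustration, not part of the patch:

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	...
	if (s->refcount)
		goto out_unlock;

#ifdef CONFIG_MEMCG_KMEM
	/* Mark the cache dying while slab_mutex is still held, so that
	 * slab_unmergeable() refuses to merge new caches into it. */
	memcg_set_kmem_cache_dying(s);

	/* Drop slab_mutex and the cpu/mem hotplug references before the
	 * blocking flush, presumably because the deferred memcg work being
	 * waited on takes these same locks. */
	mutex_unlock(&slab_mutex);
	put_online_mems();
	put_online_cpus();

	flush_memcg_workqueue(s);

	get_online_cpus();
	get_online_mems();
	mutex_lock(&slab_mutex);
#endif

	err = shutdown_memcg_caches(s);
	if (!err)
		err = shutdown_cache(s);
	...
}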