@@ -2496,16 +2496,25 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	void *freelist = c->freelist;
-	struct page *page = c->page;
+	unsigned long flags;
+	struct page *page;
+	void *freelist;
+
+	local_irq_save(flags);
+
+	page = c->page;
+	freelist = c->freelist;
 
 	c->page = NULL;
 	c->freelist = NULL;
 	c->tid = next_tid(c->tid);
 
-	deactivate_slab(s, page, freelist);
+	local_irq_restore(flags);
 
-	stat(s, CPUSLAB_FLUSH);
+	if (page) {
+		deactivate_slab(s, page, freelist);
+		stat(s, CPUSLAB_FLUSH);
+	}
 }
 
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
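The reworked flush_slab() snapshots and clears the per-CPU fields inside the IRQ-disabled window, then runs the expensive deactivate_slab() with interrupts enabled again; the new if (page) test covers a slot that was emptied between the caller's check and the IRQ-disabled section. A minimal userspace sketch of this detach-then-process shape, with a pthread mutex standing in for local_irq_save() and all names hypothetical:

/* Userspace sketch (not kernel code): snapshot and clear shared fields
 * inside the critical section, do the heavy teardown outside it. */
#include <pthread.h>
#include <stdio.h>

struct cache_cpu {
	pthread_mutex_t lock;	/* stands in for local_irq_save() */
	void *page;
	void *freelist;
};

static void deactivate(void *page, void *freelist)
{
	/* expensive work, done with the lock dropped */
	printf("deactivating page %p, freelist %p\n", page, freelist);
}

static void flush(struct cache_cpu *c)
{
	void *page, *freelist;

	pthread_mutex_lock(&c->lock);
	page = c->page;
	freelist = c->freelist;
	c->page = NULL;
	c->freelist = NULL;
	pthread_mutex_unlock(&c->lock);

	if (page)		/* slot may already have been flushed */
		deactivate(page, freelist);
}

int main(void)
{
	struct cache_cpu c = { PTHREAD_MUTEX_INITIALIZER, &c, &c };

	flush(&c);
	flush(&c);		/* second flush finds nothing to do */
	return 0;
}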
@@ -2526,33 +2535,79 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 	unfreeze_partials_cpu(s, c);
 }
 
+struct slub_flush_work {
+	struct work_struct work;
+	struct kmem_cache *s;
+	bool skip;
+};
+
 /*
  * Flush cpu slab.
  *
- * Called from IPI handler with interrupts disabled.
+ * Called from CPU work handler with migration disabled.
  */
-static void flush_cpu_slab(void *d)
+static void flush_cpu_slab(struct work_struct *w)
 {
-	struct kmem_cache *s = d;
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+	struct kmem_cache *s;
+	struct kmem_cache_cpu *c;
+	struct slub_flush_work *sfw;
+
+	sfw = container_of(w, struct slub_flush_work, work);
+
+	s = sfw->s;
+	c = this_cpu_ptr(s->cpu_slab);
 
 	if (c->page)
 		flush_slab(s, c);
 
 	unfreeze_partials(s);
 }
 
-static bool has_cpu_slab(int cpu, void *info)
+static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 {
-	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	return c->page || slub_percpu_partial(c);
 }
 
+static DEFINE_MUTEX(flush_lock);
+static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
+
+static void flush_all_cpus_locked(struct kmem_cache *s)
+{
+	struct slub_flush_work *sfw;
+	unsigned int cpu;
+
+	lockdep_assert_cpus_held();
+	mutex_lock(&flush_lock);
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (!has_cpu_slab(cpu, s)) {
+			sfw->skip = true;
+			continue;
+		}
+		INIT_WORK(&sfw->work, flush_cpu_slab);
+		sfw->skip = false;
+		sfw->s = s;
+		schedule_work_on(cpu, &sfw->work);
+	}
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (sfw->skip)
+			continue;
+		flush_work(&sfw->work);
+	}
+
+	mutex_unlock(&flush_lock);
+}
+
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+	cpus_read_lock();
+	flush_all_cpus_locked(s);
+	cpus_read_unlock();
 }
 
 /*
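In place of the on_each_cpu_cond() IPI broadcast, flush_all_cpus_locked() fans work out in two passes: queue a slub_flush_work on each CPU that actually has something cached (marking the idle ones with skip), then flush_work() only the items that were queued. Because a workqueue handler receives just the work_struct pointer, flush_cpu_slab() recovers its kmem_cache from the containing struct via container_of(). A compact userspace analogue of the same fan-out/join shape, using pthreads instead of the kernel workqueue; every name below is hypothetical:

/* Userspace sketch: per-worker work items embedded in a larger struct,
 * recovered via container_of(), with a skip flag for idle workers. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define NWORKERS 4

struct work { pthread_t thread; };

struct flush_work {
	struct work work;	/* embedded, like struct work_struct */
	int id;			/* payload, like the kmem_cache pointer */
	bool skip;
};

static void *flush_fn(void *arg)
{
	/* recover the containing struct from the embedded member */
	struct flush_work *fw = container_of((struct work *)arg,
					     struct flush_work, work);
	printf("flushing worker %d\n", fw->id);
	return NULL;
}

int main(void)
{
	static struct flush_work works[NWORKERS];	/* like DEFINE_PER_CPU */
	int i;

	for (i = 0; i < NWORKERS; i++) {	/* fan-out pass */
		works[i].id = i;
		works[i].skip = (i % 2);	/* pretend odd workers are idle */
		if (works[i].skip)
			continue;
		pthread_create(&works[i].work.thread, NULL, flush_fn,
			       &works[i].work);	/* like schedule_work_on() */
	}

	for (i = 0; i < NWORKERS; i++) {	/* join pass, like flush_work() */
		if (works[i].skip)
			continue;
		pthread_join(works[i].work.thread, NULL);
	}
	return 0;
}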
@@ -4097,7 +4152,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 	int node;
 	struct kmem_cache_node *n;
 
-	flush_all(s);
+	flush_all_cpus_locked(s);
 	/* Attempt to free all objects */
 	for_each_kmem_cache_node(s, node, n) {
 		free_partial(s, n);
@@ -4373,7 +4428,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+static int __kmem_cache_do_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -4385,7 +4440,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	unsigned long flags;
 	int ret = 0;
 
-	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
@@ -4435,13 +4489,21 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	return ret;
 }
 
+int __kmem_cache_shrink(struct kmem_cache *s)
+{
+	flush_all(s);
+	return __kmem_cache_do_shrink(s);
+}
+
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
-	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s);
+	list_for_each_entry(s, &slab_caches, list) {
+		flush_all_cpus_locked(s);
+		__kmem_cache_do_shrink(s);
+	}
 	mutex_unlock(&slab_mutex);
 
 	return 0;
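The remaining hunks split each self-locking entry point from a _locked helper: flush_all() and __kmem_cache_shrink() take cpus_read_lock() themselves, while __kmem_cache_shutdown() and the memory-offline callback, which presumably already run under CPU hotplug protection (the contract lockdep_assert_cpus_held() enforces), call flush_all_cpus_locked() and __kmem_cache_do_shrink() directly. A tiny userspace sketch of that wrapper/helper split, with a pthread rwlock standing in for the hotplug lock and hypothetical names:

/* Userspace sketch: public wrapper takes the lock, _locked helper
 * assumes the caller already holds it. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Helper: caller must already hold hotplug_lock (like flush_all_cpus_locked()). */
static void do_flush_locked(const char *cache)
{
	printf("flushing %s\n", cache);
}

/* Public wrapper: takes the lock itself (like flush_all()). */
static void do_flush(const char *cache)
{
	pthread_rwlock_rdlock(&hotplug_lock);	/* like cpus_read_lock() */
	do_flush_locked(cache);
	pthread_rwlock_unlock(&hotplug_lock);	/* like cpus_read_unlock() */
}

int main(void)
{
	/* ordinary caller: the self-locking entry point */
	do_flush("kmalloc-64");

	/* a caller already inside the locked region uses the _locked
	 * variant, so the lock is taken once, at the outermost level */
	pthread_rwlock_rdlock(&hotplug_lock);
	do_flush_locked("kmalloc-128");
	pthread_rwlock_unlock(&hotplug_lock);
	return 0;
}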