@@ -168,9 +168,9 @@ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
  */
 static int pcpu_nr_empty_pop_pages;
 
-/* reclaim work to release fully free chunks, scheduled from free path */
-static void pcpu_reclaim(struct work_struct *work);
-static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+/* balance work is used to populate or destroy chunks asynchronously */
+static void pcpu_balance_workfn(struct work_struct *work);
+static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 
 static bool pcpu_addr_in_first_chunk(void *addr)
 {
@@ -1080,36 +1080,33 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 }
 
 /**
- * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * pcpu_balance_workfn - reclaim fully free chunks, workqueue function
  * @work: unused
  *
  * Reclaim all fully free chunks except for the first one.
- *
- * CONTEXT:
- * workqueue context.
  */
-static void pcpu_reclaim(struct work_struct *work)
+static void pcpu_balance_workfn(struct work_struct *work)
 {
-	LIST_HEAD(todo);
-	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+	LIST_HEAD(to_free);
+	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
 	struct pcpu_chunk *chunk, *next;
 
 	mutex_lock(&pcpu_alloc_mutex);
 	spin_lock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, head, list) {
+	list_for_each_entry_safe(chunk, next, free_head, list) {
 		WARN_ON(chunk->immutable);
 
 		/* spare the first one */
-		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
 			continue;
 
-		list_move(&chunk->list, &todo);
+		list_move(&chunk->list, &to_free);
 	}
 
 	spin_unlock_irq(&pcpu_lock);
 
-	list_for_each_entry_safe(chunk, next, &todo, list) {
+	list_for_each_entry_safe(chunk, next, &to_free, list) {
 		int rs, re;
 
 		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
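The hunk above relies on a common kernel idiom: detach entries onto a private list while holding the spinlock, then tear them down after dropping it, since chunk teardown may sleep. A minimal sketch of that two-phase drain, with hypothetical demo_* names that are not part of the patch:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical item type, for illustration only. */
struct demo_item {
	struct list_head list;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/*
 * Two-phase drain: move items to a private to_free list while holding
 * the lock, then release them unlocked, where sleeping is allowed.
 */
static void demo_drain(void)
{
	LIST_HEAD(to_free);
	struct demo_item *item, *next;

	spin_lock_irq(&demo_lock);
	list_for_each_entry_safe(item, next, &demo_list, list)
		list_move(&item->list, &to_free);
	spin_unlock_irq(&demo_lock);

	list_for_each_entry_safe(item, next, &to_free, list) {
		list_del(&item->list);
		kfree(item);
	}
}

The _safe iterator variants are what make list_move() and list_del() legal inside the loops, exactly as in pcpu_balance_workfn() itself.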
@@ -1163,7 +1160,7 @@ void free_percpu(void __percpu *ptr)
 
 		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 			if (pos != chunk) {
-				schedule_work(&pcpu_reclaim_work);
+				schedule_work(&pcpu_balance_work);
 				break;
 			}
 	}
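For context on the mechanism the rename touches: DECLARE_WORK() statically defines a work item bound to its handler, and schedule_work() queues it on the system workqueue, which is why free_percpu() can defer reclaim from a context that must not sleep. A minimal sketch of the same pattern in a hypothetical module (names are illustrative, not from the patch):

#include <linux/module.h>
#include <linux/workqueue.h>

/* The handler runs later in process context, so it may take mutexes. */
static void demo_workfn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}
static DECLARE_WORK(demo_work, demo_workfn);

static int __init demo_init(void)
{
	schedule_work(&demo_work);	/* queue on the system workqueue */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);	/* wait for the handler to finish */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");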