@@ -76,6 +76,8 @@
 
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
+#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
+#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
 
 #ifdef CONFIG_SMP
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
@@ -102,9 +104,12 @@ struct pcpu_chunk {
 	int			free_size;	/* free bytes in the chunk */
 	int			contig_hint;	/* max contiguous size hint */
 	void			*base_addr;	/* base address of this chunk */
+
 	int			map_used;	/* # of map entries used before the sentry */
 	int			map_alloc;	/* # of map entries allocated */
 	int			*map;		/* allocation map */
+	struct work_struct	map_extend_work;/* async ->map[] extension */
+
 	void			*data;		/* chunk data */
 	int			first_free;	/* no free below this */
 	bool			immutable;	/* no [de]population allowed */
@@ -318,9 +323,14 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 /**
  * pcpu_need_to_extend - determine whether chunk area map needs to be extended
  * @chunk: chunk of interest
+ * @is_atomic: the allocation context
  *
- * Determine whether area map of @chunk needs to be extended to
- * accommodate a new allocation.
+ * Determine whether the area map of @chunk needs to be extended.  If
+ * @is_atomic, only the amount necessary for a new allocation is
+ * considered; however, async extension is scheduled if the remaining
+ * headroom is low.  If !@is_atomic, it aims for more empty space.
+ * Combined, this ensures the map is likely to have enough available
+ * space to accommodate atomic allocations which can't extend maps directly.
  *
  * CONTEXT:
  * pcpu_lock.
@@ -329,15 +339,25 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  * New target map allocation length if extension is necessary, 0
  * otherwise.
  */
-static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 {
-	int new_alloc;
+	int margin, new_alloc;
+
+	if (is_atomic) {
+		margin = 3;
 
-	if (chunk->map_alloc >= chunk->map_used + 3)
+		if (chunk->map_alloc <
+		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW)
+			schedule_work(&chunk->map_extend_work);
+	} else {
+		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
+	}
+
+	if (chunk->map_alloc >= chunk->map_used + margin)
 		return 0;
 
 	new_alloc = PCPU_DFL_MAP_ALLOC;
-	while (new_alloc < chunk->map_used + 3)
+	while (new_alloc < chunk->map_used + margin)
 		new_alloc *= 2;
 
 	return new_alloc;
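
To make the sizing policy concrete, here is a minimal userspace model of the arithmetic above (the need_to_extend() helper is hypothetical; only the margins and the doubling loop mirror the patch, the rest is scaffolding):

/* Minimal userspace model of the sizing policy; not kernel API.
 * Build with: cc -o sizing sizing.c */
#include <stdbool.h>
#include <stdio.h>

#define PCPU_DFL_MAP_ALLOC		16
#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64

static int need_to_extend(int map_alloc, int map_used, bool is_atomic)
{
	/* atomic: only the 3 entries a single allocation can consume;
	 * non-atomic: keep large slack for future atomic callers */
	int margin = is_atomic ? 3 : PCPU_ATOMIC_MAP_MARGIN_HIGH;
	int new_alloc;

	if (map_alloc >= map_used + margin)
		return 0;

	/* double from the default size until the margin fits */
	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < map_used + margin)
		new_alloc *= 2;
	return new_alloc;
}

int main(void)
{
	printf("%d\n", need_to_extend(16, 10, true));	/* 0: 16 >= 10 + 3 */
	printf("%d\n", need_to_extend(16, 15, true));	/* 32: 16 < 15 + 3 */
	printf("%d\n", need_to_extend(16, 15, false));	/* 128: needs 15 + 64 */
	return 0;
}

An atomic caller only demands the 3 map entries one allocation can split an area into, so a 16-entry map with 10 used entries passes untouched; a non-atomic caller insists on PCPU_ATOMIC_MAP_MARGIN_HIGH entries of slack, which is what keeps maps pre-grown for later atomic allocations.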
@@ -394,6 +414,20 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	return 0;
 }
 
+static void pcpu_map_extend_workfn(struct work_struct *work)
+{
+	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
+						map_extend_work);
+	int new_alloc;
+
+	spin_lock_irq(&pcpu_lock);
+	new_alloc = pcpu_need_to_extend(chunk, false);
+	spin_unlock_irq(&pcpu_lock);
+
+	if (new_alloc)
+		pcpu_extend_area_map(chunk, new_alloc);
+}
+
 /**
  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
  * @chunk: chunk the candidate area belongs to
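
The work function above follows a deliberate shape: the decision is taken under pcpu_lock, but the extension itself runs after the lock is dropped, because growing the map allocates memory and may sleep. A rough userspace sketch of that pattern (hypothetical names; a pthread mutex stands in for pcpu_lock):

/* "Decide under lock, allocate outside, publish under lock" sketch. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define MARGIN_HIGH	64

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static int *map;
static int map_alloc = 16, map_used = 14;

static void map_extend_workfn(void)
{
	int new_alloc, *new_map;

	/* 1. decide under the lock: cheap check only */
	pthread_mutex_lock(&map_lock);
	new_alloc = (map_alloc < map_used + MARGIN_HIGH) ? 128 : 0;
	pthread_mutex_unlock(&map_lock);
	if (!new_alloc)
		return;

	/* 2. allocate outside the lock: this may block */
	new_map = calloc(new_alloc, sizeof(*new_map));
	if (!new_map)
		return;

	/* 3. re-take the lock to copy and publish the bigger map */
	pthread_mutex_lock(&map_lock);
	memcpy(new_map, map, map_used * sizeof(*map));
	free(map);
	map = new_map;
	map_alloc = new_alloc;
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	map = calloc(map_alloc, sizeof(*map));
	map_extend_workfn();	/* 14 used + 64 margin > 16: grows the map */
	free(map);
	return 0;
}

The real pcpu_extend_area_map() additionally re-checks under the lock whether a concurrent extender already grew the map, and frees its buffer if so; this sketch omits that race handling.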
@@ -647,6 +681,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 	chunk->map_used = 1;
 
 	INIT_LIST_HEAD(&chunk->list);
+	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
 	chunk->free_size = pcpu_unit_size;
 	chunk->contig_hint = pcpu_unit_size;
 
@@ -767,7 +802,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 			goto fail_unlock;
 		}
 
-		while ((new_alloc = pcpu_need_to_extend(chunk))) {
+		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
 			spin_unlock_irqrestore(&pcpu_lock, flags);
 			if (is_atomic ||
 			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
@@ -792,7 +827,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 			if (size > chunk->contig_hint)
 				continue;
 
-			new_alloc = pcpu_need_to_extend(chunk);
+			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
 			if (new_alloc) {
 				if (is_atomic)
 					continue;
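
The caller-visible payoff shows up here: an atomic allocation never extends a map itself; it either finds a chunk whose map already has room or skips the chunk (the continue above), trusting the scheduled work item to have kept maps topped up. A hedged caller-side sketch, assuming the alloc_percpu_gfp() interface added elsewhere in this patch series (kernel code, not buildable standalone):

/* Hypothetical caller; the struct and function names are illustrative. */
#include <linux/percpu.h>
#include <linux/gfp.h>

struct flow_stats {
	u64	packets;
	u64	bytes;
};

/* May be called from softirq context, where GFP_KERNEL is illegal;
 * succeeds only if some chunk's area map already has room, which the
 * async extension above works to guarantee. */
static struct flow_stats __percpu *flow_stats_alloc_atomic(void)
{
	return alloc_percpu_gfp(struct flow_stats, GFP_NOWAIT);
}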
@@ -1418,6 +1453,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 */
 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 	INIT_LIST_HEAD(&schunk->list);
+	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
 	schunk->base_addr = base_addr;
 	schunk->map = smap;
 	schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1446,6 +1482,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	if (dyn_size) {
 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 		INIT_LIST_HEAD(&dchunk->list);
+		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
 		dchunk->base_addr = base_addr;
 		dchunk->map = dmap;
 		dchunk->map_alloc = ARRAY_SIZE(dmap);