 #include <linux/idr.h>
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
+#include <linux/locallock.h>
 
 #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
 #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
 static DEFINE_PER_CPU(int, idr_preload_cnt);
 static DEFINE_SPINLOCK(simple_ida_lock);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
+
+static inline void idr_preload_lock(void)
+{
+	local_lock(idr_lock);
+}
+
+static inline void idr_preload_unlock(void)
+{
+	local_unlock(idr_lock);
+}
+
+void idr_preload_end(void)
+{
+	idr_preload_unlock();
+}
+EXPORT_SYMBOL(idr_preload_end);
+#else
+static inline void idr_preload_lock(void)
+{
+	preempt_disable();
+}
+
+static inline void idr_preload_unlock(void)
+{
+	preempt_enable();
+}
+#endif
+
+
 /* the maximum ID which can be allocated given idr->layers */
 static int idr_max(int layers)
 {
@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * context.  See idr_preload() for details.
 	 */
 	if (!in_interrupt()) {
-		preempt_disable();
+		idr_preload_lock();
 		new = __this_cpu_read(idr_preload_head);
 		if (new) {
 			__this_cpu_write(idr_preload_head, new->ary[0]);
 			__this_cpu_dec(idr_preload_cnt);
 			new->ary[0] = NULL;
 		}
-		preempt_enable();
+		idr_preload_unlock();
 		if (new)
 			return new;
 	}
@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-
 /**
  * idr_preload - preload for idr_alloc()
  * @gfp_mask: allocation mask to use for preloading
@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
 	WARN_ON_ONCE(in_interrupt());
 	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 
-	preempt_disable();
+	idr_preload_lock();
 
 	/*
 	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
 	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
 		struct idr_layer *new;
 
-		preempt_enable();
+		idr_preload_unlock();
 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-		preempt_disable();
+		idr_preload_lock();
 		if (!new)
 			break;
 
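
For reference, a minimal caller-side sketch of the API this diff reworks (not part of the patch; example_idr_assign() and example_lock are hypothetical names): callers still bracket idr_alloc() with idr_preload()/idr_preload_end(), and with CONFIG_PREEMPT_RT_FULL that pair now takes the per-CPU local lock instead of disabling preemption, so sleeping spinlocks remain legal inside the preload section on RT.

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock protecting the idr */

static int example_idr_assign(struct idr *idr, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; refills the per-CPU idr_layer cache */
	spin_lock(&example_lock);
	/* GFP_NOWAIT: this call must not sleep; the preloaded layers back it */
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();		/* local_unlock() on RT, preempt_enable() otherwise */

	return id;			/* valid id >= 0, or -ENOMEM / -ENOSPC */
}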