@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
+	unsigned long flags;
 
	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
		if (!new)
			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
 }
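The two hunks above switch pool_lock from plain spin_lock()/spin_unlock() to the irqsave variants, so the pool can be refilled safely no matter whether interrupts are already disabled at the call site. A minimal standalone sketch of that pattern follows (example_lock, example_count and example_add are illustrative names, not code from this commit):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_count;

static void example_add(void)
{
	unsigned long flags;

	/* Disable local interrupts and remember their previous state. */
	spin_lock_irqsave(&example_lock, flags);
	example_count++;	/* critical section */
	/* Restore the interrupt state exactly as it was on entry. */
	spin_unlock_irqrestore(&example_lock, flags);
}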
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
	}
	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
	return obj;
 }
 
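With the repeat/retry refill loop removed, alloc_object() reduces to popping the first entry off the obj_pool hlist under pool_lock. A sketch of that step as a hypothetical standalone helper (pop_free_obj is an invented name, and the unlink/bookkeeping lines are assumptions about the body the diff elides):

static struct debug_obj *pop_free_obj(void)
{
	struct debug_obj *obj = NULL;

	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		/* Assumed bookkeeping; these lines are elided in the diff. */
		hlist_del(&obj->node);
		obj_pool_free--;
	}
	spin_unlock(&pool_lock);

	return obj;
}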
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
	struct debug_obj *obj;
	unsigned long flags;
 
+	fill_pool();
+
	db = get_bucket((unsigned long) addr);
 
	spin_lock_irqsave(&db->lock, flags);
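Net effect of the last hunk: the pool is topped up before the bucket lock is taken, rather than from inside alloc_object() while db->lock is held. A rough sketch of the resulting control flow (the db declaration and elided statements are assumptions, marked as comments):

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	struct debug_bucket *db;	/* assumed declaration, not in the hunk */
	struct debug_obj *obj;
	unsigned long flags;

	/* Refill happens first, outside any bucket lock. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);
	/* ... lookup_object()/alloc_object() now run with the pool already
	 * topped up, so no refill is attempted under db->lock ... */
	spin_unlock_irqrestore(&db->lock, flags);
}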