@@ -35,44 +35,12 @@ unsigned long stack_trace_max_size;
 arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static DEFINE_PER_CPU(int, trace_active);
+DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-/**
- * stack_tracer_disable - temporarily disable the stack tracer
- *
- * There's a few locations (namely in RCU) where stack tracing
- * cannot be executed. This function is used to disable stack
- * tracing during those critical sections.
- *
- * This function must be called with preemption or interrupts
- * disabled and stack_tracer_enable() must be called shortly after
- * while preemption or interrupts are still disabled.
- */
-void stack_tracer_disable(void)
-{
-	/* Preemption or interupts must be disabled */
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_inc(trace_active);
-}
-
-/**
- * stack_tracer_enable - re-enable the stack tracer
- *
- * After stack_tracer_disable() is called, stack_tracer_enable()
- * must be called shortly afterward.
- */
-void stack_tracer_enable(void)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_dec(trace_active);
-}
-
 void stack_trace_print(void)
 {
 	long i;
@@ -243,16 +211,16 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	preempt_disable_notrace();
 
 	/* no atomic needed, we only modify this variable by this cpu */
-	__this_cpu_inc(trace_active);
-	if (__this_cpu_read(trace_active) != 1)
+	__this_cpu_inc(disable_stack_tracer);
+	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
 
 	ip += MCOUNT_INSN_SIZE;
 
 	check_stack(ip, &stack);
 
  out:
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	/* prevent recursion in schedule */
 	preempt_enable_notrace();
 }
@@ -294,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	/*
 	 * In case we trace inside arch_spin_lock() or after (NMI),
 	 * we will cause circular lock, so we also need to increase
-	 * the percpu trace_active here.
+	 * the percpu disable_stack_tracer here.
 	 */
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	local_irq_restore(flags);
 
 	return count;
@@ -338,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	local_irq_disable();
 
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 
@@ -352,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 
 	local_irq_enable();
 }
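
Note on the first hunk: the out-of-line stack_tracer_disable()/stack_tracer_enable() helpers are removed while the per-CPU counter loses its static qualifier and is renamed to disable_stack_tracer, which suggests the helpers are presumably reintroduced as static inlines in a shared header so callers (such as RCU) can bump the counter without a traced function call. A minimal sketch of what such inline replacements would look like, assuming that placement; the header location is an assumption not confirmed by this diff, and the bodies simply mirror the removed out-of-line versions:

/*
 * Sketch only: assumed inline replacements for the removed helpers,
 * presumably living in a shared header (header name is an assumption).
 * disable_stack_tracer is the per-CPU counter exported by this patch.
 */
DECLARE_PER_CPU(int, disable_stack_tracer);

/*
 * Must be called with preemption or interrupts disabled; pair with
 * stack_tracer_enable() shortly afterward, as the removed kerneldoc
 * comment required.
 */
static inline void stack_tracer_disable(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}

With this shape, a critical section that cannot tolerate stack tracing would bracket its code with stack_tracer_disable()/stack_tracer_enable() while preemption or interrupts are already disabled, exactly as the removed comments describe, but with only a per-CPU increment and decrement emitted at the call sites.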