
Commit 8aaf1ee

tracing: Rename trace_active to disable_stack_tracer and inline its modification
In order to eliminate a function call, make "trace_active" into "disable_stack_tracer" and convert stack_tracer_disable() and friends into static inline functions.

Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]>
1 parent 5367278 commit 8aaf1ee
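For context on the "eliminate a function call" rationale: once the per-CPU variable is declared in the header, the accessors can be static inline, so every caller compiles a bare per-CPU increment or decrement instead of calling into kernel/trace/trace_stack.c. Below is a minimal, compilable userspace sketch of that header/translation-unit split; the names are illustrative and not from the commit, and the kernel uses DECLARE_PER_CPU()/DEFINE_PER_CPU() with this_cpu_inc()/this_cpu_dec() rather than a plain int.

#include <stdio.h>

/* Stand-in for DEFINE_PER_CPU(int, disable_stack_tracer) in trace_stack.c. */
int counter_disabled;

/*
 * In the kernel these accessors now live in include/linux/ftrace.h next to
 * DECLARE_PER_CPU(), so each call site inlines the increment instead of
 * making a function call into trace_stack.c.
 */
static inline void counter_disable(void) { counter_disabled++; }
static inline void counter_enable(void)  { counter_disabled--; }

int main(void)
{
        counter_disable();              /* compiles to a bare increment, no call */
        printf("disable depth: %d\n", counter_disabled);
        counter_enable();
        return 0;
}

The diff below applies the same pattern to disable_stack_tracer.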

Showing 2 changed files with 43 additions and 43 deletions.

include/linux/ftrace.h

Lines changed: 34 additions & 2 deletions
@@ -287,8 +287,40 @@ stack_trace_sysctl(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp,
                    loff_t *ppos);
 
-void stack_tracer_disable(void);
-void stack_tracer_enable(void);
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There's a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+        /* Preemption or interupts must be disabled */
+        if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+        this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+        if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+        this_cpu_dec(disable_stack_tracer);
+}
 #else
 static inline void stack_tracer_disable(void) { }
 static inline void stack_tracer_enable(void) { }
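The kernel-doc above pins down the calling contract: both helpers touch a per-CPU counter, so they must run on the same CPU with preemption or interrupts already disabled, and stack_tracer_enable() must follow while that still holds. A sketch of a hypothetical caller under that assumption (the function name is made up and is not code from this commit; stack_tracer_disable()/stack_tracer_enable() and preempt_disable()/preempt_enable() are the real kernel APIs):

#include <linux/ftrace.h>
#include <linux/preempt.h>

/* Hypothetical kernel-side caller, illustrating the documented contract. */
static void example_no_stack_trace_section(void)
{
        preempt_disable();              /* counter is per-CPU: stay on this CPU */
        stack_tracer_disable();         /* now just this_cpu_inc(disable_stack_tracer) */

        /* ... work that must not be stack-traced (e.g. RCU internals) ... */

        stack_tracer_enable();          /* this_cpu_dec() on the same CPU */
        preempt_enable();
}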

kernel/trace/trace_stack.c

Lines changed: 9 additions & 41 deletions
@@ -35,44 +35,12 @@ unsigned long stack_trace_max_size;
 arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static DEFINE_PER_CPU(int, trace_active);
+DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-/**
- * stack_tracer_disable - temporarily disable the stack tracer
- *
- * There's a few locations (namely in RCU) where stack tracing
- * cannot be executed. This function is used to disable stack
- * tracing during those critical sections.
- *
- * This function must be called with preemption or interrupts
- * disabled and stack_tracer_enable() must be called shortly after
- * while preemption or interrupts are still disabled.
- */
-void stack_tracer_disable(void)
-{
-        /* Preemption or interupts must be disabled */
-        if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-        this_cpu_inc(trace_active);
-}
-
-/**
- * stack_tracer_enable - re-enable the stack tracer
- *
- * After stack_tracer_disable() is called, stack_tracer_enable()
- * must be called shortly afterward.
- */
-void stack_tracer_enable(void)
-{
-        if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-        this_cpu_dec(trace_active);
-}
-
 void stack_trace_print(void)
 {
        long i;
@@ -243,16 +211,16 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
        preempt_disable_notrace();
 
        /* no atomic needed, we only modify this variable by this cpu */
-       __this_cpu_inc(trace_active);
-       if (__this_cpu_read(trace_active) != 1)
+       __this_cpu_inc(disable_stack_tracer);
+       if (__this_cpu_read(disable_stack_tracer) != 1)
                goto out;
 
        ip += MCOUNT_INSN_SIZE;
 
        check_stack(ip, &stack);
 
  out:
-       __this_cpu_dec(trace_active);
+       __this_cpu_dec(disable_stack_tracer);
        /* prevent recursion in schedule */
        preempt_enable_notrace();
 }
@@ -294,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
-        * the percpu trace_active here.
+        * the percpu disable_stack_tracer here.
         */
-       __this_cpu_inc(trace_active);
+       __this_cpu_inc(disable_stack_tracer);
 
        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);
 
-       __this_cpu_dec(trace_active);
+       __this_cpu_dec(disable_stack_tracer);
        local_irq_restore(flags);
 
        return count;
@@ -338,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
        local_irq_disable();
 
-       __this_cpu_inc(trace_active);
+       __this_cpu_inc(disable_stack_tracer);
 
        arch_spin_lock(&stack_trace_max_lock);
 
@@ -352,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
        arch_spin_unlock(&stack_trace_max_lock);
 
-       __this_cpu_dec(trace_active);
+       __this_cpu_dec(disable_stack_tracer);
 
        local_irq_enable();
 }
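One detail worth noting in the stack_trace_call() hunk: the counter doubles as a recursion guard. The handler increments it first and only does real work when the resulting value is exactly 1, so a re-entry from within check_stack(), or a section already covered by stack_tracer_disable(), falls straight through to the decrement. A compilable userspace analogue of that guard, using a thread-local int in place of the per-CPU variable (all names here are illustrative, not from the commit):

#include <stdio.h>

/* Thread-local stand-in for the per-CPU disable_stack_tracer counter. */
static _Thread_local int guard_depth;

static void do_real_work(int level);

/* Mirrors stack_trace_call(): increment first, do real work only at depth 1. */
static void traced_hook(int level)
{
        guard_depth++;
        if (guard_depth != 1)
                goto out;                       /* recursive entry: skip the work */

        do_real_work(level);                    /* may re-enter traced_hook() */
out:
        guard_depth--;
}

static void do_real_work(int level)
{
        printf("doing work at level %d\n", level);
        if (level < 3)
                traced_hook(level + 1);         /* re-entry is silently skipped */
}

int main(void)
{
        traced_hook(0);                         /* prints exactly once */
        return 0;
}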
