Skip to content

Commit 252babc

Browse files
committed
tracing: Replace the per_cpu() with __this_cpu*() in trace_stack.c
The trace_active per-cpu variable can be updated with the __this_cpu_*() functions, as it is only ever modified on the CPU that the variable belongs to. Thanks to Paul McKenney for suggesting __this_cpu_* instead of this_cpu_*. Acked-by: Paul E. McKenney <[email protected]> Signed-off-by: Steven Rostedt (VMware) <[email protected]>
1 parent 0598e4f commit 252babc

File tree

1 file changed

+7
-16
lines changed

1 file changed

+7
-16
lines changed

kernel/trace/trace_stack.c

Lines changed: 7 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -207,21 +207,20 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
207207
struct ftrace_ops *op, struct pt_regs *pt_regs)
208208
{
209209
unsigned long stack;
210-
int cpu;
211210

212211
preempt_disable_notrace();
213212

214-
cpu = raw_smp_processor_id();
215213
/* no atomic needed, we only modify this variable by this cpu */
216-
if (per_cpu(trace_active, cpu)++ != 0)
214+
__this_cpu_inc(trace_active);
215+
if (__this_cpu_read(trace_active) != 1)
217216
goto out;
218217

219218
ip += MCOUNT_INSN_SIZE;
220219

221220
check_stack(ip, &stack);
222221

223222
out:
224-
per_cpu(trace_active, cpu)--;
223+
__this_cpu_dec(trace_active);
225224
/* prevent recursion in schedule */
226225
preempt_enable_notrace();
227226
}
@@ -253,7 +252,6 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
253252
long *ptr = filp->private_data;
254253
unsigned long val, flags;
255254
int ret;
256-
int cpu;
257255

258256
ret = kstrtoul_from_user(ubuf, count, 10, &val);
259257
if (ret)
@@ -266,14 +264,13 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
266264
* we will cause circular lock, so we also need to increase
267265
* the percpu trace_active here.
268266
*/
269-
cpu = smp_processor_id();
270-
per_cpu(trace_active, cpu)++;
267+
__this_cpu_inc(trace_active);
271268

272269
arch_spin_lock(&stack_trace_max_lock);
273270
*ptr = val;
274271
arch_spin_unlock(&stack_trace_max_lock);
275272

276-
per_cpu(trace_active, cpu)--;
273+
__this_cpu_dec(trace_active);
277274
local_irq_restore(flags);
278275

279276
return count;
@@ -307,12 +304,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
307304

308305
static void *t_start(struct seq_file *m, loff_t *pos)
309306
{
310-
int cpu;
311-
312307
local_irq_disable();
313308

314-
cpu = smp_processor_id();
315-
per_cpu(trace_active, cpu)++;
309+
__this_cpu_inc(trace_active);
316310

317311
arch_spin_lock(&stack_trace_max_lock);
318312

@@ -324,12 +318,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
324318

325319
static void t_stop(struct seq_file *m, void *p)
326320
{
327-
int cpu;
328-
329321
arch_spin_unlock(&stack_trace_max_lock);
330322

331-
cpu = smp_processor_id();
332-
per_cpu(trace_active, cpu)--;
323+
__this_cpu_dec(trace_active);
333324

334325
local_irq_enable();
335326
}

0 commit comments

Comments
 (0)