Skip to content

Commit a048d3a

Browse files
committed
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  - ftrace: fix modular build
  - ftrace: disable tracing on acpi idle calls
  - ftrace: remove latency-tracer leftover
  - ftrace: only trace preempt off with preempt tracer
  - ftrace: fix 4d3702b (post-v2.6.26): WARNING: at kernel/lockdep.c:2731 check_flags (ftrace)
2 parents fb3b806 + 1fe3710 commit a048d3a

File tree

4 files changed

+28
-16
lines changed

4 files changed

+28
-16
lines changed

drivers/acpi/processor_idle.c

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -272,6 +272,8 @@ static atomic_t c3_cpu_count;
272272
/* Common C-state entry for C2, C3, .. */
273273
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
274274
{
275+
/* Don't trace irqs off for idle */
276+
stop_critical_timings();
275277
if (cstate->entry_method == ACPI_CSTATE_FFH) {
276278
/* Call into architectural FFH based C-state */
277279
acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +286,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
284286
gets asserted in time to freeze execution properly. */
285287
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
286288
}
289+
start_critical_timings();
287290
}
288291
#endif /* !CONFIG_CPU_IDLE */
289292

@@ -1418,6 +1421,8 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
14181421
*/
14191422
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
14201423
{
1424+
/* Don't trace irqs off for idle */
1425+
stop_critical_timings();
14211426
if (cx->entry_method == ACPI_CSTATE_FFH) {
14221427
/* Call into architectural FFH based C-state */
14231428
acpi_processor_ffh_cstate_enter(cx);
@@ -1432,6 +1437,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
14321437
gets asserted in time to freeze execution properly. */
14331438
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
14341439
}
1440+
start_critical_timings();
14351441
}
14361442

14371443
/**

kernel/trace/trace.c

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1203,9 +1203,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
12031203

12041204
iter->pos = *pos;
12051205

1206-
if (last_ent && !ent)
1207-
seq_puts(m, "\n\nvim:ft=help\n");
1208-
12091206
return ent;
12101207
}
12111208

kernel/trace/trace_irqsoff.c

Lines changed: 6 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -253,12 +253,14 @@ void start_critical_timings(void)
253253
if (preempt_trace() || irq_trace())
254254
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
255255
}
256+
EXPORT_SYMBOL_GPL(start_critical_timings);
256257

257258
void stop_critical_timings(void)
258259
{
259260
if (preempt_trace() || irq_trace())
260261
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
261262
}
263+
EXPORT_SYMBOL_GPL(stop_critical_timings);
262264

263265
#ifdef CONFIG_IRQSOFF_TRACER
264266
#ifdef CONFIG_PROVE_LOCKING
@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
337339
#ifdef CONFIG_PREEMPT_TRACER
338340
void trace_preempt_on(unsigned long a0, unsigned long a1)
339341
{
340-
stop_critical_timing(a0, a1);
342+
if (preempt_trace())
343+
stop_critical_timing(a0, a1);
341344
}
342345

343346
void trace_preempt_off(unsigned long a0, unsigned long a1)
344347
{
345-
start_critical_timing(a0, a1);
348+
if (preempt_trace())
349+
start_critical_timing(a0, a1);
346350
}
347351
#endif /* CONFIG_PREEMPT_TRACER */
348352

kernel/trace/trace_sched_wakeup.c

Lines changed: 16 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
2626
static int wakeup_cpu;
2727
static unsigned wakeup_prio = -1;
2828

29-
static DEFINE_SPINLOCK(wakeup_lock);
29+
static raw_spinlock_t wakeup_lock =
30+
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3031

3132
static void __wakeup_reset(struct trace_array *tr);
3233

@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
5657
if (unlikely(disabled != 1))
5758
goto out;
5859

59-
spin_lock_irqsave(&wakeup_lock, flags);
60+
local_irq_save(flags);
61+
__raw_spin_lock(&wakeup_lock);
6062

6163
if (unlikely(!wakeup_task))
6264
goto unlock;
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
7173
trace_function(tr, data, ip, parent_ip, flags);
7274

7375
unlock:
74-
spin_unlock_irqrestore(&wakeup_lock, flags);
76+
__raw_spin_unlock(&wakeup_lock);
77+
local_irq_restore(flags);
7578

7679
out:
7780
atomic_dec(&data->disabled);
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
145148
if (likely(disabled != 1))
146149
goto out;
147150

148-
spin_lock_irqsave(&wakeup_lock, flags);
151+
local_irq_save(flags);
152+
__raw_spin_lock(&wakeup_lock);
149153

150154
/* We could race with grabbing wakeup_lock */
151155
if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
174178

175179
out_unlock:
176180
__wakeup_reset(tr);
177-
spin_unlock_irqrestore(&wakeup_lock, flags);
181+
__raw_spin_unlock(&wakeup_lock);
182+
local_irq_restore(flags);
178183
out:
179184
atomic_dec(&tr->data[cpu]->disabled);
180185
}
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
209214
struct trace_array_cpu *data;
210215
int cpu;
211216

212-
assert_spin_locked(&wakeup_lock);
213-
214217
for_each_possible_cpu(cpu) {
215218
data = tr->data[cpu];
216219
tracing_reset(data);
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
229232
{
230233
unsigned long flags;
231234

232-
spin_lock_irqsave(&wakeup_lock, flags);
235+
local_irq_save(flags);
236+
__raw_spin_lock(&wakeup_lock);
233237
__wakeup_reset(tr);
234-
spin_unlock_irqrestore(&wakeup_lock, flags);
238+
__raw_spin_unlock(&wakeup_lock);
239+
local_irq_restore(flags);
235240
}
236241

237242
static void
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
252257
goto out;
253258

254259
/* interrupts should be off from try_to_wake_up */
255-
spin_lock(&wakeup_lock);
260+
__raw_spin_lock(&wakeup_lock);
256261

257262
/* check for races. */
258263
if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
274279
CALLER_ADDR1, CALLER_ADDR2, flags);
275280

276281
out_locked:
277-
spin_unlock(&wakeup_lock);
282+
__raw_spin_unlock(&wakeup_lock);
278283
out:
279284
atomic_dec(&tr->data[cpu]->disabled);
280285
}

0 commit comments

Comments
 (0)