Commit 12cce59

ftrace/x86: Allow !CONFIG_PREEMPT dynamic ops to use allocated trampolines
When a static ftrace_ops (like the function tracer) enables tracing and it is the only callback referencing a function, a trampoline is dynamically allocated for that function which calls the callback directly, instead of calling a loop function that iterates over all the registered ftrace ops (if more than one ops is registered).

But when it comes to dynamically allocated ftrace_ops, which may be freed, there is no way on a CONFIG_PREEMPT kernel to know when it is safe to free the trampoline: if a task was preempted while executing on the trampoline, there is currently no way to know when it will be off that trampoline.

This is not true for !CONFIG_PREEMPT. There, the current method of calling schedule_on_each_cpu() forces tasks off the trampoline, because they cannot schedule while on it (kernel preemption is not configured). That means it is safe to free a dynamically allocated ftrace_ops trampoline when CONFIG_PREEMPT is not configured.

Cc: H. Peter Anvin <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Acked-by: Borislav Petkov <[email protected]>
Tested-by: Masami Hiramatsu <[email protected]>
Tested-by: Jiri Kosina <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
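For context, the synchronization step the message relies on can be sketched as follows. ftrace_sync() already exists in kernel/trace/ftrace.c as an intentionally empty work function; the wait_for_trampoline_users() wrapper below is a hypothetical name used only for illustration.

#include <linux/workqueue.h>    /* schedule_on_each_cpu() */

/* Sketch only: ftrace_sync() exists in kernel/trace/ftrace.c and is
 * deliberately empty. */
static void ftrace_sync(struct work_struct *work)
{
        /*
         * Queuing this on every CPU and waiting for it forces each
         * CPU through the scheduler at least once.
         */
}

/* Hypothetical helper, for illustration only. */
static void wait_for_trampoline_users(void)
{
        /*
         * On !CONFIG_PREEMPT a task cannot be preempted while running
         * on a trampoline, so once every CPU has scheduled, no task
         * can still be executing trampoline code and it may be freed.
         */
        schedule_on_each_cpu(ftrace_sync);
}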
1 parent 15d5b02 commit 12cce59

2 files changed, 26 insertions(+), 0 deletions(-)


arch/x86/kernel/ftrace.c

Lines changed: 8 additions & 0 deletions
@@ -913,6 +913,14 @@ void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec
        return addr_from_call((void *)ops->trampoline + offset);
 }
 
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+       if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+               return;
+
+       tramp_free((void *)ops->trampoline);
+       ops->trampoline = 0;
+}
 
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_DYNAMIC_FTRACE */

kernel/trace/ftrace.c

Lines changed: 18 additions & 0 deletions
@@ -2324,6 +2324,10 @@ static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 
+void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+}
+
 static void control_ops_free(struct ftrace_ops *ops)
 {
        free_percpu(ops->disabled);
@@ -2475,6 +2479,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
                schedule_on_each_cpu(ftrace_sync);
 
+               arch_ftrace_trampoline_free(ops);
+
                if (ops->flags & FTRACE_OPS_FL_CONTROL)
                        control_ops_free(ops);
        }
@@ -4725,9 +4731,21 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
+
+/*
+ * Currently there's no safe way to free a trampoline when the kernel
+ * is configured with PREEMPT. That is because a task could be preempted
+ * when it jumped to the trampoline, it may be preempted for a long time
+ * depending on the system load, and currently there's no way to know
+ * when it will be off the trampoline. If the trampoline is freed
+ * too early, when the task runs again, it will be executing on freed
+ * memory and crash.
+ */
+#ifdef CONFIG_PREEMPT
        /* Currently, only non dynamic ops can have a trampoline */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
                return;
+#endif
 
        arch_ftrace_update_trampoline(ops);
 }
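A note on the pattern in the first hunk: __weak is the kernel's shorthand for __attribute__((weak)). The core defines an empty weak arch_ftrace_trampoline_free(), and an architecture that can free trampolines (here x86) supplies a strong definition that wins at link time. Below is a minimal standalone sketch of that linker behavior, with illustrative names rather than kernel code; build it with `cc core.c arch.c`.

/* core.c: weak default, mirroring the stub in kernel/trace/ftrace.c */
#include <stdio.h>

void __attribute__((weak)) arch_trampoline_free(void *ops)
{
        printf("weak default: nothing to free\n");
}

int main(void)
{
        /* Calls whichever definition the linker selected. */
        arch_trampoline_free(NULL);
        return 0;
}

/* arch.c: linking this in overrides the weak default, just as
 * arch/x86/kernel/ftrace.c overrides arch_ftrace_trampoline_free(). */
#include <stdio.h>

void arch_trampoline_free(void *ops)
{
        printf("arch override: freeing %p\n", ops);
}

Leaving arch.c out of the link makes the weak default run instead, which is exactly how architectures without trampoline support fall back to a no-op.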
