Commit ae6aa16

mhiramathitachi authored and rostedt committed
kprobes: introduce ftrace based optimization
Introduce function-trace-based kprobes optimization. With ftrace
optimization, a kprobe placed on an mcount calling address uses
ftrace's mcount call instead of a breakpoint. Furthermore, unlike the
current jump-based optimization, this optimization works on a
preemptive kernel. Of course, this feature works only if the probe is
on an mcount call.

The one exception: if kprobe.break_handler is set, that probe is not
optimized with ftrace (nor put on ftrace). This limitation exists
because break_handler may be used only from jprobes, which change the
ip address (for fetching the function arguments), while the function
tracer ignores a modified ip address.

Changes in v2:
- Fix ftrace_ops to be registered right after setting its filter.
- Unregister ftrace_ops if no kprobe is using it.
- Remove the notrace dependency from the __kprobes macro.

Link: http://lkml.kernel.org/r/[email protected]

Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: "Frank Ch. Eigler" <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Signed-off-by: Masami Hiramatsu <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
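For orientation, a minimal sketch (not part of this commit) of a module
that registers a kprobe on a function entry. On configurations where
the function entry coincides with the mcount/ftrace site (e.g. x86
built with -mfentry, where supported), such a probe is armed through
ftrace rather than an int3 breakpoint; the target symbol and handler
below are illustrative assumptions:

#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* continue executing the probed code */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* illustrative target */
	.pre_handler	= handler_pre,
	/* no break_handler: a break_handler (jprobes) cannot be
	 * combined with the ftrace path */
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");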
1 parent 4dc9367 commit ae6aa16

2 files changed: +119 -13 lines changed

include/linux/kprobes.h

Lines changed: 27 additions & 0 deletions
@@ -38,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
+#include <linux/ftrace.h>
 
 #ifdef CONFIG_KPROBES
 #include <asm/kprobes.h>
@@ -48,14 +49,26 @@
 #define KPROBE_REENTER		0x00000004
 #define KPROBE_HIT_SSDONE	0x00000008
 
+/*
+ * If function tracer is enabled and the arch supports full
+ * passing of pt_regs to function tracing, then kprobes can
+ * optimize on top of function tracing.
+ */
+#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
+	&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
+# define KPROBES_CAN_USE_FTRACE
+#endif
+
 /* Attach to insert probes on any functions which should be ignored*/
 #define __kprobes	__attribute__((__section__(".kprobes.text")))
+
 #else /* CONFIG_KPROBES */
 typedef int kprobe_opcode_t;
 struct arch_specific_insn {
 	int dummy;
 };
 #define __kprobes
+
 #endif /* CONFIG_KPROBES */
 
 struct kprobe;
@@ -128,6 +141,7 @@ struct kprobe {
  * NOTE:
  * this flag is only for optimized_kprobe.
  */
+#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
 
 /* Has this kprobe gone ? */
 static inline int kprobe_gone(struct kprobe *p)
@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p)
 {
 	return p->flags & KPROBE_FLAG_OPTIMIZED;
 }
+
+/* Is this kprobe uses ftrace ? */
+static inline int kprobe_ftrace(struct kprobe *p)
+{
+	return p->flags & KPROBE_FLAG_FTRACE;
+}
+
 /*
  * Special probe type that uses setjmp-longjmp type tricks to resume
  * execution at a specified entry with a matching prototype corresponding
@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif
 
 #endif /* CONFIG_OPTPROBES */
+#ifdef KPROBES_CAN_USE_FTRACE
+extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				  struct pt_regs *regs);
+extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+#endif
+
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
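These two externs form the arch-side contract for ftrace-based probes:
kprobe_ftrace_handler() is the ftrace callback and
arch_prepare_kprobe_ftrace() prepares a probe that will sit on an
mcount site. Purely as illustration, a stub of what an architecture
might do in the prepare hook (modeled loosely on the x86 implementation
from a follow-up patch; the field use is an assumption, not part of
this commit):

int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	/* No instruction slot needed: the ftrace nop stays in place and
	 * kprobe_ftrace_handler() runs from the mcount call instead of
	 * an int3 trap. */
	p->ainsn.insn = NULL;
	return 0;
}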

kernel/kprobes.c

Lines changed: 92 additions & 13 deletions
@@ -759,6 +759,10 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	struct kprobe *ap;
 	struct optimized_kprobe *op;
 
+	/* Impossible to optimize ftrace-based kprobe */
+	if (kprobe_ftrace(p))
+		return;
+
 	/* For preparing optimization, jump_label_text_reserved() is called */
 	jump_label_lock();
 	mutex_lock(&text_mutex);
@@ -915,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
+#ifdef KPROBES_CAN_USE_FTRACE
+static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
+	.regs_func = kprobe_ftrace_handler,
+	.flags = FTRACE_OPS_FL_SAVE_REGS,
+};
+static int kprobe_ftrace_enabled;
+
+/* Must ensure p->addr is really on ftrace */
+static int __kprobes prepare_kprobe(struct kprobe *p)
+{
+	if (!kprobe_ftrace(p))
+		return arch_prepare_kprobe(p);
+
+	return arch_prepare_kprobe_ftrace(p);
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 0, 0);
+	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	kprobe_ftrace_enabled++;
+	if (kprobe_ftrace_enabled == 1) {
+		ret = register_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+}
+
+/* Caller must lock kprobe_mutex */
+static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
+{
+	int ret;
+
+	kprobe_ftrace_enabled--;
+	if (kprobe_ftrace_enabled == 0) {
+		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
+		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	}
+	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
+				   (unsigned long)p->addr, 1, 0);
+	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+}
+#else	/* !KPROBES_CAN_USE_FTRACE */
+#define prepare_kprobe(p)	arch_prepare_kprobe(p)
+#define arm_kprobe_ftrace(p)	do {} while (0)
+#define disarm_kprobe_ftrace(p)	do {} while (0)
+#endif
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		arm_kprobe_ftrace(kp);
+		return;
+	}
 	/*
 	 * Here, since __arm_kprobe() doesn't use stop_machine(),
 	 * this doesn't cause deadlock on text_mutex. So, we don't
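The arm/disarm pair above does first-user/last-user reference counting:
the shared kprobe_ftrace_ops is registered with ftrace only when the
first ftrace-based kprobe is armed and unregistered when the last one
is disarmed, while per-probe granularity comes from
ftrace_set_filter_ip(). A hypothetical standalone user of the same API
as it stands in this patch (my_ops, my_regs_func and target_ip are
illustrative, not part of the commit):

#include <linux/ftrace.h>

/* Callback used when FTRACE_OPS_FL_SAVE_REGS is set (.regs_func as of
 * this patch); regs is fully populated at the mcount site. */
static void my_regs_func(unsigned long ip, unsigned long parent_ip,
			 struct pt_regs *regs)
{
	/* inspect or modify regs here */
}

static struct ftrace_ops my_ops = {
	.regs_func = my_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static int trace_one_function(unsigned long target_ip)
{
	int ret;

	/* Filter to a single mcount site first (remove=0, reset=0 adds
	 * target_ip to the filter), then enable the ops. */
	ret = ftrace_set_filter_ip(&my_ops, target_ip, 0, 0);
	if (ret < 0)
		return ret;
	return register_ftrace_function(&my_ops);
}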
@@ -929,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 }
 
 /* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp)
+static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		disarm_kprobe_ftrace(kp);
+		return;
+	}
 	/* Ditto */
 	mutex_lock(&text_mutex);
-	__disarm_kprobe(kp, true);
+	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 }
 
@@ -1343,14 +1406,33 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
 					       struct module **probed_mod)
 {
 	int ret = 0;
+	unsigned long ftrace_addr;
+
+	/*
+	 * If the address is located on a ftrace nop, set the
+	 * breakpoint to the following instruction.
+	 */
+	ftrace_addr = ftrace_location((unsigned long)p->addr);
+	if (ftrace_addr) {
+#ifdef KPROBES_CAN_USE_FTRACE
+		/* Given address is not on the instruction boundary */
+		if ((unsigned long)p->addr != ftrace_addr)
+			return -EILSEQ;
+		/* break_handler (jprobe) can not work with ftrace */
+		if (p->break_handler)
+			return -EINVAL;
+		p->flags |= KPROBE_FLAG_FTRACE;
+#else	/* !KPROBES_CAN_USE_FTRACE */
+		return -EINVAL;
+#endif
+	}
 
 	jump_label_lock();
 	preempt_disable();
 
 	/* Ensure it is not in reserved area nor out of text */
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
-	    ftrace_text_reserved(p->addr, p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr)) {
 		ret = -EINVAL;
 		goto out;
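The semantics of the new check: ftrace_location() returns the start
address of the mcount site containing the given ip (or 0 if there is
none), so an address that falls inside the ftrace nop without being on
its boundary is rejected with -EILSEQ, and a probe with a break_handler
(i.e. a jprobe) on an mcount site is rejected with -EINVAL. A small
sketch of a pre-check built on the same call (the helper name is
hypothetical):

/* True when a kprobe at addr would take the ftrace path on a
 * KPROBES_CAN_USE_FTRACE kernel: addr must be exactly the mcount
 * site address reported by ftrace_location(). */
static bool addr_is_ftrace_site(void *addr)
{
	return ftrace_location((unsigned long)addr) == (unsigned long)addr;
}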
@@ -1422,7 +1504,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 	}
 
 	mutex_lock(&text_mutex);	/* Avoiding text modification */
-	ret = arch_prepare_kprobe(p);
+	ret = prepare_kprobe(p);
 	mutex_unlock(&text_mutex);
 	if (ret)
 		goto out;
@@ -1480,7 +1562,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
 
 	/* Try to disarm and disable this/parent probe */
 	if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
-		disarm_kprobe(orig_p);
+		disarm_kprobe(orig_p, true);
 		orig_p->flags |= KPROBE_FLAG_DISABLED;
 	}
 }
@@ -2078,10 +2160,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 
 	if (!pp)
 		pp = p;
-	seq_printf(pi, "%s%s%s\n",
+	seq_printf(pi, "%s%s%s%s\n",
 		   (kprobe_gone(p) ? "[GONE]" : ""),
 		   ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
-		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
+		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
+		   (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -2160,14 +2243,12 @@ static void __kprobes arm_all_kprobes(void)
 		goto already_enabled;
 
 	/* Arming kprobes doesn't optimize kprobe itself */
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				__arm_kprobe(p);
+				arm_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);
 
 	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
@@ -2195,15 +2276,13 @@ static void __kprobes disarm_all_kprobes(void)
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				__disarm_kprobe(p, false);
+				disarm_kprobe(p, false);
 		}
 	}
-	mutex_unlock(&text_mutex);
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for disarming all kprobes by optimizer */
