Commit 12310e3

Jessica Yu authored and Ingo Molnar committed
kprobes: Propagate error from arm_kprobe_ftrace()
Improve error handling when arming ftrace-based kprobes. Specifically, if we fail to arm an ftrace-based kprobe, register_kprobe()/enable_kprobe() should report an error instead of success. Previously, this led to confusing situations where register_kprobe() would return 0, indicating success, but the kprobe would not be functional if ftrace registration during the kprobe arming process had failed. We should therefore take any errors returned by ftrace into account and propagate them so that we do not register/enable kprobes that cannot be armed. This can happen if, for example, register_ftrace_function() finds an IPMODIFY conflict (since kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict is possible since livepatches also set the IPMODIFY flag for their ftrace_ops.

arm_all_kprobes() keeps its current behavior and attempts to arm all kprobes. It returns the last encountered error and gives a warning if not all probes could be armed.

This patch is based on Petr Mladek's original patchset (patches 2 and 3) from back in 2015, which improved kprobes error handling and can be found here: https://lkml.org/lkml/2015/2/26/452. However, further work on this had been paused since then and the patches were not upstreamed.

Based-on-patches-by: Petr Mladek <[email protected]>
Signed-off-by: Jessica Yu <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Anil S Keshavamurthy <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Jiri Kosina <[email protected]>
Cc: Joe Lawrence <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Miroslav Benes <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
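For illustration, here is a minimal, hypothetical caller sketch (loosely modeled on the kernel's samples/kprobes/kprobe_example.c; the probed symbol and printouts are assumptions, not part of this commit). It shows the caller-visible effect of the change: an arming failure, such as an ftrace IPMODIFY conflict, now surfaces as a negative return value from register_kprobe() rather than a silent success.

/*
 * Hypothetical example module (not part of this commit), loosely based on
 * samples/kprobes/kprobe_example.c. The probed symbol is an assumption.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Called just before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe pre_handler: hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* assumed probe target */
	.pre_handler	= handler_pre,
};

static int __init kprobe_demo_init(void)
{
	int ret = register_kprobe(&kp);

	/*
	 * With this commit, a failure in arm_kprobe_ftrace() (e.g. an
	 * IPMODIFY conflict with a livepatch's ftrace_ops) propagates
	 * here as a negative errno instead of being reported as success.
	 */
	if (ret < 0) {
		pr_err("register_kprobe failed: %d\n", ret);
		return ret;
	}

	pr_info("kprobe registered at %p\n", kp.addr);
	return 0;
}

static void __exit kprobe_demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_demo_init);
module_exit(kprobe_demo_exit);
MODULE_LICENSE("GPL");

enable_kprobe() gains the same property: if re-arming fails, it returns the error and leaves the probe disabled.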
1 parent 3f9e646 commit 12310e3


kernel/kprobes.c

Lines changed: 75 additions & 25 deletions
@@ -978,18 +978,36 @@ static int prepare_kprobe(struct kprobe *p)
 }
 
 /* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
 {
-	int ret;
+	int ret = 0;
 
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 				   (unsigned long)p->addr, 0, 0);
-	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
-	kprobe_ftrace_enabled++;
-	if (kprobe_ftrace_enabled == 1) {
+	if (ret) {
+		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+		return ret;
+	}
+
+	if (kprobe_ftrace_enabled == 0) {
 		ret = register_ftrace_function(&kprobe_ftrace_ops);
-		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+		if (ret) {
+			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
+			goto err_ftrace;
+		}
 	}
+
+	kprobe_ftrace_enabled++;
+	return ret;
+
+err_ftrace:
+	/*
+	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
+	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
+	 * empty filter_hash which would undesirably trace all functions.
+	 */
+	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+	return ret;
 }
 
 /* Caller must lock kprobe_mutex */
@@ -1008,22 +1026,23 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
 }
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p)	do {} while (0)
+#define arm_kprobe_ftrace(p)	(-ENODEV)
 #define disarm_kprobe_ftrace(p) do {} while (0)
 #endif
 
 /* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
 {
-	if (unlikely(kprobe_ftrace(kp))) {
-		arm_kprobe_ftrace(kp);
-		return;
-	}
+	if (unlikely(kprobe_ftrace(kp)))
+		return arm_kprobe_ftrace(kp);
+
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
+
+	return 0;
 }
 
 /* Disarm a kprobe with text_mutex */
@@ -1362,9 +1381,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
+		if (!kprobes_all_disarmed) {
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			ret = arm_kprobe(ap);
+			if (ret) {
+				ap->flags |= KPROBE_FLAG_DISABLED;
+				list_del_rcu(&p->list);
+				synchronize_sched();
+			}
+		}
 	}
 	return ret;
 }
@@ -1573,8 +1598,14 @@ int register_kprobe(struct kprobe *p)
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arm_kprobe(p);
+	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+		ret = arm_kprobe(p);
+		if (ret) {
+			hlist_del_rcu(&p->hlist);
+			synchronize_sched();
+			goto out;
+		}
+	}
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
@@ -2116,7 +2147,9 @@ int enable_kprobe(struct kprobe *kp)
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
-		arm_kprobe(p);
+		ret = arm_kprobe(p);
+		if (ret)
+			p->flags |= KPROBE_FLAG_DISABLED;
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
@@ -2407,11 +2440,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
 	.release = seq_release,
 };
 
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
-	unsigned int i;
+	unsigned int i, total = 0, errors = 0;
+	int err, ret = 0;
 
 	mutex_lock(&kprobe_mutex);
 
@@ -2428,16 +2462,28 @@ static void arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, head, hlist)
-			if (!kprobe_disabled(p))
-				arm_kprobe(p);
+		/* Arm all kprobes on a best-effort basis */
+		hlist_for_each_entry_rcu(p, head, hlist) {
+			if (!kprobe_disabled(p)) {
+				err = arm_kprobe(p);
+				if (err) {
+					errors++;
+					ret = err;
+				}
+				total++;
+			}
+		}
 	}
 
-	printk(KERN_INFO "Kprobes globally enabled\n");
+	if (errors)
+		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
+			errors, total);
+	else
+		pr_info("Kprobes globally enabled\n");
 
 already_enabled:
 	mutex_unlock(&kprobe_mutex);
-	return;
+	return ret;
 }
 
 static void disarm_all_kprobes(void)
@@ -2494,6 +2540,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 {
 	char buf[32];
 	size_t buf_size;
+	int ret = 0;
 
 	buf_size = min(count, (sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size))
@@ -2504,7 +2551,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case 'y':
 	case 'Y':
 	case '1':
-		arm_all_kprobes();
+		ret = arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
@@ -2515,6 +2562,9 @@ static ssize_t write_enabled_file_bool(struct file *file,
 		return -EINVAL;
 	}
 
+	if (ret)
+		return ret;
+
 	return count;
 }
 