@@ -759,6 +759,10 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
759759 struct kprobe * ap ;
760760 struct optimized_kprobe * op ;
761761
762+ /* Impossible to optimize ftrace-based kprobe */
763+ if (kprobe_ftrace (p ))
764+ return ;
765+
762766 /* For preparing optimization, jump_label_text_reserved() is called */
763767 jump_label_lock ();
764768 mutex_lock (& text_mutex );
@@ -915,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
915919}
916920#endif /* CONFIG_OPTPROBES */
917921
922+ #ifdef KPROBES_CAN_USE_FTRACE
923+ static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
924+ .func = kprobe_ftrace_handler ,
925+ .flags = FTRACE_OPS_FL_SAVE_REGS ,
926+ };
927+ static int kprobe_ftrace_enabled ;
928+
929+ /* Must ensure p->addr is really on ftrace */
930+ static int __kprobes prepare_kprobe (struct kprobe * p )
931+ {
932+ if (!kprobe_ftrace (p ))
933+ return arch_prepare_kprobe (p );
934+
935+ return arch_prepare_kprobe_ftrace (p );
936+ }
937+
938+ /* Caller must lock kprobe_mutex */
939+ static void __kprobes arm_kprobe_ftrace (struct kprobe * p )
940+ {
941+ int ret ;
942+
943+ ret = ftrace_set_filter_ip (& kprobe_ftrace_ops ,
944+ (unsigned long )p -> addr , 0 , 0 );
945+ WARN (ret < 0 , "Failed to arm kprobe-ftrace at %p (%d)\n" , p -> addr , ret );
946+ kprobe_ftrace_enabled ++ ;
947+ if (kprobe_ftrace_enabled == 1 ) {
948+ ret = register_ftrace_function (& kprobe_ftrace_ops );
949+ WARN (ret < 0 , "Failed to init kprobe-ftrace (%d)\n" , ret );
950+ }
951+ }
952+
953+ /* Caller must lock kprobe_mutex */
954+ static void __kprobes disarm_kprobe_ftrace (struct kprobe * p )
955+ {
956+ int ret ;
957+
958+ kprobe_ftrace_enabled -- ;
959+ if (kprobe_ftrace_enabled == 0 ) {
960+ ret = unregister_ftrace_function (& kprobe_ftrace_ops );
961+ WARN (ret < 0 , "Failed to unregister kprobe-ftrace (%d)\n" , ret );
962+ }
963+ ret = ftrace_set_filter_ip (& kprobe_ftrace_ops ,
964+ (unsigned long )p -> addr , 1 , 0 );
965+ WARN (ret < 0 , "Failed to disarm kprobe-ftrace at %p (%d)\n" , p -> addr , ret );
966+ }
967+ #else /* !KPROBES_CAN_USE_FTRACE */
968+ #define prepare_kprobe(p) arch_prepare_kprobe(p)
969+ #define arm_kprobe_ftrace(p) do {} while (0)
970+ #define disarm_kprobe_ftrace(p) do {} while (0)
971+ #endif
972+
918973/* Arm a kprobe with text_mutex */
919974static void __kprobes arm_kprobe (struct kprobe * kp )
920975{
976+ if (unlikely (kprobe_ftrace (kp ))) {
977+ arm_kprobe_ftrace (kp );
978+ return ;
979+ }
921980 /*
922981 * Here, since __arm_kprobe() doesn't use stop_machine(),
923982 * this doesn't cause deadlock on text_mutex. So, we don't
@@ -929,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
929988}
930989
931990/* Disarm a kprobe with text_mutex */
932- static void __kprobes disarm_kprobe (struct kprobe * kp )
991+ static void __kprobes disarm_kprobe (struct kprobe * kp , bool reopt )
933992{
993+ if (unlikely (kprobe_ftrace (kp ))) {
994+ disarm_kprobe_ftrace (kp );
995+ return ;
996+ }
934997 /* Ditto */
935998 mutex_lock (& text_mutex );
936- __disarm_kprobe (kp , true );
999+ __disarm_kprobe (kp , reopt );
9371000 mutex_unlock (& text_mutex );
9381001}
9391002
@@ -1343,14 +1406,33 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
13431406 struct module * * probed_mod )
13441407{
13451408 int ret = 0 ;
1409+ unsigned long ftrace_addr ;
1410+
1411+ /*
1412+ * If the address is located on a ftrace nop, set the
1413+ * breakpoint to the following instruction.
1414+ */
1415+ ftrace_addr = ftrace_location ((unsigned long )p -> addr );
1416+ if (ftrace_addr ) {
1417+ #ifdef KPROBES_CAN_USE_FTRACE
1418+ /* Given address is not on the instruction boundary */
1419+ if ((unsigned long )p -> addr != ftrace_addr )
1420+ return - EILSEQ ;
1421+ /* break_handler (jprobe) can not work with ftrace */
1422+ if (p -> break_handler )
1423+ return - EINVAL ;
1424+ p -> flags |= KPROBE_FLAG_FTRACE ;
1425+ #else /* !KPROBES_CAN_USE_FTRACE */
1426+ return - EINVAL ;
1427+ #endif
1428+ }
13461429
13471430 jump_label_lock ();
13481431 preempt_disable ();
13491432
13501433 /* Ensure it is not in reserved area nor out of text */
13511434 if (!kernel_text_address ((unsigned long ) p -> addr ) ||
13521435 in_kprobes_functions ((unsigned long ) p -> addr ) ||
1353- ftrace_text_reserved (p -> addr , p -> addr ) ||
13541436 jump_label_text_reserved (p -> addr , p -> addr )) {
13551437 ret = - EINVAL ;
13561438 goto out ;
@@ -1422,7 +1504,7 @@ int __kprobes register_kprobe(struct kprobe *p)
14221504 }
14231505
14241506 mutex_lock (& text_mutex ); /* Avoiding text modification */
1425- ret = arch_prepare_kprobe (p );
1507+ ret = prepare_kprobe (p );
14261508 mutex_unlock (& text_mutex );
14271509 if (ret )
14281510 goto out ;
@@ -1480,7 +1562,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
14801562
14811563 /* Try to disarm and disable this/parent probe */
14821564 if (p == orig_p || aggr_kprobe_disabled (orig_p )) {
1483- disarm_kprobe (orig_p );
1565+ disarm_kprobe (orig_p , true );
14841566 orig_p -> flags |= KPROBE_FLAG_DISABLED ;
14851567 }
14861568 }
@@ -2078,10 +2160,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
20782160
20792161 if (!pp )
20802162 pp = p ;
2081- seq_printf (pi , "%s%s%s\n" ,
2163+ seq_printf (pi , "%s%s%s%s\n" ,
20822164 (kprobe_gone (p ) ? "[GONE]" : "" ),
20832165 ((kprobe_disabled (p ) && !kprobe_gone (p )) ? "[DISABLED]" : "" ),
2084- (kprobe_optimized (pp ) ? "[OPTIMIZED]" : "" ));
2166+ (kprobe_optimized (pp ) ? "[OPTIMIZED]" : "" ),
2167+ (kprobe_ftrace (pp ) ? "[FTRACE]" : "" ));
20852168}
20862169
20872170static void __kprobes * kprobe_seq_start (struct seq_file * f , loff_t * pos )
@@ -2160,14 +2243,12 @@ static void __kprobes arm_all_kprobes(void)
21602243 goto already_enabled ;
21612244
21622245 /* Arming kprobes doesn't optimize kprobe itself */
2163- mutex_lock (& text_mutex );
21642246 for (i = 0 ; i < KPROBE_TABLE_SIZE ; i ++ ) {
21652247 head = & kprobe_table [i ];
21662248 hlist_for_each_entry_rcu (p , node , head , hlist )
21672249 if (!kprobe_disabled (p ))
2168- __arm_kprobe (p );
2250+ arm_kprobe (p );
21692251 }
2170- mutex_unlock (& text_mutex );
21712252
21722253 kprobes_all_disarmed = false;
21732254 printk (KERN_INFO "Kprobes globally enabled\n" );
@@ -2195,15 +2276,13 @@ static void __kprobes disarm_all_kprobes(void)
21952276 kprobes_all_disarmed = true;
21962277 printk (KERN_INFO "Kprobes globally disabled\n" );
21972278
2198- mutex_lock (& text_mutex );
21992279 for (i = 0 ; i < KPROBE_TABLE_SIZE ; i ++ ) {
22002280 head = & kprobe_table [i ];
22012281 hlist_for_each_entry_rcu (p , node , head , hlist ) {
22022282 if (!arch_trampoline_kprobe (p ) && !kprobe_disabled (p ))
2203- __disarm_kprobe (p , false);
2283+ disarm_kprobe (p , false);
22042284 }
22052285 }
2206- mutex_unlock (& text_mutex );
22072286 mutex_unlock (& kprobe_mutex );
22082287
22092288 /* Wait for disarming all kprobes by optimizer */
0 commit comments