 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/kdebug.h>
+
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
+#include <asm/uaccess.h>
 
 #define KPROBE_HASH_BITS	6
 #define KPROBE_TABLE_SIZE	(1 << KPROBE_HASH_BITS)
@@ -64,6 +66,9 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 static atomic_t kprobe_count;
 
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobe_enabled;
+
 DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -564,12 +569,13 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (atomic_add_return(1, &kprobe_count) == \
+	if (kprobe_enabled) {
+		if (atomic_add_return(1, &kprobe_count) == \
 			(ARCH_INACTIVE_KPROBE_COUNT + 1))
-		register_page_fault_notifier(&kprobe_page_fault_nb);
-
-	arch_arm_kprobe(p);
+			register_page_fault_notifier(&kprobe_page_fault_nb);
 
+		arch_arm_kprobe(p);
+	}
 out:
 	mutex_unlock(&kprobe_mutex);
 
@@ -607,8 +613,13 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 	if (old_p == p ||
 	    (old_p->pre_handler == aggr_pre_handler &&
 	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
-		/* Only probe on the hash list */
-		arch_disarm_kprobe(p);
+		/*
+		 * Only probe on the hash list. Disarm only if kprobes are
+		 * enabled - otherwise, the breakpoint would already have
+		 * been removed. We save on flushing icache.
+		 */
+		if (kprobe_enabled)
+			arch_disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 		cleanup_p = 1;
 	} else {
@@ -797,6 +808,9 @@ static int __init init_kprobes(void)
 	}
 	atomic_set(&kprobe_count, 0);
 
+	/* By default, kprobes are enabled */
+	kprobe_enabled = true;
+
 	err = arch_init_kprobes();
 	if (!err)
 		err = register_die_notifier(&kprobe_exceptions_nb);
@@ -806,7 +820,7 @@ static int __init init_kprobes(void)
 
 #ifdef CONFIG_DEBUG_FS
 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
-		const char *sym, int offset,char *modname)
+			const char *sym, int offset,char *modname)
 {
 	char *kprobe_type;
 
@@ -885,9 +899,130 @@ static struct file_operations debugfs_kprobes_operations = {
 	.release        = seq_release,
 };
 
+static void __kprobes enable_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* If kprobes are already enabled, just return */
+	if (kprobe_enabled)
+		goto already_enabled;
+
+	/*
+	 * Re-register the page fault notifier only if there are any
+	 * active probes at the time of enabling kprobes globally
+	 */
+	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
+		register_page_fault_notifier(&kprobe_page_fault_nb);
+
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist)
+			arch_arm_kprobe(p);
+	}
+
+	kprobe_enabled = true;
+	printk(KERN_INFO "Kprobes globally enabled\n");
+
+already_enabled:
+	mutex_unlock(&kprobe_mutex);
+	return;
+}
+
+static void __kprobes disable_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* If kprobes are already disabled, just return */
+	if (!kprobe_enabled)
+		goto already_disabled;
+
+	kprobe_enabled = false;
+	printk(KERN_INFO "Kprobes globally disabled\n");
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist) {
+			if (!arch_trampoline_kprobe(p))
+				arch_disarm_kprobe(p);
+		}
+	}
+
+	mutex_unlock(&kprobe_mutex);
+	/* Allow all currently running kprobes to complete */
+	synchronize_sched();
+
+	mutex_lock(&kprobe_mutex);
+	/* Unconditionally unregister the page_fault notifier */
+	unregister_page_fault_notifier(&kprobe_page_fault_nb);
+
+already_disabled:
+	mutex_unlock(&kprobe_mutex);
+	return;
+}
+
+/*
+ * XXX: The debugfs bool file interface doesn't allow for callbacks
+ * when the bool state is switched. We can reuse that facility when
+ * available
+ */
+static ssize_t read_enabled_file_bool(struct file *file,
+	       char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[3];
+
+	if (kprobe_enabled)
+		buf[0] = '1';
+	else
+		buf[0] = '0';
+	buf[1] = '\n';
+	buf[2] = 0x00;
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_file_bool(struct file *file,
+	       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	int buf_size;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	switch (buf[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		enable_all_kprobes();
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		disable_all_kprobes();
+		break;
+	}
+
+	return count;
+}
+
+static struct file_operations fops_kp = {
+	.read =         read_enabled_file_bool,
+	.write =        write_enabled_file_bool,
+};
+
 static int __kprobes debugfs_kprobe_init(void)
 {
 	struct dentry *dir, *file;
+	unsigned int value = 1;
 
 	dir = debugfs_create_dir("kprobes", NULL);
 	if (!dir)
@@ -900,6 +1035,13 @@ static int __kprobes debugfs_kprobe_init(void)
 		return -ENOMEM;
 	}
 
+	file = debugfs_create_file("enabled", 0600, dir,
+					&value, &fops_kp);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
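For context, the knob this patch adds is an ordinary debugfs file, so it can be driven from userspace with plain reads and writes. Below is a minimal sketch (not part of the patch) of querying and toggling it from a small C program; it assumes debugfs is mounted at the conventional /sys/kernel/debug and that the program runs as root, since the "enabled" file is created with mode 0600.

/*
 * toggle_kprobes.c - minimal userspace sketch for the debugfs knob added
 * above. Assumes debugfs is mounted at /sys/kernel/debug (a convention,
 * not a guarantee) and root privileges.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#define KNOB "/sys/kernel/debug/kprobes/enabled"

int main(int argc, char **argv)
{
	char state[4] = "";
	int fd;

	if (argc > 1) {
		/* Write "1"/"y" to enable or "0"/"n" to disable all kprobes */
		fd = open(KNOB, O_WRONLY);
		if (fd < 0 || write(fd, argv[1], strlen(argv[1])) < 0) {
			perror(KNOB);
			return 1;
		}
		close(fd);
		return 0;
	}

	/* No argument: report the current state ("1\n" or "0\n") */
	fd = open(KNOB, O_RDONLY);
	if (fd < 0 || read(fd, state, sizeof(state) - 1) < 0) {
		perror(KNOB);
		return 1;
	}
	close(fd);
	printf("kprobes enabled: %s", state);
	return 0;
}

Writing 'y'/'Y'/'1' re-arms every probe on the hash table, while 'n'/'N'/'0' removes the breakpoints (except arch trampolines) but keeps the kprobe registrations in place, which is why the unregister path above can skip arch_disarm_kprobe() when kprobes are globally disabled.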