@@ -91,6 +91,7 @@ static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+static struct ftrace_ops global_ops;
 
 /*
  * Traverse the ftrace_list, invoking all entries. The reason that we
@@ -153,7 +154,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
-static void update_ftrace_function(void)
+static void update_global_ops(void)
 {
 	ftrace_func_t func;
 
@@ -173,6 +174,18 @@ static void update_ftrace_function(void)
 		set_ftrace_pid_function(func);
 		func = ftrace_pid_func;
 	}
+
+	global_ops.func = func;
+}
+
+static void update_ftrace_function(void)
+{
+	ftrace_func_t func;
+
+	update_global_ops();
+
+	func = global_ops.func;
+
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
 #else
@@ -181,46 +194,70 @@ static void update_ftrace_function(void)
 #endif
 }
 
-static int __register_ftrace_function(struct ftrace_ops *ops)
+static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
-	ops->next = ftrace_list;
+	ops->next = *list;
 	/*
 	 * We are entering ops into the ftrace_list but another
 	 * CPU might be walking that list. We need to make sure
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
-	rcu_assign_pointer(ftrace_list, ops);
-
-	if (ftrace_enabled)
-		update_ftrace_function();
-
-	return 0;
+	rcu_assign_pointer(*list, ops);
 }
 
-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
 	struct ftrace_ops **p;
 
 	/*
 	 * If we are removing the last function, then simply point
 	 * to the ftrace_stub.
 	 */
-	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
-		ftrace_trace_function = ftrace_stub;
-		ftrace_list = &ftrace_list_end;
+	if (*list == ops && ops->next == &ftrace_list_end) {
+		*list = &ftrace_list_end;
 		return 0;
 	}
 
-	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
+	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 		if (*p == ops)
 			break;
 
 	if (*p != ops)
 		return -1;
 
 	*p = (*p)->next;
+	return 0;
+}
+
+static int __register_ftrace_function(struct ftrace_ops *ops)
+{
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	if (FTRACE_WARN_ON(ops == &global_ops))
+		return -EINVAL;
+
+	add_ftrace_ops(&ftrace_list, ops);
+	if (ftrace_enabled)
+		update_ftrace_function();
+
+	return 0;
+}
 
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
+{
+	int ret;
+
+	if (ftrace_disabled)
+		return -ENODEV;
+
+	if (FTRACE_WARN_ON(ops == &global_ops))
+		return -EINVAL;
+
+	ret = remove_ftrace_ops(&ftrace_list, ops);
+	if (ret < 0)
+		return ret;
 	if (ftrace_enabled)
 		update_ftrace_function();
 
@@ -894,7 +931,7 @@ enum {
 	FTRACE_OPS_FL_ENABLED = 1,
 };
 
-struct ftrace_ops global_ops = {
+static struct ftrace_ops global_ops = {
 	.func = ftrace_stub,
 	.notrace_hash = EMPTY_HASH,
 	.filter_hash = EMPTY_HASH,
@@ -3263,7 +3300,7 @@ void __init ftrace_init(void)
 
 #else
 
-struct ftrace_ops global_ops = {
+static struct ftrace_ops global_ops = {
 	.func = ftrace_stub,
 };
 
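
As a rough, self-contained sketch of the list handling this patch factors into add_ftrace_ops() and remove_ftrace_ops(), the user-space C program below reproduces the same head-insert and walk-and-unlink logic. The demo_ops, demo_list and demo_list_end names are invented for the example, and the kernel's rcu_assign_pointer() publication ordering is approximated here by a plain store, so this illustrates only the pointer manipulation, not the RCU guarantees real callers rely on.

/*
 * Standalone sketch (user space, not kernel code): mirrors the
 * add_ftrace_ops()/remove_ftrace_ops() list handling above.
 */
#include <stdio.h>

struct demo_ops {
	const char *name;
	struct demo_ops *next;
};

/* Sentinel entry that terminates the list, like ftrace_list_end. */
static struct demo_ops demo_list_end = { .name = "end" };
static struct demo_ops *demo_list = &demo_list_end;

/* Insert at the head; the kernel publishes with rcu_assign_pointer(). */
static void add_demo_ops(struct demo_ops **list, struct demo_ops *ops)
{
	ops->next = *list;
	*list = ops;		/* kernel: rcu_assign_pointer(*list, ops) */
}

/* Unlink ops; returns 0 on success, -1 if it was never on the list. */
static int remove_demo_ops(struct demo_ops **list, struct demo_ops *ops)
{
	struct demo_ops **p;

	/* Removing the only entry: point the list back at the sentinel. */
	if (*list == ops && ops->next == &demo_list_end) {
		*list = &demo_list_end;
		return 0;
	}

	for (p = list; *p != &demo_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

int main(void)
{
	struct demo_ops a = { .name = "a" }, b = { .name = "b" };
	struct demo_ops *ops;

	add_demo_ops(&demo_list, &a);
	add_demo_ops(&demo_list, &b);
	remove_demo_ops(&demo_list, &a);

	for (ops = demo_list; ops != &demo_list_end; ops = ops->next)
		printf("registered: %s\n", ops->name);

	return 0;
}

Compiling and running the sketch prints only "registered: b", showing that unlinking one entry leaves the rest of the list intact.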