@@ -1284,6 +1284,8 @@ void trace_dump_stack(void)
 	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
 }
 
+static DEFINE_PER_CPU(int, user_stack_count);
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -1302,6 +1304,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (unlikely(in_nmi()))
 		return;
 
+	/*
+	 * prevent recursion, since the user stack tracing may
+	 * trigger other kernel events.
+	 */
+	preempt_disable();
+	if (__this_cpu_read(user_stack_count))
+		goto out;
+
+	__this_cpu_inc(user_stack_count);
+
+
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1319,6 +1333,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
+
+	__this_cpu_dec(user_stack_count);
+
+ out:
+	preempt_enable();
 }
 
 #ifdef UNUSED
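For reference, the recursion guard this patch adds follows a common per-CPU pattern: disable preemption so the task cannot migrate, read a per-CPU counter to detect re-entry on the same CPU, and increment it around the work that might recurse back into the tracer. The sketch below shows that pattern in isolation; it is an illustrative example against the kernel's per-CPU API, not part of this patch, and my_traced_operation() and do_work_that_may_reenter() are hypothetical names.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* One flag per CPU; safe without locks because each CPU only
 * touches its own copy, and preemption is disabled while it does. */
static DEFINE_PER_CPU(int, guard_depth);

static void my_traced_operation(void)
{
	/* Pin the task to this CPU so the per-CPU flag stays ours. */
	preempt_disable();

	/* Nonzero means this CPU is already inside the operation:
	 * a callee re-entered us, so bail out instead of recursing. */
	if (__this_cpu_read(guard_depth))
		goto out;

	__this_cpu_inc(guard_depth);

	do_work_that_may_reenter();	/* hypothetical work */

	__this_cpu_dec(guard_depth);
out:
	preempt_enable();
}

Note that the guard is per CPU rather than global, so user stack tracing on one CPU does not block it on the others; NMI context is already rejected earlier in ftrace_trace_userstack() by the in_nmi() check visible in the hunk above.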