
Commit 1a0799a

tracing/function-graph-tracer: Move graph event insertion helpers in the graph tracer file
The function graph event helpers which insert the function entry and return events into the ring buffer currently reside in trace.c. But this file is quite overloaded and the right place for these helpers is the function graph tracer file, so move them to trace_functions_graph.c.

Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Steven Rostedt <[email protected]>
1 parent 82e04af commit 1a0799a
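For readers skimming the diff below: after this move the insertion helpers no longer reference the global_trace symbol directly. They go through a file-scope graph_array pointer that graph_trace_init() (or set_graph_array(), as in the selftest) must set before register_ftrace_graph() is called, and trace_graph_entry() bails out while it is still NULL. The following user-space sketch only models that ordering; trace_array_model, the printf "recording", and the *_model names are invented stand-ins for illustration, not kernel APIs.

/*
 * Standalone model of the pattern this patch introduces:
 * a file-scope pointer (like graph_array), a setter (like
 * set_graph_array()), and a callback that refuses to trace
 * until the pointer has been set.
 */
#include <stdio.h>

struct trace_array_model {
	const char *name;	/* simplified stand-in for struct trace_array */
};

/* file-scope target, like graph_array in trace_functions_graph.c */
static struct trace_array_model *graph_array_model;

static void set_graph_array_model(struct trace_array_model *tr)
{
	graph_array_model = tr;
}

/* models trace_graph_entry(): drop the event when no array is set */
static int graph_entry_model(const char *func)
{
	struct trace_array_model *tr = graph_array_model;

	if (!tr)
		return 0;	/* callback fired before the array was set */
	printf("[%s] enter %s\n", tr->name, func);
	return 1;
}

int main(void)
{
	struct trace_array_model global = { .name = "global_trace" };

	graph_entry_model("do_sys_open");	/* dropped: array not set yet */

	/* graph_trace_init() and the selftest set the array before registering */
	set_graph_array_model(&global);
	graph_entry_model("do_sys_open");	/* now recorded */
	return 0;
}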

File tree: 4 files changed, +121 -113 lines changed


kernel/trace/trace.c

Lines changed: 0 additions & 110 deletions
@@ -942,54 +942,6 @@ trace_function(struct trace_array *tr,
 	ring_buffer_unlock_commit(tr->buffer, event);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int __trace_graph_entry(struct trace_array *tr,
-				struct ftrace_graph_ent *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_entry;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ent_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return 0;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return 0;
-	entry = ring_buffer_event_data(event);
-	entry->graph_ent = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-
-	return 1;
-}
-
-static void __trace_graph_return(struct trace_array *tr,
-				struct ftrace_graph_ret *trace,
-				unsigned long flags,
-				int pc)
-{
-	struct ftrace_event_call *call = &event_funcgraph_exit;
-	struct ring_buffer_event *event;
-	struct ftrace_graph_ret_entry *entry;
-
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
-
-	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->ret = *trace;
-	if (!filter_current_check_discard(call, entry, event))
-		ring_buffer_unlock_commit(global_trace.buffer, event);
-}
-#endif
-
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1129,68 +1081,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int ret;
-	int cpu;
-	int pc;
-
-	if (!ftrace_trace_task(current))
-		return 0;
-
-	if (!ftrace_graph_addr(trace->func))
-		return 0;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		ret = __trace_graph_entry(tr, trace, flags, pc);
-	} else {
-		ret = 0;
-	}
-	/* Only do the atomic if it is not already set */
-	if (!test_tsk_trace_graph(current))
-		set_tsk_trace_graph(current);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-
-	return ret;
-}
-
-void trace_graph_return(struct ftrace_graph_ret *trace)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	long disabled;
-	int cpu;
-	int pc;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		pc = preempt_count();
-		__trace_graph_return(tr, trace, flags, pc);
-	}
-	if (!trace->depth)
-		clear_tsk_trace_graph(current);
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *

kernel/trace/trace.h

Lines changed: 1 addition & 0 deletions
@@ -471,6 +471,7 @@ void trace_function(struct trace_array *tr,
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
+void set_graph_array(struct trace_array *tr);
 
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);

kernel/trace/trace_functions_graph.c

Lines changed: 119 additions & 3 deletions
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
 	.opts = trace_opts
 };
 
-/* pid on the last trace processed */
+static struct trace_array *graph_array;
 
 
 /* Add a function return address to the trace stack on thread info.*/
@@ -166,17 +166,133 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
+static int __trace_graph_entry(struct trace_array *tr,
+				struct ftrace_graph_ent *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_entry;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ent_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return 0;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return 0;
+	entry = ring_buffer_event_data(event);
+	entry->graph_ent = *trace;
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+
+	return 1;
+}
+
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int ret;
+	int cpu;
+	int pc;
+
+	if (unlikely(!tr))
+		return 0;
+
+	if (!ftrace_trace_task(current))
+		return 0;
+
+	if (!ftrace_graph_addr(trace->func))
+		return 0;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
+	} else {
+		ret = 0;
+	}
+	/* Only do the atomic if it is not already set */
+	if (!test_tsk_trace_graph(current))
+		set_tsk_trace_graph(current);
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+				struct ftrace_graph_ret *trace,
+				unsigned long flags,
+				int pc)
+{
+	struct ftrace_event_call *call = &event_funcgraph_exit;
+	struct ring_buffer_event *event;
+	struct ftrace_graph_ret_entry *entry;
+
+	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+		return;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->ret = *trace;
+	if (!filter_current_check_discard(call, entry, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = graph_array;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
+	}
+	if (!trace->depth)
+		clear_tsk_trace_graph(current);
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
-	int ret = register_ftrace_graph(&trace_graph_return,
-					&trace_graph_entry);
+	int ret;
+
+	graph_array = tr;
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry);
 	if (ret)
 		return ret;
 	tracing_start_cmdline_record();
 
 	return 0;
 }
 
+void set_graph_array(struct trace_array *tr)
+{
+	graph_array = tr;
+}
+
 static void graph_trace_reset(struct trace_array *tr)
 {
 	tracing_stop_cmdline_record();

kernel/trace/trace_selftest.c

Lines changed: 1 addition & 0 deletions
@@ -288,6 +288,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	 * to detect and recover from possible hangs
 	 */
 	tracing_reset_online_cpus(tr);
+	set_graph_array(tr);
 	ret = register_ftrace_graph(&trace_graph_return,
 				    &trace_graph_entry_watchdog);
 	if (ret) {
