@@ -20,9 +20,6 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 #include <bpf/btf.h>
-#ifdef HAVE_BPF_SKEL
-#include "bpf_skel/augmented_raw_syscalls.skel.h"
-#endif
 #endif
 #include "util/bpf_map.h"
 #include "util/rlimit.h"
@@ -155,9 +152,6 @@ struct trace {
 				*bpf_output;
 		} events;
 	} syscalls;
-#ifdef HAVE_BPF_SKEL
-	struct augmented_raw_syscalls_bpf *skel;
-#endif
 #ifdef HAVE_LIBBPF_SUPPORT
 	struct btf *btf;
 #endif
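
Note: builtin-trace.c stops including the skeleton header and drops the trace->skel pointer; all skeleton access now goes through augmented_syscalls__*() helpers. Collecting the call sites visible in the hunks below, the new API surface would look roughly like the prototypes here. The header name and exact signatures are inferred from the arguments passed in this diff, not confirmed by it:

/* Prototypes inferred from the call sites in this diff; header name assumed. */
struct bpf_program;
struct evlist;

int augmented_syscalls__prepare(void);
int augmented_syscalls__create_bpf_output(struct evlist *evlist);
void augmented_syscalls__setup_bpf_output(void);
int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids);
int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd);
struct bpf_program *augmented_syscalls__find_by_title(const char *name);
struct bpf_program *augmented_syscalls__unaugmented(void);
void augmented_syscalls__cleanup(void);
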
@@ -3703,7 +3697,10 @@ static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
 	goto out;
 }
 
-#ifdef HAVE_BPF_SKEL
+#ifdef HAVE_LIBBPF_SUPPORT
+
+static struct bpf_program *unaugmented_prog;
+
 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
 {
 	int id;
@@ -3721,46 +3718,28 @@ static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, st
 	return 0;
 }
 
-static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
-{
-	struct bpf_program *pos, *prog = NULL;
-	const char *sec_name;
-
-	if (trace->skel->obj == NULL)
-		return NULL;
-
-	bpf_object__for_each_program(pos, trace->skel->obj) {
-		sec_name = bpf_program__section_name(pos);
-		if (sec_name && !strcmp(sec_name, name)) {
-			prog = pos;
-			break;
-		}
-	}
-
-	return prog;
-}
-
-static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
+static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace __maybe_unused,
+							struct syscall *sc,
 							const char *prog_name, const char *type)
 {
 	struct bpf_program *prog;
 
 	if (prog_name == NULL) {
 		char default_prog_name[256];
 		scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
-		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
+		prog = augmented_syscalls__find_by_title(default_prog_name);
 		if (prog != NULL)
 			goto out_found;
 		if (sc->fmt && sc->fmt->alias) {
 			scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
-			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
+			prog = augmented_syscalls__find_by_title(default_prog_name);
 			if (prog != NULL)
 				goto out_found;
 		}
 		goto out_unaugmented;
 	}
 
-	prog = trace__find_bpf_program_by_title(trace, prog_name);
+	prog = augmented_syscalls__find_by_title(prog_name);
 
 	if (prog != NULL) {
 out_found:
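
trace__find_bpf_program_by_title() is deleted and its callers switch to augmented_syscalls__find_by_title(), which takes only the section name. A minimal sketch of the relocated helper, inferred from the deleted function; the file-local skeleton pointer ("skel") is assumed to live in the new util code:

struct bpf_program *augmented_syscalls__find_by_title(const char *name)
{
	struct bpf_program *pos, *prog = NULL;
	const char *sec_name;

	if (skel == NULL || skel->obj == NULL)
		return NULL;

	bpf_object__for_each_program(pos, skel->obj) {
		/* Program "titles" are their ELF section names. */
		sec_name = bpf_program__section_name(pos);
		if (sec_name && !strcmp(sec_name, name)) {
			prog = pos;
			break;
		}
	}

	return prog;
}
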
@@ -3770,7 +3749,7 @@ static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, str
 	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
 		 prog_name, type, sc->name);
 out_unaugmented:
-	return trace->skel->progs.syscall_unaugmented;
+	return unaugmented_prog;
 }
 
 static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
@@ -3787,13 +3766,13 @@ static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, in
 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id)
 {
 	struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
-	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
+	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(unaugmented_prog);
 }
 
 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id)
 {
 	struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
-	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
+	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(unaugmented_prog);
 }
 
 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array)
@@ -3903,7 +3882,7 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
 		bool is_candidate = false;
 
 		if (pair == NULL || pair->id == sc->id ||
-		    pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
+		    pair->bpf_prog.sys_enter == unaugmented_prog)
 			continue;
 
 		for (field = sc->args, candidate_field = pair->args;
@@ -3969,7 +3948,7 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
 		 */
 		if (pair_prog == NULL) {
 			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
-			if (pair_prog == trace->skel->progs.syscall_unaugmented)
+			if (pair_prog == unaugmented_prog)
 				goto next_candidate;
 		}
 
@@ -3985,12 +3964,17 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
 
 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
 {
-	int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
-	int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
-	int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
+	int map_enter_fd;
+	int map_exit_fd;
+	int beauty_map_fd;
 	int err = 0;
 	unsigned int beauty_array[6];
 
+	if (augmented_syscalls__get_map_fds(&map_enter_fd, &map_exit_fd, &beauty_map_fd) < 0)
+		return -1;
+
+	unaugmented_prog = augmented_syscalls__unaugmented();
+
 	for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
 		int prog_fd, key = syscalltbl__id_at_idx(e_machine, i);
@@ -4060,7 +4044,7 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_m
 		 * For now we're just reusing the sys_enter prog, and if it
 		 * already has an augmenter, we don't need to find one.
 		 */
-		if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
+		if (sc->bpf_prog.sys_enter != unaugmented_prog)
 			continue;
 
 		/*
@@ -4085,7 +4069,13 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_m
 
 	return err;
 }
-#endif // HAVE_BPF_SKEL
+#else // !HAVE_LIBBPF_SUPPORT
+static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused,
+						    int e_machine __maybe_unused)
+{
+	return -1;
+}
+#endif // HAVE_LIBBPF_SUPPORT
 
 static int trace__set_ev_qualifier_filter(struct trace *trace)
 {
@@ -4094,24 +4084,6 @@ static int trace__set_ev_qualifier_filter(struct trace *trace)
 	return 0;
 }
 
-static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
-				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
-{
-	int err = 0;
-#ifdef HAVE_LIBBPF_SUPPORT
-	bool value = true;
-	int map_fd = bpf_map__fd(map);
-	size_t i;
-
-	for (i = 0; i < npids; ++i) {
-		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
-		if (err)
-			break;
-	}
-#endif
-	return err;
-}
-
 static int trace__set_filter_loop_pids(struct trace *trace)
 {
 	unsigned int nr = 1, err;
@@ -4140,8 +4112,8 @@ static int trace__set_filter_loop_pids(struct trace *trace)
 	thread__put(thread);
 
 	err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
-	if (!err && trace->filter_pids.map)
-		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
+	if (!err)
+		err = augmented_syscalls__set_filter_pids(nr, pids);
 
 	return err;
 }
@@ -4158,8 +4130,8 @@ static int trace__set_filter_pids(struct trace *trace)
 	if (trace->filter_pids.nr > 0) {
 		err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
 						    trace->filter_pids.entries);
-		if (!err && trace->filter_pids.map) {
-			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
+		if (!err) {
+			err = augmented_syscalls__set_filter_pids(trace->filter_pids.nr,
 								  trace->filter_pids.entries);
 		}
 	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
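
bpf_map__set_filter_pids() is folded into augmented_syscalls__set_filter_pids(), which no longer takes a map argument: the deleted trace->filter_pids.map assignment in trace__run() (next hunk) shows the map was always skel->maps.pids_filtered, so the helper can resolve it itself. A sketch under that assumption, with the loop body taken from the deleted function:

int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids)
{
	bool value = true;
	int err = 0, map_fd;

	if (skel == NULL) /* augmentation not set up: nothing to filter */
		return 0;

	map_fd = bpf_map__fd(skel->maps.pids_filtered);

	for (unsigned int i = 0; i < nr; ++i) {
		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
		if (err)
			break;
	}

	return err;
}
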
@@ -4482,41 +4454,18 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	err = evlist__open(evlist);
 	if (err < 0)
 		goto out_error_open;
-#ifdef HAVE_BPF_SKEL
-	if (trace->syscalls.events.bpf_output) {
-		struct perf_cpu cpu;
 
-		/*
-		 * Set up the __augmented_syscalls__ BPF map to hold for each
-		 * CPU the bpf-output event's file descriptor.
-		 */
-		perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
-			int mycpu = cpu.cpu;
-
-			bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
-					     &mycpu, sizeof(mycpu),
-					     xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
							    mycpu, 0),
-					     sizeof(__u32), BPF_ANY);
-		}
-	}
+	augmented_syscalls__setup_bpf_output();
 
-	if (trace->skel)
-		trace->filter_pids.map = trace->skel->maps.pids_filtered;
-#endif
 	err = trace__set_filter_pids(trace);
 	if (err < 0)
 		goto out_error_mem;
 
-#ifdef HAVE_BPF_SKEL
-	if (trace->skel && trace->skel->progs.sys_enter) {
-		/*
-		 * TODO: Initialize for all host binary machine types, not just
-		 * those matching the perf binary.
-		 */
-		trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
-	}
-#endif
+	/*
+	 * TODO: Initialize for all host binary machine types, not just
+	 * those matching the perf binary.
+	 */
+	trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
 
 	if (trace->ev_qualifier_ids.nr > 0) {
 		err = trace__set_ev_qualifier_filter(trace);
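
The per-CPU loop that published the bpf-output event's file descriptors into the __augmented_syscalls__ map becomes augmented_syscalls__setup_bpf_output(). Since the new call takes no arguments, the util side presumably remembers the evsel it created in augmented_syscalls__create_bpf_output(); the "bpf_output" static below is that assumption, and the loop body is lifted from the deleted code:

void augmented_syscalls__setup_bpf_output(void)
{
	struct perf_cpu cpu;
	int i;

	if (skel == NULL || bpf_output == NULL)
		return;

	/*
	 * Set up the __augmented_syscalls__ BPF map to hold for each
	 * CPU the bpf-output event's file descriptor.
	 */
	perf_cpu_map__for_each_cpu(cpu, i, bpf_output->core.cpus) {
		int mycpu = cpu.cpu;

		bpf_map__update_elem(skel->maps.__augmented_syscalls__,
				     &mycpu, sizeof(mycpu),
				     xyarray__entry(bpf_output->core.fd, mycpu, 0),
				     sizeof(__u32), BPF_ANY);
	}
}
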
@@ -5379,18 +5328,6 @@ static void trace__exit(struct trace *trace)
 #endif
 }
 
-#ifdef HAVE_BPF_SKEL
-static int bpf__setup_bpf_output(struct evlist *evlist)
-{
-	int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
-
-	if (err)
-		pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
-
-	return err;
-}
-#endif
-
 int cmd_trace(int argc, const char **argv)
 {
 	const char *trace_usage[] = {
@@ -5587,7 +5524,6 @@ int cmd_trace(int argc, const char **argv)
 		"cgroup monitoring only available in system-wide mode");
 	}
 
-#ifdef HAVE_BPF_SKEL
 	if (!trace.trace_syscalls)
 		goto skip_augmentation;
 
@@ -5606,42 +5542,17 @@ int cmd_trace(int argc, const char **argv)
 		goto skip_augmentation;
 	}
 
-	trace.skel = augmented_raw_syscalls_bpf__open();
-	if (!trace.skel) {
-		pr_debug("Failed to open augmented syscalls BPF skeleton");
-	} else {
-		/*
-		 * Disable attaching the BPF programs except for sys_enter and
-		 * sys_exit that tail call into this as necessary.
-		 */
-		struct bpf_program *prog;
+	err = augmented_syscalls__prepare();
+	if (err < 0)
+		goto skip_augmentation;
 
-		bpf_object__for_each_program(prog, trace.skel->obj) {
-			if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
-				bpf_program__set_autoattach(prog, /*autoattach=*/false);
-		}
+	trace__add_syscall_newtp(&trace);
 
-		err = augmented_raw_syscalls_bpf__load(trace.skel);
+	err = augmented_syscalls__create_bpf_output(trace.evlist);
+	if (err == 0)
+		trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
 
-		if (err < 0) {
-			libbpf_strerror(err, bf, sizeof(bf));
-			pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
-		} else {
-			augmented_raw_syscalls_bpf__attach(trace.skel);
-			trace__add_syscall_newtp(&trace);
-		}
-	}
-
-	err = bpf__setup_bpf_output(trace.evlist);
-	if (err) {
-		libbpf_strerror(err, bf, sizeof(bf));
-		pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
-		goto out;
-	}
-	trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
-	assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
 skip_augmentation:
-#endif
 	err = -1;
 
 	if (trace.trace_pgfaults) {
@@ -5833,8 +5744,6 @@ int cmd_trace(int argc, const char **argv)
 	fclose(trace.output);
 out:
 	trace__exit(&trace);
-#ifdef HAVE_BPF_SKEL
-	augmented_raw_syscalls_bpf__destroy(trace.skel);
-#endif
+	augmented_syscalls__cleanup();
 	return err;
 }
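
The skeleton open/load/attach sequence, the bpf-output event creation and the destroy call collapse into augmented_syscalls__prepare(), augmented_syscalls__create_bpf_output() and augmented_syscalls__cleanup(). A sketch of the moved lifecycle code, reassembled from the deleted lines; the file-local "skel" and "bpf_output" statics are assumptions about the new util file's layout:

static struct augmented_raw_syscalls_bpf *skel;
static struct evsel *bpf_output;

int augmented_syscalls__prepare(void)
{
	struct bpf_program *prog;
	char buf[128];
	int err;

	skel = augmented_raw_syscalls_bpf__open();
	if (!skel) {
		pr_debug("Failed to open augmented syscalls BPF skeleton\n");
		return -1;
	}

	/*
	 * Disable attaching the BPF programs except for sys_enter and
	 * sys_exit that tail call into this as necessary.
	 */
	bpf_object__for_each_program(prog, skel->obj) {
		if (prog != skel->progs.sys_enter && prog != skel->progs.sys_exit)
			bpf_program__set_autoattach(prog, /*autoattach=*/false);
	}

	err = augmented_raw_syscalls_bpf__load(skel);
	if (err < 0) {
		libbpf_strerror(err, buf, sizeof(buf));
		pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", buf);
		return err;
	}

	return augmented_raw_syscalls_bpf__attach(skel);
}

int augmented_syscalls__create_bpf_output(struct evlist *evlist)
{
	int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");

	if (err) {
		pr_err("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
		return err;
	}

	/* Remember the evsel so augmented_syscalls__setup_bpf_output() can find it. */
	bpf_output = evlist__last(evlist);
	return 0;
}

void augmented_syscalls__cleanup(void)
{
	augmented_raw_syscalls_bpf__destroy(skel);
	skel = NULL;
	bpf_output = NULL;
}
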