Skip to content

Commit 5e6da6b

Browse files
captain5050 authored and acmel committed
perf trace: Migrate BPF augmentation to use a skeleton
Previously a BPF event of augmented_raw_syscalls.c could be used to enable augmentation of syscalls by perf trace. As BPF events are no longer supported, switch to using a BPF skeleton which when attached explicitly opens the sysenter and sysexit tracepoints. The dump map is removed as debugging wasn't supported by the augmentation and bpf_printk can be used when necessary. Remove tools/perf/examples/bpf/augmented_raw_syscalls.c so that the rename/migration to a BPF skeleton captures that this was the source. Committer notes: Some minor stylistic changes to help visualizing the diff. Use libbpf_strerror when failing to load the augmented raw syscalls BPF. Use bpf_object__for_each_program(prog, trace.skel->obj) to disable auto attachment for all but the sys_enter, sys_exit tracepoints, to avoid having to add extra lines as we go adding support for more pointer receiving syscalls. Committer testing: # perf trace -e open* --max-events=10 0.000 ( 0.022 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/proc/meminfo", flags: RDONLY|CLOEXEC) = 11 208.833 ( ): gnome-terminal/3223 openat(dfd: CWD, filename: "/proc/51250/cmdline") ... 
249.993 ( 0.024 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/proc/meminfo", flags: RDONLY|CLOEXEC) = 11 250.118 ( 0.030 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/[email protected]/memory.pressure", flags: RDONLY|CLOEXEC) = 11 250.205 ( 0.016 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/[email protected]/memory.current", flags: RDONLY|CLOEXEC) = 11 250.244 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/[email protected]/memory.min", flags: RDONLY|CLOEXEC) = 11 250.282 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/[email protected]/memory.low", flags: RDONLY|CLOEXEC) = 11 250.320 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/[email protected]/memory.swap.current", flags: RDONLY|CLOEXEC) = 11 250.355 ( 0.014 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1000.slice/[email protected]/memory.stat", flags: RDONLY|CLOEXEC) = 11 250.717 ( 0.016 ms): systemd-oomd/1151 openat(dfd: CWD, filename: "/sys/fs/cgroup/user.slice/user-1001.slice/[email protected]/memory.pressure", flags: RDONLY|CLOEXEC) = 11 # # perf trace -e *nanosleep* --max-events=10 ? ( ): SCTP timer/28304 ... [continued]: clock_nanosleep()) = 0 0.007 (10.058 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0 10.069 ( ): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) ... 10.069 (10.056 ms): SCTP timer/28304 ... [continued]: clock_nanosleep()) = 0 17.059 ( ): podman/3572 nanosleep(rqtp: 0x7fc4f4d75be0) ... 17.059 (10.061 ms): podman/3572 ... 
[continued]: nanosleep()) = 0 20.131 (10.059 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0 30.195 (10.038 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0 40.238 (10.057 ms): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) = 0 50.301 ( ): SCTP timer/28304 clock_nanosleep(rqtp: { .tv_sec: 0, .tv_nsec: 10000000 }, rmtp: 0x7f0466b78de0) ... # # perf trace -e perf_event* -- perf stat -e instructions,cycles,cache-misses sleep 0.1 0.000 ( 0.011 ms): perf/51331 perf_event_open(attr_uptr: { type: 0 (PERF_TYPE_HARDWARE), size: 136, config: 0x1 (PERF_COUNT_HW_INSTRUCTIONS), sample_type: IDENTIFIER, read_format: TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING, disabled: 1, inherit: 1, enable_on_exec: 1, exclude_guest: 1 }, pid: 51332 (perf), cpu: -1, group_fd: -1, flags: FD_CLOEXEC) = 3 0.013 ( 0.003 ms): perf/51331 perf_event_open(attr_uptr: { type: 0 (PERF_TYPE_HARDWARE), size: 136, config: 0 (PERF_COUNT_HW_CPU_CYCLES), sample_type: IDENTIFIER, read_format: TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING, disabled: 1, inherit: 1, enable_on_exec: 1, exclude_guest: 1 }, pid: 51332 (perf), cpu: -1, group_fd: -1, flags: FD_CLOEXEC) = 4 0.017 ( 0.002 ms): perf/51331 perf_event_open(attr_uptr: { type: 0 (PERF_TYPE_HARDWARE), size: 136, config: 0x3 (PERF_COUNT_HW_CACHE_MISSES), sample_type: IDENTIFIER, read_format: TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING, disabled: 1, inherit: 1, enable_on_exec: 1, exclude_guest: 1 }, pid: 51332 (perf), cpu: -1, group_fd: -1, flags: FD_CLOEXEC) = 5 Performance counter stats for 'sleep 0.1': 1,495,051 instructions # 1.11 insn per cycle 1,347,641 cycles 35,424 cache-misses 0.100935279 seconds time elapsed 0.000924000 seconds user 0.000000000 seconds sys # # perf trace -e connect* ssh localhost 0.000 ( 0.012 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 
110) = -1 ECONNREFUSED (Connection refused) 0.118 ( 0.004 ms): ssh/51346 connect(fd: 6, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 0.399 ( 0.007 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 0.426 ( 0.003 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 0.754 ( 0.009 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: INET, port: 22, addr: 127.0.0.1 }, addrlen: 16) = 0 0.771 ( 0.010 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: INET6, port: 22, addr: ::1 }, addrlen: 28) = 0 0.798 ( 0.053 ms): ssh/51346 connect(fd: 4, uservaddr: { .family: INET6, port: 22, addr: ::1 }, addrlen: 28) = 0 0.870 ( 0.004 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 0.904 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 0.930 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 0.957 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 0.981 ( 0.003 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 1.006 ( 0.004 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 1.036 ( 0.005 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/lib/sss/pipes/nss }, addrlen: 110) = -1 ECONNREFUSED (Connection refused) 65.077 ( 0.022 ms): ssh/51346 
connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/run/.heim_org.h5l.kcm-socket }, addrlen: 110) = 0 66.608 ( 0.014 ms): ssh/51346 connect(fd: 5, uservaddr: { .family: LOCAL, path: /var/run/.heim_org.h5l.kcm-socket }, addrlen: 110) = 0 root@localhost's password: # # perf trace -e sendto* ping -c 2 localhost PING localhost(localhost (::1)) 56 data bytes 64 bytes from localhost (::1): icmp_seq=1 ttl=64 time=0.024 ms 0.000 ( 0.011 ms): ping/51357 sendto(fd: 5, buff: 0x7ffcca35e620, len: 20, addr: { .family: NETLINK }, addr_len: 0xc) = 20 0.135 ( 0.026 ms): ping/51357 sendto(fd: 4, buff: 0x5601398f7b20, len: 64, addr: { .family: INET6, port: 58, addr: ::1 }, addr_len: 0x1c) = 64 1014.929 ( 0.050 ms): ping/51357 sendto(fd: 4, buff: 0x5601398f7b20, len: 64, flags: CONFIRM, addr: { .family: INET6, port: 58, addr: ::1 }, addr_len: 0x1c) = 64 64 bytes from localhost (::1): icmp_seq=2 ttl=64 time=0.046 ms --- localhost ping statistics --- 2 packets transmitted, 2 received, 0% packet loss, time 1015ms rtt min/avg/max/mdev = 0.024/0.035/0.046/0.011 ms # Signed-off-by: Ian Rogers <[email protected]> Acked-by: Jiri Olsa <[email protected]> Tested-by: Arnaldo Carvalho de Melo <[email protected]> Cc: Adrian Hunter <[email protected]> Cc: Alexander Shishkin <[email protected]> Cc: Andi Kleen <[email protected]> Cc: Andrii Nakryiko <[email protected]> Cc: Anshuman Khandual <[email protected]> Cc: Athira Rajeev <[email protected]> Cc: Brendan Gregg <[email protected]> Cc: Carsten Haitzler <[email protected]> Cc: Eduard Zingerman <[email protected]> Cc: Fangrui Song <[email protected]> Cc: He Kuang <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: James Clark <[email protected]> Cc: Kan Liang <[email protected]> Cc: Leo Yan <[email protected]> Cc: Madhavan Srinivasan <[email protected]> Cc: Mark Rutland <[email protected]> Cc: Namhyung Kim <[email protected]> Cc: Nathan Chancellor <[email protected]> Cc: Naveen N. 
Rao <[email protected]> Cc: Nick Desaulniers <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Ravi Bangoria <[email protected]> Cc: Rob Herring <[email protected]> Cc: Tiezhu Yang <[email protected]> Cc: Tom Rix <[email protected]> Cc: Wang Nan <[email protected]> Cc: Wang ShaoBo <[email protected]> Cc: Yang Jihong <[email protected]> Cc: Yonghong Song <[email protected]> Cc: YueHaibing <[email protected]> Cc: [email protected] Cc: [email protected] Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
1 parent 3d6dfae commit 5e6da6b

File tree

3 files changed

+110
-77
lines changed

3 files changed

+110
-77
lines changed

tools/perf/Makefile.perf

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1038,6 +1038,7 @@ SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
10381038
SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h
10391039
SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h $(SKEL_OUT)/sample_filter.skel.h
10401040
SKELETONS += $(SKEL_OUT)/bench_uprobe.skel.h
1041+
SKELETONS += $(SKEL_OUT)/augmented_raw_syscalls.skel.h
10411042

10421043
$(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_OUTPUT) $(LIBSYMBOL_OUTPUT):
10431044
$(Q)$(MKDIR) -p $@

tools/perf/builtin-trace.c

Lines changed: 95 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@
1919
#ifdef HAVE_LIBBPF_SUPPORT
2020
#include <bpf/bpf.h>
2121
#include <bpf/libbpf.h>
22+
#ifdef HAVE_BPF_SKEL
23+
#include "bpf_skel/augmented_raw_syscalls.skel.h"
24+
#endif
2225
#endif
2326
#include "util/bpf_map.h"
2427
#include "util/rlimit.h"
@@ -127,25 +130,19 @@ struct trace {
127130
struct syscalltbl *sctbl;
128131
struct {
129132
struct syscall *table;
130-
struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
131-
struct bpf_map *sys_enter,
132-
*sys_exit;
133-
} prog_array;
134133
struct {
135134
struct evsel *sys_enter,
136-
*sys_exit,
137-
*augmented;
135+
*sys_exit,
136+
*bpf_output;
138137
} events;
139-
struct bpf_program *unaugmented_prog;
140138
} syscalls;
141-
struct {
142-
struct bpf_map *map;
143-
} dump;
139+
#ifdef HAVE_BPF_SKEL
140+
struct augmented_raw_syscalls_bpf *skel;
141+
#endif
144142
struct record_opts opts;
145143
struct evlist *evlist;
146144
struct machine *host;
147145
struct thread *current;
148-
struct bpf_object *bpf_obj;
149146
struct cgroup *cgroup;
150147
u64 base_time;
151148
FILE *output;
@@ -415,6 +412,7 @@ static int evsel__init_syscall_tp(struct evsel *evsel)
415412
if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
416413
evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
417414
return -ENOENT;
415+
418416
return 0;
419417
}
420418

@@ -2845,7 +2843,7 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
28452843
if (thread)
28462844
trace__fprintf_comm_tid(trace, thread, trace->output);
28472845

2848-
if (evsel == trace->syscalls.events.augmented) {
2846+
if (evsel == trace->syscalls.events.bpf_output) {
28492847
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
28502848
struct syscall *sc = trace__syscall_info(trace, evsel, id);
28512849

@@ -3278,24 +3276,16 @@ static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
32783276
goto out;
32793277
}
32803278

3281-
#ifdef HAVE_LIBBPF_SUPPORT
3282-
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
3283-
{
3284-
if (trace->bpf_obj == NULL)
3285-
return NULL;
3286-
3287-
return bpf_object__find_map_by_name(trace->bpf_obj, name);
3288-
}
3289-
3279+
#ifdef HAVE_BPF_SKEL
32903280
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
32913281
{
32923282
struct bpf_program *pos, *prog = NULL;
32933283
const char *sec_name;
32943284

3295-
if (trace->bpf_obj == NULL)
3285+
if (trace->skel->obj == NULL)
32963286
return NULL;
32973287

3298-
bpf_object__for_each_program(pos, trace->bpf_obj) {
3288+
bpf_object__for_each_program(pos, trace->skel->obj) {
32993289
sec_name = bpf_program__section_name(pos);
33003290
if (sec_name && !strcmp(sec_name, name)) {
33013291
prog = pos;
@@ -3313,12 +3303,12 @@ static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, str
33133303

33143304
if (prog_name == NULL) {
33153305
char default_prog_name[256];
3316-
scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
3306+
scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
33173307
prog = trace__find_bpf_program_by_title(trace, default_prog_name);
33183308
if (prog != NULL)
33193309
goto out_found;
33203310
if (sc->fmt && sc->fmt->alias) {
3321-
scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
3311+
scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
33223312
prog = trace__find_bpf_program_by_title(trace, default_prog_name);
33233313
if (prog != NULL)
33243314
goto out_found;
@@ -3336,7 +3326,7 @@ static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, str
33363326
pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
33373327
prog_name, type, sc->name);
33383328
out_unaugmented:
3339-
return trace->syscalls.unaugmented_prog;
3329+
return trace->skel->progs.syscall_unaugmented;
33403330
}
33413331

33423332
static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
@@ -3353,13 +3343,13 @@ static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
33533343
static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
33543344
{
33553345
struct syscall *sc = trace__syscall_info(trace, NULL, id);
3356-
return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3346+
return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
33573347
}
33583348

33593349
static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
33603350
{
33613351
struct syscall *sc = trace__syscall_info(trace, NULL, id);
3362-
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3352+
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
33633353
}
33643354

33653355
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
@@ -3384,7 +3374,7 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
33843374
bool is_candidate = false;
33853375

33863376
if (pair == NULL || pair == sc ||
3387-
pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3377+
pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
33883378
continue;
33893379

33903380
for (field = sc->args, candidate_field = pair->args;
@@ -3437,7 +3427,7 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
34373427
*/
34383428
if (pair_prog == NULL) {
34393429
pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3440-
if (pair_prog == trace->syscalls.unaugmented_prog)
3430+
if (pair_prog == trace->skel->progs.syscall_unaugmented)
34413431
goto next_candidate;
34423432
}
34433433

@@ -3452,8 +3442,8 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
34523442

34533443
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
34543444
{
3455-
int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3456-
map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3445+
int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
3446+
int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
34573447
int err = 0, key;
34583448

34593449
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
@@ -3515,7 +3505,7 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
35153505
* For now we're just reusing the sys_enter prog, and if it
35163506
* already has an augmenter, we don't need to find one.
35173507
*/
3518-
if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3508+
if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
35193509
continue;
35203510

35213511
/*
@@ -3538,22 +3528,9 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
35383528
break;
35393529
}
35403530

3541-
35423531
return err;
35433532
}
3544-
3545-
#else // HAVE_LIBBPF_SUPPORT
3546-
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3547-
const char *name __maybe_unused)
3548-
{
3549-
return NULL;
3550-
}
3551-
3552-
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3553-
{
3554-
return 0;
3555-
}
3556-
#endif // HAVE_LIBBPF_SUPPORT
3533+
#endif // HAVE_BPF_SKEL
35573534

35583535
static int trace__set_ev_qualifier_filter(struct trace *trace)
35593536
{
@@ -3917,13 +3894,31 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
39173894
err = evlist__open(evlist);
39183895
if (err < 0)
39193896
goto out_error_open;
3897+
#ifdef HAVE_BPF_SKEL
3898+
{
3899+
struct perf_cpu cpu;
39203900

3901+
/*
3902+
* Set up the __augmented_syscalls__ BPF map to hold for each
3903+
* CPU the bpf-output event's file descriptor.
3904+
*/
3905+
perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
3906+
bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
3907+
&cpu.cpu, sizeof(int),
3908+
xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
3909+
cpu.cpu, 0),
3910+
sizeof(__u32), BPF_ANY);
3911+
}
3912+
}
3913+
#endif
39213914
err = trace__set_filter_pids(trace);
39223915
if (err < 0)
39233916
goto out_error_mem;
39243917

3925-
if (trace->syscalls.prog_array.sys_enter)
3918+
#ifdef HAVE_BPF_SKEL
3919+
if (trace->skel->progs.sys_enter)
39263920
trace__init_syscalls_bpf_prog_array_maps(trace);
3921+
#endif
39273922

39283923
if (trace->ev_qualifier_ids.nr > 0) {
39293924
err = trace__set_ev_qualifier_filter(trace);
@@ -3956,9 +3951,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
39563951
if (err < 0)
39573952
goto out_error_apply_filters;
39583953

3959-
if (trace->dump.map)
3960-
bpf_map__fprintf(trace->dump.map, trace->output);
3961-
39623954
err = evlist__mmap(evlist, trace->opts.mmap_pages);
39633955
if (err < 0)
39643956
goto out_error_mmap;
@@ -4655,6 +4647,18 @@ static void trace__exit(struct trace *trace)
46554647
zfree(&trace->perfconfig_events);
46564648
}
46574649

4650+
#ifdef HAVE_BPF_SKEL
4651+
static int bpf__setup_bpf_output(struct evlist *evlist)
4652+
{
4653+
int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
4654+
4655+
if (err)
4656+
pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
4657+
4658+
return err;
4659+
}
4660+
#endif
4661+
46584662
int cmd_trace(int argc, const char **argv)
46594663
{
46604664
const char *trace_usage[] = {
@@ -4686,7 +4690,6 @@ int cmd_trace(int argc, const char **argv)
46864690
.max_stack = UINT_MAX,
46874691
.max_events = ULONG_MAX,
46884692
};
4689-
const char *map_dump_str = NULL;
46904693
const char *output_name = NULL;
46914694
const struct option trace_options[] = {
46924695
OPT_CALLBACK('e', "event", &trace, "event",
@@ -4720,9 +4723,6 @@ int cmd_trace(int argc, const char **argv)
47204723
OPT_CALLBACK(0, "duration", &trace, "float",
47214724
"show only events with duration > N.M ms",
47224725
trace__set_duration),
4723-
#ifdef HAVE_LIBBPF_SUPPORT
4724-
OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4725-
#endif
47264726
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
47274727
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
47284728
OPT_BOOLEAN('T', "time", &trace.full_time,
@@ -4849,16 +4849,44 @@ int cmd_trace(int argc, const char **argv)
48494849
"cgroup monitoring only available in system-wide mode");
48504850
}
48514851

4852-
err = -1;
4852+
#ifdef HAVE_BPF_SKEL
4853+
trace.skel = augmented_raw_syscalls_bpf__open();
4854+
if (!trace.skel) {
4855+
pr_debug("Failed to open augmented syscalls BPF skeleton");
4856+
} else {
4857+
/*
4858+
* Disable attaching the BPF programs except for sys_enter and
4859+
* sys_exit that tail call into this as necessary.
4860+
*/
4861+
struct bpf_program *prog;
48534862

4854-
if (map_dump_str) {
4855-
trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4856-
if (trace.dump.map == NULL) {
4857-
pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
4858-
goto out;
4863+
bpf_object__for_each_program(prog, trace.skel->obj) {
4864+
if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
4865+
bpf_program__set_autoattach(prog, /*autoattach=*/false);
4866+
}
4867+
4868+
err = augmented_raw_syscalls_bpf__load(trace.skel);
4869+
4870+
if (err < 0) {
4871+
libbpf_strerror(err, bf, sizeof(bf));
4872+
pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
4873+
} else {
4874+
augmented_raw_syscalls_bpf__attach(trace.skel);
4875+
trace__add_syscall_newtp(&trace);
48594876
}
48604877
}
48614878

4879+
err = bpf__setup_bpf_output(trace.evlist);
4880+
if (err) {
4881+
libbpf_strerror(err, bf, sizeof(bf));
4882+
pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
4883+
goto out;
4884+
}
4885+
trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
4886+
assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__"));
4887+
#endif
4888+
err = -1;
4889+
48624890
if (trace.trace_pgfaults) {
48634891
trace.opts.sample_address = true;
48644892
trace.opts.sample_time = true;
@@ -4909,7 +4937,7 @@ int cmd_trace(int argc, const char **argv)
49094937
* buffers that are being copied from kernel to userspace, think 'read'
49104938
* syscall.
49114939
*/
4912-
if (trace.syscalls.events.augmented) {
4940+
if (trace.syscalls.events.bpf_output) {
49134941
evlist__for_each_entry(trace.evlist, evsel) {
49144942
bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
49154943

@@ -4918,9 +4946,9 @@ int cmd_trace(int argc, const char **argv)
49184946
goto init_augmented_syscall_tp;
49194947
}
49204948

4921-
if (trace.syscalls.events.augmented->priv == NULL &&
4949+
if (trace.syscalls.events.bpf_output->priv == NULL &&
49224950
strstr(evsel__name(evsel), "syscalls:sys_enter")) {
4923-
struct evsel *augmented = trace.syscalls.events.augmented;
4951+
struct evsel *augmented = trace.syscalls.events.bpf_output;
49244952
if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
49254953
evsel__init_augmented_syscall_tp_args(augmented))
49264954
goto out;
@@ -5025,5 +5053,8 @@ int cmd_trace(int argc, const char **argv)
50255053
fclose(trace.output);
50265054
out:
50275055
trace__exit(&trace);
5056+
#ifdef HAVE_BPF_SKEL
5057+
augmented_raw_syscalls_bpf__destroy(trace.skel);
5058+
#endif
50285059
return err;
50295060
}

0 commit comments

Comments
 (0)