
Commit 944138f

namhyung authored and acmel committed
perf stat: Enable BPF counter with --for-each-cgroup
Recently bperf was added to use BPF to count perf events for various purposes. This is an extension of that approach, targeting cgroup usage.

Unlike the other bperf modes, it does not share the events with other processes, but it reduces unnecessary events (and the overhead of multiplexing) for each monitored cgroup within the perf session. When --for-each-cgroup is used with --bpf-counters, perf opens a cgroup-switches event per cpu internally and attaches a new BPF program that reads the given perf_events and aggregates the results per cgroup. The program is invoked only when a task is switched to a task in a different cgroup.

Signed-off-by: Namhyung Kim <[email protected]>
Acked-by: Song Liu <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Ian Rogers <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Link: http://lore.kernel.org/lkml/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
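For example, with two existing cgroups named A and B (illustrative names, not from this commit), the new BPF path is taken when the two options are combined:

    # perf stat -a -e cycles,instructions --bpf-counters --for-each-cgroup A,B sleep 1

Compared to plain --for-each-cgroup, which opens one perf_event per event, cpu, and cgroup, this mode opens each event only once per cpu and lets the BPF program split the counts by cgroup, avoiding the multiplexing overhead mentioned above.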
1 parent 892ba7f commit 944138f

7 files changed: +523 −1 lines changed

tools/perf/Makefile.perf

Lines changed: 16 additions & 1 deletion

@@ -1017,6 +1017,7 @@ SKEL_OUT := $(abspath $(OUTPUT)util/bpf_skel)
 SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp)
 SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h
 SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
+SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h

 ifdef BUILD_BPF_SKEL
   BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
@@ -1030,7 +1031,21 @@ $(BPFTOOL): | $(SKEL_TMP_OUT)
 	CFLAGS= $(MAKE) -C ../bpf/bpftool \
 		OUTPUT=$(SKEL_TMP_OUT)/ bootstrap

-$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT)
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+		     $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+		     ../../vmlinux \
+		     /sys/kernel/btf/vmlinux \
+		     /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+
+$(SKEL_OUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL)
+ifeq ($(VMLINUX_H),)
+	$(QUIET_GEN)$(BPFTOOL) btf dump file $< format c > $@
+else
+	$(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) $(SKEL_OUT)/vmlinux.h | $(SKEL_TMP_OUT)
 	$(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) \
 	  -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@

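The new vmlinux.h rule exists because the added bperf_cgroup skeleton is compiled against kernel type definitions dumped from BTF. The generation step can be reproduced by hand with the exact bpftool command from the rule, and the skeletons are only built when BUILD_BPF_SKEL is set (the ifdef guard above); the invocation below assumes a kernel with BTF enabled:

    $ bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h
    $ make -C tools/perf BUILD_BPF_SKEL=1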
tools/perf/util/Build

Lines changed: 1 addition & 0 deletions

@@ -141,6 +141,7 @@ perf-y += clockid.o
 perf-$(CONFIG_LIBBPF) += bpf-loader.o
 perf-$(CONFIG_LIBBPF) += bpf_map.o
 perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
+perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
 perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
 perf-$(CONFIG_LIBELF) += symbol-elf.o
 perf-$(CONFIG_LIBELF) += probe-file.o

tools/perf/util/bpf_counter.c

Lines changed: 5 additions & 0 deletions

@@ -18,6 +18,7 @@
 #include "evsel.h"
 #include "evlist.h"
 #include "target.h"
+#include "cgroup.h"
 #include "cpumap.h"
 #include "thread_map.h"

@@ -742,6 +743,8 @@ struct bpf_counter_ops bperf_ops = {
 	.destroy	= bperf__destroy,
 };

+extern struct bpf_counter_ops bperf_cgrp_ops;
+
 static inline bool bpf_counter_skip(struct evsel *evsel)
 {
 	return list_empty(&evsel->bpf_counter_list) &&
@@ -759,6 +762,8 @@ int bpf_counter__load(struct evsel *evsel, struct target *target)
 {
 	if (target->bpf_str)
 		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
+	else if (cgrp_event_expanded && target->use_bpf)
+		evsel->bpf_counter_ops = &bperf_cgrp_ops;
 	else if (target->use_bpf || evsel->bpf_counter ||
 		 evsel__match_bpf_counter_events(evsel->name))
 		evsel->bpf_counter_ops = &bperf_ops;
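Only four of the seven changed files are rendered on this page. The cgrp_event_expanded flag tested above comes from cgroup.h; presumably the cgroup.{c,h} hunks not shown here declare it and set it once --for-each-cgroup has expanded one evsel per cgroup, roughly along these lines (a sketch, not the actual hunk):

	/* util/cgroup.h (sketch) */
	extern bool cgrp_event_expanded;

	/* util/cgroup.c (sketch): flag the evlist as cgroup-expanded
	 * after evlist__expand_cgroup() succeeds */
	bool cgrp_event_expanded;

	if (!ret)
		cgrp_event_expanded = true;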
tools/perf/util/bpf_counter_cgroup.c (new file)

Lines changed: 307 additions & 0 deletions

// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.skel.h"

static struct perf_event_attr cgrp_switch_attr = {
	.type = PERF_TYPE_SOFTWARE,
	.config = PERF_COUNT_SW_CGROUP_SWITCHES,
	.size = sizeof(cgrp_switch_attr),
	.sample_period = 1,
	.disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;

#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

static int bperf_load_program(struct evlist *evlist)
{
	struct bpf_link *link;
	struct evsel *evsel;
	struct cgroup *cgrp, *leader_cgrp;
	__u32 i, cpu;
	__u32 nr_cpus = evlist->core.all_cpus->nr;
	int total_cpus = cpu__max_cpu();
	int map_size, map_fd;
	int prog_fd, err;

	skel = bperf_cgroup_bpf__open();
	if (!skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return -1;
	}

	skel->rodata->num_cpus = total_cpus;
	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;

	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);

	/* we need one copy of events per cpu for reading */
	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
	bpf_map__resize(skel->maps.events, map_size);
	bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
	/* previous result is saved in a per-cpu array */
	map_size = evlist->core.nr_entries / nr_cgroups;
	bpf_map__resize(skel->maps.prev_readings, map_size);
	/* cgroup result needs all events (per-cpu) */
	map_size = evlist->core.nr_entries;
	bpf_map__resize(skel->maps.cgrp_readings, map_size);

	set_max_rlimit();

	err = bperf_cgroup_bpf__load(skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton\n");
		goto out;
	}

	if (cgroup_is_v2("perf_event") > 0)
		skel->bss->use_cgroup_v2 = 1;

	err = -1;

	cgrp_switch = evsel__new(&cgrp_switch_attr);
	if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
		pr_err("Failed to open cgroup switches event\n");
		goto out;
	}

	for (i = 0; i < nr_cpus; i++) {
		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
						      FD(cgrp_switch, i));
		if (IS_ERR(link)) {
			pr_err("Failed to attach cgroup program\n");
			err = PTR_ERR(link);
			goto out;
		}
	}

	/*
	 * Update cgrp_idx map from cgroup-id to event index.
	 */
	cgrp = NULL;
	i = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
			leader_cgrp = evsel->cgrp;
			evsel->cgrp = NULL;

			/* open single copy of the events w/o cgroup */
			err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
			if (err) {
				pr_err("Failed to open first cgroup events\n");
				goto out;
			}

			map_fd = bpf_map__fd(skel->maps.events);
			for (cpu = 0; cpu < nr_cpus; cpu++) {
				int fd = FD(evsel, cpu);
				__u32 idx = evsel->idx * total_cpus +
					evlist->core.all_cpus->map[cpu];

				err = bpf_map_update_elem(map_fd, &idx, &fd,
							  BPF_ANY);
				if (err < 0) {
					pr_err("Failed to update perf_event fd\n");
					goto out;
				}
			}

			evsel->cgrp = leader_cgrp;
		}
		evsel->supported = true;

		if (evsel->cgrp == cgrp)
			continue;

		cgrp = evsel->cgrp;

		if (read_cgroup_id(cgrp) < 0) {
			pr_err("Failed to get cgroup id\n");
			err = -1;
			goto out;
		}

		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
		if (err < 0) {
			pr_err("Failed to update cgroup index map\n");
			goto out;
		}

		i++;
	}

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate reading. Check
	 * whether the kernel supports it.
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		err = 0;
	}

out:
	return err;
}

static int bperf_cgrp__load(struct evsel *evsel,
			    struct target *target __maybe_unused)
{
	static bool bperf_loaded = false;

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	if (!bperf_loaded && bperf_load_program(evsel->evlist))
		return -1;

	bperf_loaded = true;
	/* just to bypass bpf_counter_skip() */
	evsel->follower_skel = (struct bperf_follower_bpf *)skel;

	return 0;
}

static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
				  int cpu __maybe_unused, int fd __maybe_unused)
{
	/* nothing to do */
	return 0;
}

/*
 * trigger the leader prog on each cpu, so the cgrp_reading map could get
 * the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
	int i, cpu;
	int nr_cpus = evlist->core.all_cpus->nr;
	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

	for (i = 0; i < nr_cpus; i++) {
		cpu = evlist->core.all_cpus->map[i];
		bperf_trigger_reading(prog_fd, cpu);
	}

	return 0;
}

static int bperf_cgrp__enable(struct evsel *evsel)
{
	if (evsel->idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 1;
	return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
	if (evsel->idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 0;
	return 0;
}

static int bperf_cgrp__read(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	int i, cpu, nr_cpus = evlist->core.all_cpus->nr;
	int total_cpus = cpu__max_cpu();
	struct perf_counts_values *counts;
	struct bpf_perf_event_value *values;
	int reading_map_fd, err = 0;
	__u32 idx;

	if (evsel->idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	values = calloc(total_cpus, sizeof(*values));
	if (values == NULL)
		return -ENOMEM;

	reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);

	evlist__for_each_entry(evlist, evsel) {
		idx = evsel->idx;
		err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
		if (err) {
			pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
			       idx, evsel__name(evsel), evsel->cgrp->name);
			goto out;
		}

		for (i = 0; i < nr_cpus; i++) {
			cpu = evlist->core.all_cpus->map[i];

			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu].counter;
			counts->ena = values[cpu].enabled;
			counts->run = values[cpu].running;
		}
	}

out:
	free(values);
	return err;
}

static int bperf_cgrp__destroy(struct evsel *evsel)
{
	if (evsel->idx)
		return 0;

	bperf_cgroup_bpf__destroy(skel);
	evsel__delete(cgrp_switch); // it'll destroy on_switch progs too

	return 0;
}

struct bpf_counter_ops bperf_cgrp_ops = {
	.load       = bperf_cgrp__load,
	.enable     = bperf_cgrp__enable,
	.disable    = bperf_cgrp__disable,
	.read       = bperf_cgrp__read,
	.install_pe = bperf_cgrp__install_pe,
	.destroy    = bperf_cgrp__destroy,
};
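The BPF side lives in util/bpf_skel/bperf_cgroup.bpf.c, which this page does not show. Based on the names the loader references above (maps events, cgrp_idx, prev_readings, cgrp_readings; programs on_cgrp_switch, trigger_read; globals num_cpus, num_events, enabled, use_cgroup_v2), here is a minimal sketch of the per-switch accounting. The percpu map types are inferred from how bperf_cgrp__read() consumes one value per cpu; the helper choices, indexing, and charging details are assumptions, not the committed source:

	// SPDX-License-Identifier: GPL-2.0
	// Hypothetical sketch of util/bpf_skel/bperf_cgroup.bpf.c. On each
	// cgroup-switches event, read every perf event counter, charge the
	// delta since the previous reading to the current task's cgroup
	// (if monitored), and save the new reading.
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	#define MAX_EVENTS 32	/* assumed verifier-friendly upper bound */

	struct {
		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(int));
	} events SEC(".maps");

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(key_size, sizeof(__u64));	/* cgroup id */
		__uint(value_size, sizeof(__u32));	/* cgroup index */
	} cgrp_idx SEC(".maps");

	struct {
		__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(struct bpf_perf_event_value));
	} prev_readings SEC(".maps");

	struct {
		__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(struct bpf_perf_event_value));
	} cgrp_readings SEC(".maps");

	/* set by the loader through skel->rodata / skel->bss */
	const volatile __u32 num_events = 1;
	const volatile __u32 num_cpus = 1;
	int enabled = 0;
	int use_cgroup_v2 = 0;

	static int bperf_cgroup_count(void)
	{
		__u32 cpu = bpf_get_smp_processor_id();
		/* v2 only in this sketch; v1 needs a different lookup */
		__u64 cgrp_id = bpf_get_current_cgroup_id();
		struct bpf_perf_event_value val, delta, *prev, *cgrp_val;
		__u32 *idx, key, i;
		long err;

		idx = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);

		for (i = 0; i < MAX_EVENTS; i++) {
			if (i >= num_events)
				break;

			/* events map is indexed as event * num_cpus + cpu */
			err = bpf_perf_event_read_value(&events,
							i * num_cpus + cpu,
							&val, sizeof(val));
			if (err)
				continue;

			prev = bpf_map_lookup_elem(&prev_readings, &i);
			if (!prev)
				continue;

			/* always roll prev forward so paused periods
			 * are not charged later */
			delta.counter = val.counter - prev->counter;
			delta.enabled = val.enabled - prev->enabled;
			delta.running = val.running - prev->running;
			*prev = val;

			if (enabled && idx) {
				key = *idx * num_events + i;
				cgrp_val = bpf_map_lookup_elem(&cgrp_readings,
							       &key);
				if (cgrp_val) {
					cgrp_val->counter += delta.counter;
					cgrp_val->enabled += delta.enabled;
					cgrp_val->running += delta.running;
				}
			}
		}

		return 0;
	}

	SEC("perf_event")
	int on_cgrp_switch(void *ctx)
	{
		return bperf_cgroup_count();
	}

	/* run via BPF_PROG_TEST_RUN from bperf_trigger_reading() */
	SEC("raw_tp/sched_switch")
	int trigger_read(void *ctx)
	{
		return bperf_cgroup_count();
	}

	char LICENSE[] SEC("license") = "Dual BSD/GPL";

This mirrors the delta-accounting design the commit message describes: the hardware events stay opened once per cpu, and attribution to cgroups happens only at the (rare) cgroup-switch points rather than by multiplexing the events themselves.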
