Skip to content

Commit 9eac561

Browse files
captain5050 authored and acmel committed
perf stat: Don't skip failing group events
Pass errno to stat_handle_error() rather than reading errno after it has
potentially been clobbered. Move "skippable" handling first as a skippable
event (from the perf stat default list) should always just be skipped.

Remove logic to skip rather than fail events in a group when they aren't
the group leader. The original logic was added in commit cb5ef60
("perf stat: Error out unsupported group leader immediately") due to
error handling and opening being together and an assertion being raised.
Not failing this case causes broken groups to not report values,
particularly for topdown events.

Closes: https://lore.kernel.org/lkml/[email protected]/
Reported-by: Dapeng Mi <[email protected]>
Reviewed-by: Dapeng Mi <[email protected]>
Signed-off-by: Ian Rogers <[email protected]>
Tested-by: Dapeng Mi <[email protected]>
Cc: Adrian Hunter <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Howard Chu <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: James Clark <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kan Liang <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Falcon <[email protected]>
Cc: Yoshihiro Furudera <[email protected]>
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
1 parent 7970e20 commit 9eac561

File tree

1 file changed

+21

-27

lines changed

tools/perf/builtin-stat.c

Lines changed: 21 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -613,33 +613,40 @@ enum counter_recovery {
613613
COUNTER_FATAL,
614614
};
615615

616-
static enum counter_recovery stat_handle_error(struct evsel *counter)
616+
static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
617617
{
618618
char msg[BUFSIZ];
619+
620+
if (counter->skippable) {
621+
if (verbose > 0) {
622+
ui__warning("skipping event %s that kernel failed to open .\n",
623+
evsel__name(counter));
624+
}
625+
counter->supported = false;
626+
counter->errored = true;
627+
return COUNTER_SKIP;
628+
}
629+
619630
/*
620631
* PPC returns ENXIO for HW counters until 2.6.37
621632
* (behavior changed with commit b0a873e).
622633
*/
623-
if (errno == EINVAL || errno == ENOSYS ||
624-
errno == ENOENT || errno == ENXIO) {
625-
if (verbose > 0)
634+
if (err == EINVAL || err == ENOSYS || err == ENOENT || err == ENXIO) {
635+
if (verbose > 0) {
626636
ui__warning("%s event is not supported by the kernel.\n",
627637
evsel__name(counter));
638+
}
628639
counter->supported = false;
629640
/*
630641
* errored is a sticky flag that means one of the counter's
631642
* cpu event had a problem and needs to be reexamined.
632643
*/
633644
counter->errored = true;
634-
635-
if ((evsel__leader(counter) != counter) ||
636-
!(counter->core.leader->nr_members > 1))
637-
return COUNTER_SKIP;
638-
} else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
645+
} else if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
639646
if (verbose > 0)
640647
ui__warning("%s\n", msg);
641648
return COUNTER_RETRY;
642-
} else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
649+
} else if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
643650
evsel_list->core.threads &&
644651
evsel_list->core.threads->err_thread != -1) {
645652
/*
@@ -651,29 +658,16 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
651658
evsel_list->core.threads->err_thread = -1;
652659
return COUNTER_RETRY;
653660
}
654-
} else if (counter->skippable) {
655-
if (verbose > 0)
656-
ui__warning("skipping event %s that kernel failed to open .\n",
657-
evsel__name(counter));
658-
counter->supported = false;
659-
counter->errored = true;
660-
return COUNTER_SKIP;
661-
}
662-
663-
if (errno == EOPNOTSUPP) {
661+
} else if (err == EOPNOTSUPP) {
664662
if (verbose > 0) {
665663
ui__warning("%s event is not supported by the kernel.\n",
666664
evsel__name(counter));
667665
}
668666
counter->supported = false;
669667
counter->errored = true;
670-
671-
if ((evsel__leader(counter) != counter) ||
672-
!(counter->core.leader->nr_members > 1))
673-
return COUNTER_SKIP;
674668
}
675669

676-
evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
670+
evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
677671
ui__error("%s\n", msg);
678672

679673
if (child_pid != -1)
@@ -761,7 +755,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
761755
continue;
762756
}
763757

764-
switch (stat_handle_error(counter)) {
758+
switch (stat_handle_error(counter, errno)) {
765759
case COUNTER_FATAL:
766760
err = -1;
767761
goto err_out;
@@ -803,7 +797,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
803797
if (create_perf_stat_counter(counter, &stat_config, &target,
804798
evlist_cpu_itr.cpu_map_idx) < 0) {
805799

806-
switch (stat_handle_error(counter)) {
800+
switch (stat_handle_error(counter, errno)) {
807801
case COUNTER_FATAL:
808802
err = -1;
809803
goto err_out;

0 commit comments

Comments
 (0)