Skip to content

Commit 9f0c4fa

Browse files
Kan Liang authored and Peter Zijlstra committed
perf/core: Add a new PERF_EV_CAP_SIBLING event capability
Current perf assumes that events in a group are independent. Close an event doesn't impact the value of the other events in the same group. If the closed event is a member, after the event closure, other events are still running like a group. If the closed event is a leader, other events are running as singleton events. Add PERF_EV_CAP_SIBLING to allow events to indicate they require being part of a group, and when the leader dies they cannot exist independently. Suggested-by: Peter Zijlstra <[email protected]> Signed-off-by: Kan Liang <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 58da7db commit 9f0c4fa

File tree

2 files changed

+37
-5
lines changed

2 files changed

+37
-5
lines changed

include/linux/perf_event.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -576,9 +576,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
576576
* PERF_EV_CAP_SOFTWARE: Is a software event.
577577
* PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
578578
* from any CPU in the package where it is active.
579+
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
580+
* cannot be a group leader. If an event with this flag is detached from the
581+
* group it is scheduled out and moved into an unrecoverable ERROR state.
579582
*/
580583
#define PERF_EV_CAP_SOFTWARE BIT(0)
581584
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
585+
#define PERF_EV_CAP_SIBLING BIT(2)
582586

583587
#define SWEVENT_HLIST_BITS 8
584588
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)

kernel/events/core.c

Lines changed: 33 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2133,8 +2133,24 @@ static inline struct list_head *get_event_list(struct perf_event *event)
21332133
return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
21342134
}
21352135

2136+
/*
2137+
* Events that have PERF_EV_CAP_SIBLING require being part of a group and
2138+
* cannot exist on their own, schedule them out and move them into the ERROR
2139+
* state. Also see _perf_event_enable(), it will not be able to recover
2140+
* this ERROR state.
2141+
*/
2142+
static inline void perf_remove_sibling_event(struct perf_event *event)
2143+
{
2144+
struct perf_event_context *ctx = event->ctx;
2145+
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2146+
2147+
event_sched_out(event, cpuctx, ctx);
2148+
perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2149+
}
2150+
21362151
static void perf_group_detach(struct perf_event *event)
21372152
{
2153+
struct perf_event *leader = event->group_leader;
21382154
struct perf_event *sibling, *tmp;
21392155
struct perf_event_context *ctx = event->ctx;
21402156

@@ -2153,7 +2169,7 @@ static void perf_group_detach(struct perf_event *event)
21532169
/*
21542170
* If this is a sibling, remove it from its group.
21552171
*/
2156-
if (event->group_leader != event) {
2172+
if (leader != event) {
21572173
list_del_init(&event->sibling_list);
21582174
event->group_leader->nr_siblings--;
21592175
goto out;
@@ -2166,6 +2182,9 @@ static void perf_group_detach(struct perf_event *event)
21662182
*/
21672183
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
21682184

2185+
if (sibling->event_caps & PERF_EV_CAP_SIBLING)
2186+
perf_remove_sibling_event(sibling);
2187+
21692188
sibling->group_leader = sibling;
21702189
list_del_init(&sibling->sibling_list);
21712190

@@ -2183,10 +2202,10 @@ static void perf_group_detach(struct perf_event *event)
21832202
}
21842203

21852204
out:
2186-
perf_event__header_size(event->group_leader);
2187-
2188-
for_each_sibling_event(tmp, event->group_leader)
2205+
for_each_sibling_event(tmp, leader)
21892206
perf_event__header_size(tmp);
2207+
2208+
perf_event__header_size(leader);
21902209
}
21912210

21922211
static bool is_orphaned_event(struct perf_event *event)
@@ -2979,6 +2998,7 @@ static void _perf_event_enable(struct perf_event *event)
29792998
raw_spin_lock_irq(&ctx->lock);
29802999
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
29813000
event->state < PERF_EVENT_STATE_ERROR) {
3001+
out:
29823002
raw_spin_unlock_irq(&ctx->lock);
29833003
return;
29843004
}
@@ -2990,8 +3010,16 @@ static void _perf_event_enable(struct perf_event *event)
29903010
* has gone back into error state, as distinct from the task having
29913011
* been scheduled away before the cross-call arrived.
29923012
*/
2993-
if (event->state == PERF_EVENT_STATE_ERROR)
3013+
if (event->state == PERF_EVENT_STATE_ERROR) {
3014+
/*
3015+
* Detached SIBLING events cannot leave ERROR state.
3016+
*/
3017+
if (event->event_caps & PERF_EV_CAP_SIBLING &&
3018+
event->group_leader == event)
3019+
goto out;
3020+
29943021
event->state = PERF_EVENT_STATE_OFF;
3022+
}
29953023
raw_spin_unlock_irq(&ctx->lock);
29963024

29973025
event_function_call(event, __perf_event_enable, NULL);

0 commit comments

Comments (0)