@@ -2149,8 +2149,9 @@ perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
 }
 
 static void put_event(struct perf_event *event);
-static void event_sched_out(struct perf_event *event,
-			    struct perf_event_context *ctx);
+static void __event_disable(struct perf_event *event,
+			    struct perf_event_context *ctx,
+			    enum perf_event_state state);
 
 static void perf_put_aux_event(struct perf_event *event)
 {
@@ -2183,8 +2184,7 @@ static void perf_put_aux_event(struct perf_event *event)
 		 * state so that we don't try to schedule it again. Note
 		 * that perf_event_enable() will clear the ERROR status.
 		 */
-		event_sched_out(iter, ctx);
-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+		__event_disable(iter, ctx, PERF_EVENT_STATE_ERROR);
 	}
 }
 
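For reference, PERF_EVENT_STATE_ERROR sits below PERF_EVENT_STATE_OFF in the state ordering, which is what keeps an event parked in ERROR from being picked up by the scheduler until an explicit enable. The states, as defined in include/linux/perf_event.h (the gloss comments here are mine, not the kernel's):

enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,	/* needs an explicit enable to recover */
	PERF_EVENT_STATE_OFF		= -1,	/* what __perf_event_disable() targets */
	PERF_EVENT_STATE_INACTIVE	=  0,	/* eligible, not currently scheduled */
	PERF_EVENT_STATE_ACTIVE		=  1,	/* currently counting */
};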
@@ -2242,18 +2242,6 @@ static inline struct list_head *get_event_list(struct perf_event *event)
 		&event->pmu_ctx->flexible_active;
 }
 
-/*
- * Events that have PERF_EV_CAP_SIBLING require being part of a group and
- * cannot exist on their own, schedule them out and move them into the ERROR
- * state. Also see _perf_event_enable(), it will not be able to recover
- * this ERROR state.
- */
-static inline void perf_remove_sibling_event(struct perf_event *event)
-{
-	event_sched_out(event, event->ctx);
-	perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-}
-
 static void perf_group_detach(struct perf_event *event)
 {
 	struct perf_event *leader = event->group_leader;
@@ -2289,8 +2277,15 @@ static void perf_group_detach(struct perf_event *event)
 	 */
 	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
 
+		/*
+		 * Events that have PERF_EV_CAP_SIBLING require being part of
+		 * a group and cannot exist on their own, schedule them out
+		 * and move them into the ERROR state. Also see
+		 * _perf_event_enable(), it will not be able to recover this
+		 * ERROR state.
+		 */
 		if (sibling->event_caps & PERF_EV_CAP_SIBLING)
-			perf_remove_sibling_event(sibling);
+			__event_disable(sibling, ctx, PERF_EVENT_STATE_ERROR);
 
 		sibling->group_leader = sibling;
 		list_del_init(&sibling->sibling_list);
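The "will not be able to recover" remark is grounded in the enable path: a sibling detached here becomes its own group leader (the sibling->group_leader = sibling assignment above), and _perf_event_enable() refuses to lift such a detached SIBLING event out of ERROR. A condensed sketch of that guard, paraphrased from kernel/events/core.c rather than quoted verbatim:

	if (event->state == PERF_EVENT_STATE_ERROR) {
		/* Detached SIBLING events cannot leave ERROR state. */
		if (event->event_caps & PERF_EV_CAP_SIBLING &&
		    event->group_leader == event)
			return;

		event->state = PERF_EVENT_STATE_OFF;
	}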
@@ -2562,6 +2557,15 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 	event_function_call(event, __perf_remove_from_context, (void *)flags);
 }
 
+static void __event_disable(struct perf_event *event,
+			    struct perf_event_context *ctx,
+			    enum perf_event_state state)
+{
+	event_sched_out(event, ctx);
+	perf_cgroup_event_disable(event, ctx);
+	perf_event_set_state(event, state);
+}
+
 /*
  * Cross CPU call to disable a performance event
 */
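Two points about the new helper. First, every disable path now shares one ordering: schedule the event out, update cgroup bookkeeping, then set the final state. Second, it brings perf_cgroup_event_disable() to the two ERROR paths above, which previously set the state without touching cgroup accounting. After this change the three call sites reduce to:

__event_disable(iter,    ctx, PERF_EVENT_STATE_ERROR);	/* perf_put_aux_event()   */
__event_disable(sibling, ctx, PERF_EVENT_STATE_ERROR);	/* perf_group_detach()    */
__event_disable(event,   ctx, PERF_EVENT_STATE_OFF);	/* __perf_event_disable() */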
@@ -2576,13 +2580,18 @@ static void __perf_event_disable(struct perf_event *event,
 	perf_pmu_disable(event->pmu_ctx->pmu);
 	ctx_time_update_event(ctx, event);
 
+	/*
+	 * When disabling a group leader, the whole group becomes ineligible
+	 * to run, so schedule out the full group.
+	 */
 	if (event == event->group_leader)
 		group_sched_out(event, ctx);
-	else
-		event_sched_out(event, ctx);
 
-	perf_event_set_state(event, PERF_EVENT_STATE_OFF);
-	perf_cgroup_event_disable(event, ctx);
+	/*
+	 * But only mark the leader OFF; the siblings will remain
+	 * INACTIVE.
+	 */
+	__event_disable(event, ctx, PERF_EVENT_STATE_OFF);
 
 	perf_pmu_enable(event->pmu_ctx->pmu);
 }
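Not part of the patch, but the leader semantics in the last hunk are easy to observe from userspace: disabling only the group leader stops the siblings from counting as well, because the whole group is scheduled out. A minimal sketch using software events (so it typically runs unprivileged; Linux only, error handling kept to a minimum):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_open(struct perf_event_attr *attr, int group_fd)
{
	/* pid = 0 (this task), cpu = -1 (any CPU), flags = 0 */
	return syscall(SYS_perf_event_open, attr, 0, -1, group_fd, 0);
}

static uint64_t read_count(int fd)
{
	uint64_t v = 0;

	if (read(fd, &v, sizeof(v)) != sizeof(v))
		perror("read");
	return v;
}

static void spin(void)
{
	for (volatile unsigned long i = 0; i < 50000000UL; i++)
		;
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.disabled = 1;			/* group starts disabled */
	leader = perf_open(&attr, -1);

	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.disabled = 0;			/* siblings follow the leader */
	sibling = perf_open(&attr, leader);
	if (leader < 0 || sibling < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	spin();
	/* Disabling only the leader schedules out the full group. */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t l1 = read_count(leader), s1 = read_count(sibling);
	spin();
	uint64_t l2 = read_count(leader), s2 = read_count(sibling);

	/* Neither counter advances while the leader is disabled. */
	printf("leader:  %llu -> %llu\n", (unsigned long long)l1, (unsigned long long)l2);
	printf("sibling: %llu -> %llu\n", (unsigned long long)s1, (unsigned long long)s2);

	close(leader);
	close(sibling);
	return 0;
}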