
Commit bd27568

Peter Zijlstra authored and committed
perf: Rewrite core context handling
There have been various issues and limitations with the way perf uses
(task) contexts to track events. Most notable is the single hardware
PMU task context, which has resulted in a number of yucky things (both
proposed and merged).

Notably:
 - HW breakpoint PMU
 - ARM big.little PMU / Intel ADL PMU
 - Intel Branch Monitoring PMU
 - AMD IBS PMU
 - S390 cpum_cf PMU
 - PowerPC trace_imc PMU

*Current design:*

Currently we have a per task and per cpu perf_event_contexts:

  task_struct::perf_events_ctxp[] <-> perf_event_context <-> perf_cpu_context
       ^                                 |    ^     |           ^
       `---------------------------------'    |     `--> pmu ---'
                                              v           ^
                                         perf_event ------'

Each task has an array of pointers to a perf_event_context. Each
perf_event_context has a direct relation to a PMU and a group of
events for that PMU. The task related perf_event_context's have a
pointer back to that task.

Each PMU has a per-cpu pointer to a per-cpu perf_cpu_context, which
includes a perf_event_context, which again has a direct relation to
that PMU, and a group of events for that PMU.

The perf_cpu_context also tracks which task context is currently
associated with that CPU and includes a few other things like the
hrtimer for rotation etc.

Each perf_event is then associated with its PMU and one
perf_event_context.

*Proposed design:*

The new design proposed by this patch reduces this to a single task
context and a single CPU context, but adds some intermediate
data structures:

  task_struct::perf_event_ctxp -> perf_event_context <- perf_cpu_context
       ^                           |   ^ ^
       `---------------------------'   | |
                                       | |    perf_cpu_pmu_context <--.
                                       | `----.    ^                  |
                                       |      |    |                  |
                                       |      v    v                  |
                                       | ,--> perf_event_pmu_context  |
                                       | |                            |
                                       | |                            |
                                       v v                            |
                                  perf_event ---> pmu ----------------'

With the new design, perf_event_context holds the events of all pmus
in the (respective pinned/flexible) rbtrees. This is achieved by
adding the pmu to the rbtree key:

  {cpu, pmu, cgroup, group_index}

Each perf_event_context carries a list of perf_event_pmu_context,
which is used to hold per-pmu-per-context state. For example, it keeps
track of the currently active events for that pmu, a pmu-specific
task_ctx_data, a flag to tell whether rotation is required or not, etc.

Additionally, perf_cpu_pmu_context is used to hold per-pmu-per-cpu
state such as the hrtimer details that drive event rotation, a pointer
to the perf_event_pmu_context of the currently running task, and some
other ancillary information.

Each perf_event is associated with its pmu, a perf_event_context and a
perf_event_pmu_context.

Further optimizations of the current implementation are possible. For
example, ctx_resched() could be optimized to reschedule only the
events of a single pmu.

Much thanks to Ravi for picking this up and pushing it towards
completion.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Co-developed-by: Ravi Bangoria <[email protected]>
Signed-off-by: Ravi Bangoria <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 247f34f commit bd27568

16 files changed, 1178 insertions(+), 1094 deletions(-)
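To make the proposed object graph concrete, below is a small, compilable C sketch of the relationships the commit message describes. The struct and field names taken from the message (perf_event_context, perf_event_pmu_context, perf_cpu_pmu_context, task_ctx_data) are kept; everything else — the exact fields, their types, and the list linkage — is an illustrative simplification, not the kernel's real layout.

/* Per-pmu-per-cpu state: rotation hrtimer details, pointer to the
 * perf_event_pmu_context of the currently running task, etc.
 * (reduced to one field here). */
struct perf_cpu_pmu_context {
        struct perf_event_pmu_context *task_epc;
};

/* Per-pmu-per-context state: active events for that pmu, pmu-specific
 * task_ctx_data, rotation flag, ... (simplified). */
struct perf_event_pmu_context {
        struct pmu                    *pmu;
        struct perf_event_context     *ctx;
        void                          *task_ctx_data;
        struct perf_event_pmu_context *next;    /* ctx->pmu_ctx_list linkage */
};

/* A single context (one per task, one per cpu) holding events for all
 * pmus, keyed in the rbtrees by {cpu, pmu, cgroup, group_index}. */
struct perf_event_context {
        struct perf_event_pmu_context *pmu_ctx_list;
};

struct pmu {
        struct perf_cpu_pmu_context *cpu_pmu_ctx;      /* per-cpu in reality */
};

/* Each event points at its pmu, its context and its pmu-context. */
struct perf_event {
        struct pmu                    *pmu;
        struct perf_event_context     *ctx;
        struct perf_event_pmu_context *pmu_ctx;
};

int main(void)
{
        struct pmu pmu = { 0 };
        struct perf_event_context ctx = { 0 };
        struct perf_event_pmu_context epc = { .pmu = &pmu, .ctx = &ctx };
        struct perf_event event = { .pmu = &pmu, .ctx = &ctx, .pmu_ctx = &epc };

        ctx.pmu_ctx_list = &epc;
        (void)event;
        return 0;
}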

arch/arm64/kernel/perf_event.c

Lines changed: 11 additions & 7 deletions
@@ -806,10 +806,14 @@ static void armv8pmu_disable_event(struct perf_event *event)
 
 static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 {
-        struct perf_event_context *task_ctx =
-                this_cpu_ptr(cpu_pmu->pmu.pmu_cpu_context)->task_ctx;
+        struct perf_event_context *ctx;
+        int nr_user = 0;
 
-        if (sysctl_perf_user_access && task_ctx && task_ctx->nr_user)
+        ctx = perf_cpu_task_ctx();
+        if (ctx)
+                nr_user = ctx->nr_user;
+
+        if (sysctl_perf_user_access && nr_user)
                 armv8pmu_enable_user_access(cpu_pmu);
         else
                 armv8pmu_disable_user_access();
@@ -1019,10 +1023,10 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
         return 0;
 }
 
-static int armv8pmu_filter_match(struct perf_event *event)
+static bool armv8pmu_filter(struct pmu *pmu, int cpu)
 {
-        unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
-        return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
+        struct arm_pmu *armpmu = to_arm_pmu(pmu);
+        return !cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus);
 }
 
 static void armv8pmu_reset(void *info)
@@ -1253,7 +1257,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
         cpu_pmu->stop = armv8pmu_stop;
         cpu_pmu->reset = armv8pmu_reset;
         cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
-        cpu_pmu->filter_match = armv8pmu_filter_match;
+        cpu_pmu->filter = armv8pmu_filter;
 
         cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
 
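Note the semantics here: the old per-event filter_match() returned non-zero when an event could be scheduled, whereas the new per-PMU filter(pmu, cpu) callback returns true when the whole PMU should be skipped on that CPU — hence the negated cpumask_test_cpu() above. A standalone userspace sketch of that contract follows; the caller shown is purely illustrative (the perf-core call site is not part of this diff), only the hook's shape and polarity are taken from the patch.

#include <stdbool.h>
#include <stdio.h>

struct pmu;

/* New-style hook: returns true when events of this PMU must be
 * skipped on the given CPU (mirrors armv8pmu_filter()). */
typedef bool (*pmu_filter_fn)(struct pmu *pmu, int cpu);

struct pmu {
        const char   *name;
        pmu_filter_fn filter;   /* optional; NULL means "never filter" */
};

/* Illustrative caller: a PMU without a filter is always schedulable. */
static bool pmu_filtered_out(struct pmu *pmu, int cpu)
{
        return pmu->filter && pmu->filter(pmu, cpu);
}

/* Pretend only CPUs 0-3 are in this PMU's supported mask. */
static bool fake_big_little_filter(struct pmu *pmu, int cpu)
{
        (void)pmu;
        return cpu >= 4;
}

int main(void)
{
        struct pmu p = { .name = "armv8_pmuv3", .filter = fake_big_little_filter };

        printf("cpu 0 filtered out: %d\n", pmu_filtered_out(&p, 0));    /* 0 */
        printf("cpu 5 filtered out: %d\n", pmu_filtered_out(&p, 5));    /* 1 */
        return 0;
}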

arch/powerpc/perf/core-book3s.c

Lines changed: 4 additions & 4 deletions
@@ -132,7 +132,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
-static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
+static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
 static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
 static void pmao_restore_workaround(bool ebb) { }
 #endif /* CONFIG_PPC32 */
@@ -424,7 +424,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
                 cpuhw->bhrb_context = event->ctx;
         }
         cpuhw->bhrb_users++;
-        perf_sched_cb_inc(event->ctx->pmu);
+        perf_sched_cb_inc(event->pmu);
 }
 
 static void power_pmu_bhrb_disable(struct perf_event *event)
@@ -436,7 +436,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
 
         WARN_ON_ONCE(!cpuhw->bhrb_users);
         cpuhw->bhrb_users--;
-        perf_sched_cb_dec(event->ctx->pmu);
+        perf_sched_cb_dec(event->pmu);
 
         if (!cpuhw->disabled && !cpuhw->bhrb_users) {
                 /* BHRB cannot be turned off when other
@@ -451,7 +451,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
 /* Called from ctxsw to prevent one process's branch entries to
  * mingle with the other process's entries during context switch.
  */
-static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
+static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
         if (!ppmu->bhrb_nr)
                 return;

arch/s390/kernel/perf_pai_crypto.c

Lines changed: 1 addition & 1 deletion
@@ -379,7 +379,7 @@ static int paicrypt_push_sample(void)
 /* Called on schedule-in and schedule-out. No access to event structure,
  * but for sampling only event CRYPTO_ALL is allowed.
  */
-static void paicrypt_sched_task(struct perf_event_context *ctx, bool sched_in)
+static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
         /* We started with a clean page on event installation. So read out
          * results on schedule_out and if page was dirty, clear values.

arch/s390/kernel/perf_pai_ext.c

Lines changed: 1 addition & 1 deletion
@@ -471,7 +471,7 @@ static int paiext_push_sample(void)
 /* Called on schedule-in and schedule-out. No access to event structure,
  * but for sampling only event NNPA_ALL is allowed.
  */
-static void paiext_sched_task(struct perf_event_context *ctx, bool sched_in)
+static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
         /* We started with a clean page on event installation. So read out
          * results on schedule_out and if page was dirty, clear values.

arch/x86/events/amd/brs.c

Lines changed: 1 addition & 1 deletion
@@ -384,7 +384,7 @@ static void amd_brs_poison_buffer(void)
  * On ctxswin, sched_in = true, called after the PMU has started
  * On ctxswout, sched_in = false, called before the PMU is stopped
  */
-void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in)
+void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 

arch/x86/events/amd/lbr.c

Lines changed: 3 additions & 3 deletions
@@ -352,7 +352,7 @@ void amd_pmu_lbr_add(struct perf_event *event)
                 cpuc->br_sel = reg->reg;
         }
 
-        perf_sched_cb_inc(event->ctx->pmu);
+        perf_sched_cb_inc(event->pmu);
 
         if (!cpuc->lbr_users++ && !event->total_time_running)
                 amd_pmu_lbr_reset();
@@ -370,10 +370,10 @@ void amd_pmu_lbr_del(struct perf_event *event)
 
         cpuc->lbr_users--;
         WARN_ON_ONCE(cpuc->lbr_users < 0);
-        perf_sched_cb_dec(event->ctx->pmu);
+        perf_sched_cb_dec(event->pmu);
 }
 
-void amd_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
+void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 

arch/x86/events/core.c

Lines changed: 14 additions & 30 deletions
@@ -90,6 +90,8 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
 
+DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);
+
 /*
  * This one is magic, it will get called even when PMU init fails (because
  * there is no PMU), in which case it should simply return NULL.
@@ -2031,6 +2033,7 @@ static void x86_pmu_static_call_update(void)
         static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
 
         static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);
+        static_call_update(x86_pmu_filter, x86_pmu.filter);
 }
 
 static void _x86_pmu_read(struct perf_event *event)
@@ -2052,23 +2055,6 @@ void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
         pr_info("... event mask: %016Lx\n", intel_ctrl);
 }
 
-/*
- * The generic code is not hybrid friendly. The hybrid_pmu->pmu
- * of the first registered PMU is unconditionally assigned to
- * each possible cpuctx->ctx.pmu.
- * Update the correct hybrid PMU to the cpuctx->ctx.pmu.
- */
-void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu)
-{
-        struct perf_cpu_context *cpuctx;
-
-        if (!pmu->pmu_cpu_context)
-                return;
-
-        cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
-        cpuctx->ctx.pmu = pmu;
-}
-
 static int __init init_hw_perf_events(void)
 {
         struct x86_pmu_quirk *quirk;
@@ -2195,9 +2181,6 @@ static int __init init_hw_perf_events(void)
                                 (hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1);
                 if (err)
                         break;
-
-                if (cpu_type == hybrid_pmu->cpu_type)
-                        x86_pmu_update_cpu_context(&hybrid_pmu->pmu, raw_smp_processor_id());
         }
 
         if (i < x86_pmu.num_hybrid_pmus) {
@@ -2646,15 +2629,15 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
         NULL,
 };
 
-static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
+static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
-        static_call_cond(x86_pmu_sched_task)(ctx, sched_in);
+        static_call_cond(x86_pmu_sched_task)(pmu_ctx, sched_in);
 }
 
-static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
-                                  struct perf_event_context *next)
+static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
+                                  struct perf_event_pmu_context *next_epc)
 {
-        static_call_cond(x86_pmu_swap_task_ctx)(prev, next);
+        static_call_cond(x86_pmu_swap_task_ctx)(prev_epc, next_epc);
 }
 
 void perf_check_microcode(void)
@@ -2689,12 +2672,13 @@ static int x86_pmu_aux_output_match(struct perf_event *event)
         return 0;
 }
 
-static int x86_pmu_filter_match(struct perf_event *event)
+static bool x86_pmu_filter(struct pmu *pmu, int cpu)
 {
-        if (x86_pmu.filter_match)
-                return x86_pmu.filter_match(event);
+        bool ret = false;
 
-        return 1;
+        static_call_cond(x86_pmu_filter)(pmu, cpu, &ret);
+
+        return ret;
 }
 
 static struct pmu pmu = {
@@ -2725,7 +2709,7 @@ static struct pmu pmu = {
 
         .aux_output_match = x86_pmu_aux_output_match,
 
-        .filter_match = x86_pmu_filter_match,
+        .filter = x86_pmu_filter,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
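The generic pmu::filter hook on x86 goes through a static call that only the hybrid (ADL-style) driver installs; with no handler the conditional call is a no-op and ret keeps its false default, meaning nothing is filtered. The sketch below is a plain userspace analogue of that optional-hook-with-a-safe-default pattern — a function pointer stands in for static_call_cond(), and the hook shown is hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct pmu { const char *name; };

/* Vendor hook with the same out-parameter shape as intel_pmu_filter(). */
static void (*filter_hook)(struct pmu *pmu, int cpu, bool *ret);

static bool x86_pmu_filter_analogue(struct pmu *pmu, int cpu)
{
        bool ret = false;               /* default: do not filter anything */

        if (filter_hook)                /* stands in for static_call_cond() */
                filter_hook(pmu, cpu, &ret);

        return ret;
}

/* Hypothetical hybrid hook: pretend odd CPUs are outside supported_cpus. */
static void hybrid_filter(struct pmu *pmu, int cpu, bool *ret)
{
        (void)pmu;
        *ret = (cpu & 1);
}

int main(void)
{
        struct pmu p = { .name = "cpu_core" };

        printf("no hook,   cpu 1: %d\n", x86_pmu_filter_analogue(&p, 1)); /* 0 */
        filter_hook = hybrid_filter;
        printf("with hook, cpu 1: %d\n", x86_pmu_filter_analogue(&p, 1)); /* 1 */
        return 0;
}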

arch/x86/events/intel/core.c

Lines changed: 10 additions & 13 deletions
@@ -4536,8 +4536,6 @@ static bool init_hybrid_pmu(int cpu)
         cpumask_set_cpu(cpu, &pmu->supported_cpus);
         cpuc->pmu = &pmu->pmu;
 
-        x86_pmu_update_cpu_context(&pmu->pmu, cpu);
-
         return true;
 }
 
@@ -4671,17 +4669,17 @@ static void intel_pmu_cpu_dead(int cpu)
                 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
 }
 
-static void intel_pmu_sched_task(struct perf_event_context *ctx,
+static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
                                  bool sched_in)
 {
-        intel_pmu_pebs_sched_task(ctx, sched_in);
-        intel_pmu_lbr_sched_task(ctx, sched_in);
+        intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
+        intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
 }
 
-static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
-                                    struct perf_event_context *next)
+static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
+                                    struct perf_event_pmu_context *next_epc)
 {
-        intel_pmu_lbr_swap_task_ctx(prev, next);
+        intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
 }
 
 static int intel_pmu_check_period(struct perf_event *event, u64 value)
@@ -4705,12 +4703,11 @@ static int intel_pmu_aux_output_match(struct perf_event *event)
         return is_intel_pt_event(event);
 }
 
-static int intel_pmu_filter_match(struct perf_event *event)
+static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
 {
-        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
-        unsigned int cpu = smp_processor_id();
+        struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
 
-        return cpumask_test_cpu(cpu, &pmu->supported_cpus);
+        *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
 }
 
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -6412,7 +6409,7 @@ __init int intel_pmu_init(void)
                 static_call_update(intel_pmu_set_topdown_event_period,
                                    &adl_set_topdown_event_period);
 
-                x86_pmu.filter_match = intel_pmu_filter_match;
+                x86_pmu.filter = intel_pmu_filter;
                 x86_pmu.get_event_constraints = adl_get_event_constraints;
                 x86_pmu.hw_config = adl_hw_config;
                 x86_pmu.limit_period = spr_limit_period;

arch/x86/events/intel/ds.c

Lines changed: 2 additions & 2 deletions
@@ -1059,7 +1059,7 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
         return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
 }
 
-void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
@@ -1167,7 +1167,7 @@ static void
 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
                   struct perf_event *event, bool add)
 {
-        struct pmu *pmu = event->ctx->pmu;
+        struct pmu *pmu = event->pmu;
         /*
          * Make sure we get updated with the first PEBS
          * event. It will trigger also during removal, but

arch/x86/events/intel/lbr.c

Lines changed: 15 additions & 15 deletions
@@ -515,21 +515,21 @@ static void __intel_pmu_lbr_save(void *ctx)
         cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
 }
 
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
-                                 struct perf_event_context *next)
+void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
+                                 struct perf_event_pmu_context *next_epc)
 {
         void *prev_ctx_data, *next_ctx_data;
 
-        swap(prev->task_ctx_data, next->task_ctx_data);
+        swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
 
         /*
-         * Architecture specific synchronization makes sense in
-         * case both prev->task_ctx_data and next->task_ctx_data
+         * Architecture specific synchronization makes sense in case
+         * both prev_epc->task_ctx_data and next_epc->task_ctx_data
          * pointers are allocated.
          */
 
-        prev_ctx_data = next->task_ctx_data;
-        next_ctx_data = prev->task_ctx_data;
+        prev_ctx_data = next_epc->task_ctx_data;
+        next_ctx_data = prev_epc->task_ctx_data;
 
         if (!prev_ctx_data || !next_ctx_data)
                 return;
@@ -538,7 +538,7 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
              task_context_opt(next_ctx_data)->lbr_callstack_users);
 }
 
-void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
+void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
 {
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         void *task_ctx;
@@ -551,7 +551,7 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
          * the task was scheduled out, restore the stack. Otherwise flush
          * the LBR stack.
          */
-        task_ctx = ctx ? ctx->task_ctx_data : NULL;
+        task_ctx = pmu_ctx ? pmu_ctx->task_ctx_data : NULL;
         if (task_ctx) {
                 if (sched_in)
                         __intel_pmu_lbr_restore(task_ctx);
@@ -587,8 +587,8 @@ void intel_pmu_lbr_add(struct perf_event *event)
 
         cpuc->br_sel = event->hw.branch_reg.reg;
 
-        if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
-                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
+        if (branch_user_callstack(cpuc->br_sel) && event->pmu_ctx->task_ctx_data)
+                task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users++;
 
         /*
          * Request pmu::sched_task() callback, which will fire inside the
@@ -611,7 +611,7 @@ void intel_pmu_lbr_add(struct perf_event *event)
          */
         if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
                 cpuc->lbr_pebs_users++;
-        perf_sched_cb_inc(event->ctx->pmu);
+        perf_sched_cb_inc(event->pmu);
         if (!cpuc->lbr_users++ && !event->total_time_running)
                 intel_pmu_lbr_reset();
 }
@@ -664,8 +664,8 @@ void intel_pmu_lbr_del(struct perf_event *event)
                 return;
 
         if (branch_user_callstack(cpuc->br_sel) &&
-            event->ctx->task_ctx_data)
-                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
+            event->pmu_ctx->task_ctx_data)
+                task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users--;
 
         if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
                 cpuc->lbr_select = 0;
@@ -675,7 +675,7 @@ void intel_pmu_lbr_del(struct perf_event *event)
         cpuc->lbr_users--;
         WARN_ON_ONCE(cpuc->lbr_users < 0);
         WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
-        perf_sched_cb_dec(event->ctx->pmu);
+        perf_sched_cb_dec(event->pmu);
 }
 
 static inline bool vlbr_exclude_host(void)
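The LBR call-stack bookkeeping shows the ownership change most directly: task_ctx_data used to hang off the per-PMU perf_event_context and now lives in the per-pmu-per-context perf_event_pmu_context, so it is reached through event->pmu_ctx rather than event->ctx. A compilable toy version of that access path follows; the structs are stripped down to the one field that matters here and imply nothing about the real layouts:

#include <stdio.h>

struct x86_perf_task_context_opt {
        int lbr_callstack_users;
};

/* Per-pmu-per-context state; after the patch this is where the
 * pmu-specific task_ctx_data lives. */
struct perf_event_pmu_context {
        void *task_ctx_data;
};

struct perf_event {
        struct perf_event_pmu_context *pmu_ctx;
};

/* The real helper offsets into a larger buffer; this toy one does not. */
static struct x86_perf_task_context_opt *task_context_opt(void *ctx_data)
{
        return ctx_data;
}

int main(void)
{
        struct x86_perf_task_context_opt opt = { .lbr_callstack_users = 0 };
        struct perf_event_pmu_context epc = { .task_ctx_data = &opt };
        struct perf_event event = { .pmu_ctx = &epc };

        /* New-style access path, as in intel_pmu_lbr_add() after the patch. */
        if (event.pmu_ctx->task_ctx_data)
                task_context_opt(event.pmu_ctx->task_ctx_data)->lbr_callstack_users++;

        printf("lbr_callstack_users = %d\n", opt.lbr_callstack_users);  /* 1 */
        return 0;
}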
