@@ -3187,12 +3187,18 @@ void perf_event_exec(void)
31873187 rcu_read_unlock ();
31883188}
31893189
3190+ struct perf_read_data {
3191+ struct perf_event * event ;
3192+ bool group ;
3193+ };
3194+
31903195/*
31913196 * Cross CPU call to read the hardware event
31923197 */
31933198static void __perf_event_read (void * info )
31943199{
3195- struct perf_event * event = info ;
3200+ struct perf_read_data * data = info ;
3201+ struct perf_event * sub , * event = data -> event ;
31963202 struct perf_event_context * ctx = event -> ctx ;
31973203 struct perf_cpu_context * cpuctx = __get_cpu_context (ctx );
31983204
@@ -3211,9 +3217,21 @@ static void __perf_event_read(void *info)
32113217 update_context_time (ctx );
32123218 update_cgrp_time_from_event (event );
32133219 }
3220+
32143221 update_event_times (event );
32153222 if (event -> state == PERF_EVENT_STATE_ACTIVE )
32163223 event -> pmu -> read (event );
3224+
3225+ if (!data -> group )
3226+ goto unlock ;
3227+
3228+ list_for_each_entry (sub , & event -> sibling_list , group_entry ) {
3229+ update_event_times (sub );
3230+ if (sub -> state == PERF_EVENT_STATE_ACTIVE )
3231+ sub -> pmu -> read (sub );
3232+ }
3233+
3234+ unlock :
32173235 raw_spin_unlock (& ctx -> lock );
32183236}
32193237
@@ -3278,15 +3296,19 @@ u64 perf_event_read_local(struct perf_event *event)
32783296 return val ;
32793297}
32803298
3281- static void perf_event_read (struct perf_event * event )
3299+ static void perf_event_read (struct perf_event * event , bool group )
32823300{
32833301 /*
32843302 * If event is enabled and currently active on a CPU, update the
32853303 * value in the event structure:
32863304 */
32873305 if (event -> state == PERF_EVENT_STATE_ACTIVE ) {
3306+ struct perf_read_data data = {
3307+ .event = event ,
3308+ .group = group ,
3309+ };
32883310 smp_call_function_single (event -> oncpu ,
3289- __perf_event_read , event , 1 );
3311+ __perf_event_read , & data , 1 );
32903312 } else if (event -> state == PERF_EVENT_STATE_INACTIVE ) {
32913313 struct perf_event_context * ctx = event -> ctx ;
32923314 unsigned long flags ;
@@ -3301,7 +3323,10 @@ static void perf_event_read(struct perf_event *event)
33013323 update_context_time (ctx );
33023324 update_cgrp_time_from_event (event );
33033325 }
3304- update_event_times (event );
3326+ if (group )
3327+ update_group_times (event );
3328+ else
3329+ update_event_times (event );
33053330 raw_spin_unlock_irqrestore (& ctx -> lock , flags );
33063331 }
33073332}
@@ -3733,7 +3758,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
37333758
37343759 mutex_lock (& event -> child_mutex );
37353760
3736- perf_event_read (event );
3761+ perf_event_read (event , false );
37373762 total += perf_event_count (event );
37383763
37393764 * enabled += event -> total_time_enabled +
@@ -3742,7 +3767,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
37423767 atomic64_read (& event -> child_total_time_running );
37433768
37443769 list_for_each_entry (child , & event -> child_list , child_list ) {
3745- perf_event_read (child );
3770+ perf_event_read (child , false );
37463771 total += perf_event_count (child );
37473772 * enabled += child -> total_time_enabled ;
37483773 * running += child -> total_time_running ;
@@ -3903,7 +3928,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
39033928
39043929static void _perf_event_reset (struct perf_event * event )
39053930{
3906- perf_event_read (event );
3931+ perf_event_read (event , false );
39073932 local64_set (& event -> count , 0 );
39083933 perf_event_update_userpage (event );
39093934}