@@ -3184,12 +3184,18 @@ void perf_event_exec(void)
 	rcu_read_unlock();
 }
 
+struct perf_read_data {
+	struct perf_event *event;
+	bool group;
+};
+
 /*
  * Cross CPU call to read the hardware event
  */
 static void __perf_event_read(void *info)
 {
-	struct perf_event *event = info;
+	struct perf_read_data *data = info;
+	struct perf_event *sub, *event = data->event;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
@@ -3208,9 +3214,21 @@ static void __perf_event_read(void *info)
 		update_context_time(ctx);
 		update_cgrp_time_from_event(event);
 	}
+
 	update_event_times(event);
 	if (event->state == PERF_EVENT_STATE_ACTIVE)
 		event->pmu->read(event);
+
+	if (!data->group)
+		goto unlock;
+
+	list_for_each_entry(sub, &event->sibling_list, group_entry) {
+		update_event_times(sub);
+		if (sub->state == PERF_EVENT_STATE_ACTIVE)
+			sub->pmu->read(sub);
+	}
+
+unlock:
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -3275,15 +3293,19 @@ u64 perf_event_read_local(struct perf_event *event)
 	return val;
 }
 
-static void perf_event_read(struct perf_event *event)
+static void perf_event_read(struct perf_event *event, bool group)
 {
 	/*
 	 * If event is enabled and currently active on a CPU, update the
 	 * value in the event structure:
 	 */
 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
+		struct perf_read_data data = {
+			.event = event,
+			.group = group,
+		};
 		smp_call_function_single(event->oncpu,
-					 __perf_event_read, event, 1);
+					 __perf_event_read, &data, 1);
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
@@ -3298,7 +3320,10 @@ static void perf_event_read(struct perf_event *event)
 		update_context_time(ctx);
 		update_cgrp_time_from_event(event);
 	}
-	update_event_times(event);
+	if (group)
+		update_group_times(event);
+	else
+		update_event_times(event);
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 }
@@ -3817,7 +3842,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 
 	mutex_lock(&event->child_mutex);
 
-	perf_event_read(event);
+	perf_event_read(event, false);
 	total += perf_event_count(event);
 
 	*enabled += event->total_time_enabled +
@@ -3826,7 +3851,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 		atomic64_read(&event->child_total_time_running);
 
 	list_for_each_entry(child, &event->child_list, child_list) {
-		perf_event_read(child);
+		perf_event_read(child, false);
 		total += perf_event_count(child);
 		*enabled += child->total_time_enabled;
 		*running += child->total_time_running;
@@ -3987,7 +4012,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void _perf_event_reset(struct perf_event *event)
 {
-	perf_event_read(event);
+	perf_event_read(event, false);
 	local64_set(&event->count, 0);
 	perf_event_update_userpage(event);
 }
0 commit comments