@@ -3167,14 +3167,44 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
 	}
 }
 
+static int setup_cpus_switch_event(struct perf_sched *sched)
+{
+	unsigned int i;
+
+	sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
+	if (!sched->cpu_last_switched)
+		return -1;
+
+	sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
+	if (!sched->curr_pid) {
+		zfree(&sched->cpu_last_switched);
+		return -1;
+	}
+
+	for (i = 0; i < MAX_CPUS; i++)
+		sched->curr_pid[i] = -1;
+
+	return 0;
+}
+
+static void free_cpus_switch_event(struct perf_sched *sched)
+{
+	zfree(&sched->curr_pid);
+	zfree(&sched->cpu_last_switched);
+}
+
 static int perf_sched__lat(struct perf_sched *sched)
 {
+	int rc = -1;
 	struct rb_node *next;
 
 	setup_pager();
 
+	if (setup_cpus_switch_event(sched))
+		return rc;
+
 	if (perf_sched__read_events(sched))
-		return -1;
+		goto out_free_cpus_switch_event;
 
 	perf_sched__merge_lat(sched);
 	perf_sched__sort_lat(sched);
@@ -3203,7 +3233,11 @@ static int perf_sched__lat(struct perf_sched *sched)
 	print_bad_events(sched);
 	printf("\n");
 
-	return 0;
+	rc = 0;
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+	return rc;
 }
 
 static int setup_map_cpus(struct perf_sched *sched)
@@ -3270,9 +3304,12 @@ static int perf_sched__map(struct perf_sched *sched)
 	if (!sched->curr_thread)
 		return rc;
 
-	if (setup_map_cpus(sched))
+	if (setup_cpus_switch_event(sched))
 		goto out_free_curr_thread;
 
+	if (setup_map_cpus(sched))
+		goto out_free_cpus_switch_event;
+
 	if (setup_color_pids(sched))
 		goto out_put_map_cpus;
 
@@ -3296,6 +3333,9 @@ static int perf_sched__map(struct perf_sched *sched)
 	zfree(&sched->map.comp_cpus);
 	perf_cpu_map__put(sched->map.cpus);
 
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
 out_free_curr_thread:
 	zfree(&sched->curr_thread);
 	return rc;
@@ -3309,14 +3349,18 @@ static int perf_sched__replay(struct perf_sched *sched)
 	mutex_init(&sched->start_work_mutex);
 	mutex_init(&sched->work_done_wait_mutex);
 
+	ret = setup_cpus_switch_event(sched);
+	if (ret)
+		goto out_mutex_destroy;
+
 	calibrate_run_measurement_overhead(sched);
 	calibrate_sleep_measurement_overhead(sched);
 
 	test_calibrations(sched);
 
 	ret = perf_sched__read_events(sched);
 	if (ret)
-		goto out_mutex_destroy;
+		goto out_free_cpus_switch_event;
 
 	printf("nr_run_events:        %ld\n", sched->nr_run_events);
 	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
@@ -3342,6 +3386,9 @@ static int perf_sched__replay(struct perf_sched *sched)
 	sched->thread_funcs_exit = true;
 	destroy_tasks(sched);
 
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
 out_mutex_destroy:
 	mutex_destroy(&sched->start_work_mutex);
 	mutex_destroy(&sched->work_done_wait_mutex);
@@ -3580,21 +3627,7 @@ int cmd_sched(int argc, const char **argv)
 		.switch_event	    = replay_switch_event,
 		.fork_event	    = replay_fork_event,
 	};
-	unsigned int i;
-	int ret = 0;
-
-	sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
-	if (!sched.cpu_last_switched) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
-	if (!sched.curr_pid) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	for (i = 0; i < MAX_CPUS; i++)
-		sched.curr_pid[i] = -1;
+	int ret;
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3605,9 +3638,9 @@ int cmd_sched(int argc, const char **argv)
 	 * Aliased to 'perf script' for now:
 	 */
 	if (!strcmp(argv[0], "script")) {
-		ret = cmd_script(argc, argv);
+		return cmd_script(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
-		ret = __cmd_record(argc, argv);
+		return __cmd_record(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
 		sched.tp_handler = &lat_ops;
 		if (argc > 1) {
@@ -3616,7 +3649,7 @@ int cmd_sched(int argc, const char **argv)
 				usage_with_options(latency_usage, latency_options);
 		}
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__lat(&sched);
+		return perf_sched__lat(&sched);
 	} else if (!strcmp(argv[0], "map")) {
 		if (argc) {
 			argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3625,15 +3658,15 @@ int cmd_sched(int argc, const char **argv)
 		}
 		sched.tp_handler = &map_ops;
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__map(&sched);
+		return perf_sched__map(&sched);
 	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
 		sched.tp_handler = &replay_ops;
 		if (argc) {
 			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
 			if (argc)
 				usage_with_options(replay_usage, replay_options);
 		}
-		ret = perf_sched__replay(&sched);
+		return perf_sched__replay(&sched);
 	} else if (!strcmp(argv[0], "timehist")) {
 		if (argc) {
 			argc = parse_options(argc, argv, timehist_options,
@@ -3649,21 +3682,16 @@ int cmd_sched(int argc, const char **argv)
 				parse_options_usage(NULL, timehist_options, "w", true);
 			if (sched.show_next)
 				parse_options_usage(NULL, timehist_options, "n", true);
-			ret = -EINVAL;
-			goto out;
+			return -EINVAL;
 		}
 		ret = symbol__validate_sym_arguments();
 		if (ret)
-			goto out;
+			return ret;
 
-		ret = perf_sched__timehist(&sched);
+		return perf_sched__timehist(&sched);
 	} else {
 		usage_with_options(sched_usage, sched_options);
 	}
 
-out:
-	free(sched.curr_pid);
-	free(sched.cpu_last_switched);
-
-	return ret;
+	return 0;
 }
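
The diff above moves the per-CPU `curr_pid` and `cpu_last_switched` allocations out of `cmd_sched()` and into per-subcommand `setup_cpus_switch_event()` / `free_cpus_switch_event()` helpers, with every exit path after setup funnelling through a single cleanup label. The following is a minimal, self-contained sketch of that setup/teardown-with-goto-unwind pattern; the names (`sched_state`, `setup_state`, `run_subcommand`) are hypothetical and are not taken from the perf sources.

```c
#include <stdlib.h>

#define MAX_CPUS 4096

/* Hypothetical stand-in for the per-CPU switch state kept in struct perf_sched. */
struct sched_state {
	unsigned long *cpu_last_switched;
	int *curr_pid;
};

/* Allocate the per-CPU arrays; on partial failure, release what was already allocated. */
static int setup_state(struct sched_state *s)
{
	s->cpu_last_switched = calloc(MAX_CPUS, sizeof(*s->cpu_last_switched));
	if (!s->cpu_last_switched)
		return -1;

	s->curr_pid = malloc(MAX_CPUS * sizeof(*s->curr_pid));
	if (!s->curr_pid) {
		free(s->cpu_last_switched);
		s->cpu_last_switched = NULL;
		return -1;
	}

	for (int i = 0; i < MAX_CPUS; i++)
		s->curr_pid[i] = -1;	/* "no task seen on this CPU yet" sentinel */

	return 0;
}

static void free_state(struct sched_state *s)
{
	free(s->curr_pid);
	free(s->cpu_last_switched);
	s->curr_pid = NULL;
	s->cpu_last_switched = NULL;
}

/* Caller shaped like perf_sched__lat(): allocate up front, and make both the
 * error path and the success path fall through one cleanup label. */
static int run_subcommand(struct sched_state *s, int simulate_failure)
{
	int rc = -1;

	if (setup_state(s))
		return rc;

	if (simulate_failure)
		goto out_free_state;	/* error path still frees the arrays */

	rc = 0;				/* success path */

out_free_state:
	free_state(s);
	return rc;
}

int main(void)
{
	struct sched_state s = { 0 };

	return run_subcommand(&s, 0) == 0 ? 0 : 1;
}
```

The payoff of the refactor is that subcommands which never touch this state (for example the `script` alias) no longer pay for the allocation, and `cmd_sched()` can simply `return` from each branch instead of threading `ret` through a shared `out:` label.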