@@ -701,9 +701,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 }
 
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+static void attach_entity_cfs_rq(struct sched_entity *se);
 
 /*
  * With new tasks being created, their initial util_avgs are extrapolated
@@ -735,7 +733,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	struct sched_avg *sa = &se->avg;
 	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
-	u64 now = cfs_rq_clock_task(cfs_rq);
 
 	if (cap > 0) {
 		if (cfs_rq->avg.util_avg != 0) {
@@ -763,14 +760,12 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			 * such that the next switched_to_fair() has the
 			 * expected state.
 			 */
-			se->avg.last_update_time = now;
+			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
 			return;
 		}
 	}
 
-	update_cfs_rq_load_avg(now, cfs_rq, false);
-	attach_entity_load_avg(cfs_rq, se);
-	update_tg_load_avg(cfs_rq, false);
+	attach_entity_cfs_rq(se);
 }
 
 #else /* !CONFIG_SMP */
@@ -8783,30 +8778,19 @@ static inline bool vruntime_normalized(struct task_struct *p)
 	return false;
 }
 
-static void detach_task_cfs_rq(struct task_struct *p)
+static void detach_entity_cfs_rq(struct sched_entity *se)
 {
-	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
 
-	if (!vruntime_normalized(p)) {
-		/*
-		 * Fix up our vruntime so that the current sleep doesn't
-		 * cause 'unlimited' sleep bonus.
-		 */
-		place_entity(cfs_rq, se, 0);
-		se->vruntime -= cfs_rq->min_vruntime;
-	}
-
 	/* Catch up with the cfs_rq and remove our load when we leave */
 	update_cfs_rq_load_avg(now, cfs_rq, false);
 	detach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 }
 
-static void attach_task_cfs_rq(struct task_struct *p)
+static void attach_entity_cfs_rq(struct sched_entity *se)
 {
-	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
 
@@ -8818,10 +8802,35 @@ static void attach_task_cfs_rq(struct task_struct *p)
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
 
-	/* Synchronize task with its cfs_rq */
+	/* Synchronize entity with its cfs_rq */
 	update_cfs_rq_load_avg(now, cfs_rq, false);
 	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
+}
+
+static void detach_task_cfs_rq(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	if (!vruntime_normalized(p)) {
+		/*
+		 * Fix up our vruntime so that the current sleep doesn't
+		 * cause 'unlimited' sleep bonus.
+		 */
+		place_entity(cfs_rq, se, 0);
+		se->vruntime -= cfs_rq->min_vruntime;
+	}
+
+	detach_entity_cfs_rq(se);
+}
+
+static void attach_task_cfs_rq(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	attach_entity_cfs_rq(se);
 
 	if (!vruntime_normalized(p))
 		se->vruntime += cfs_rq->min_vruntime;
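
The diff above factors the load-average synchronization out of the task-level attach/detach paths into entity-level helpers, attach_entity_cfs_rq() and detach_entity_cfs_rq(), so post_init_entity_util_avg() can reuse the attach path without the vruntime fixups that only make sense for tasks. Below is a minimal standalone sketch of that call structure, not kernel code: the stub types, the sync_load_avg() stand-in for the PELT update calls, and main() are invented for illustration only.

/*
 * Standalone model of the refactoring: entity-level helpers carry only the
 * load-average synchronization, task-level wrappers keep the vruntime
 * handling and delegate the rest. All types and helpers here are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct cfs_rq { unsigned long min_vruntime; };
struct sched_entity { unsigned long vruntime; struct cfs_rq *cfs_rq; };
struct task_struct { struct sched_entity se; };

static struct cfs_rq *cfs_rq_of(struct sched_entity *se) { return se->cfs_rq; }
static bool vruntime_normalized(struct task_struct *p) { (void)p; return false; }

/* Stand-in for update_cfs_rq_load_avg()/attach|detach_entity_load_avg()/update_tg_load_avg() */
static void sync_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, const char *dir)
{
	(void)se;
	printf("%s entity load on cfs_rq %p\n", dir, (void *)cfs_rq);
}

/* Entity-level helpers: only load tracking, usable from non-task paths too. */
static void detach_entity_cfs_rq(struct sched_entity *se)
{
	sync_load_avg(cfs_rq_of(se), se, "detach");
}

static void attach_entity_cfs_rq(struct sched_entity *se)
{
	sync_load_avg(cfs_rq_of(se), se, "attach");
}

/* Task-level wrappers: vruntime fixups stay here, load handling is delegated. */
static void detach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	if (!vruntime_normalized(p))
		se->vruntime -= cfs_rq->min_vruntime;

	detach_entity_cfs_rq(se);
}

static void attach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	attach_entity_cfs_rq(se);

	if (!vruntime_normalized(p))
		se->vruntime += cfs_rq->min_vruntime;
}

int main(void)
{
	struct cfs_rq rq = { .min_vruntime = 1000 };
	struct task_struct p = { .se = { .vruntime = 1500, .cfs_rq = &rq } };

	detach_task_cfs_rq(&p);	/* e.g. task leaving this cfs_rq */
	attach_task_cfs_rq(&p);	/* e.g. task joining it again */
	return 0;
}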