@@ -4642,24 +4642,43 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
+/*
+ * Both these cpu hotplug callbacks race against unregister_fair_sched_group()
+ *
+ * The race is harmless, since modifying bandwidth settings of unhooked group
+ * bits doesn't do much.
+ */
+
+/* cpu online callback */
 static void __maybe_unused update_runtime_enabled(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq;
+	struct task_group *tg;
 
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+	lockdep_assert_held(&rq->lock);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
 		raw_spin_lock(&cfs_b->lock);
 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
 		raw_spin_unlock(&cfs_b->lock);
 	}
+	rcu_read_unlock();
 }
 
+/* cpu offline callback */
 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq;
+	struct task_group *tg;
+
+	lockdep_assert_held(&rq->lock);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
 		if (!cfs_rq->runtime_enabled)
 			continue;
 
@@ -4677,6 +4696,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 		if (cfs_rq_throttled(cfs_rq))
 			unthrottle_cfs_rq(cfs_rq);
 	}
+	rcu_read_unlock();
 }
 
 #else /* CONFIG_CFS_BANDWIDTH */
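
The hunks above replace the per-rq leaf_cfs_rq walk with an RCU-protected traversal of the global task_groups list, which is why the comment calls the race with unregister_fair_sched_group() harmless: readers stay inside rcu_read_lock()/rcu_read_unlock() while the updater unlinks entries with the _rcu list helpers and frees them only after a grace period. As a minimal userspace sketch of that reader/updater pattern, built on liburcu rather than the kernel's RCU (struct and list names here are illustrative, not taken from the kernel):

/* Minimal sketch of an RCU-protected list walk, using liburcu (userspace RCU).
 * Build, assuming liburcu is installed:  cc rcu_list_sketch.c -lurcu -o rcu_list_sketch
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu(), ... */
#include <urcu/list.h>		/* struct cds_list_head, CDS_LIST_HEAD() */
#include <urcu/rculist.h>	/* cds_list_add_rcu(), cds_list_for_each_entry_rcu() */

struct my_group {			/* stand-in for a task_group-like object */
	int id;
	struct cds_list_head list;
};

static CDS_LIST_HEAD(group_list);	/* stand-in for a global group list */

static void walk_groups(const char *when)
{
	struct my_group *g;

	/* Reader side: the traversal is safe even if an updater unlinks
	 * entries concurrently, as long as we stay in the read-side section. */
	rcu_read_lock();
	cds_list_for_each_entry_rcu(g, &group_list, list)
		printf("%s: saw group %d\n", when, g->id);
	rcu_read_unlock();
}

int main(void)
{
	struct my_group *a = malloc(sizeof(*a));
	struct my_group *b = malloc(sizeof(*b));

	rcu_register_thread();		/* every thread using RCU must register */

	a->id = 1;
	b->id = 2;
	cds_list_add_rcu(&a->list, &group_list);
	cds_list_add_rcu(&b->list, &group_list);

	walk_groups("before removal");

	/* Updater side: unlink, wait for pre-existing readers, then free. */
	cds_list_del_rcu(&a->list);
	synchronize_rcu();
	free(a);

	walk_groups("after removal");

	cds_list_del_rcu(&b->list);
	synchronize_rcu();
	free(b);

	rcu_unregister_thread();
	return 0;
}

The same structure appears in the patch: the hotplug callbacks are the readers, and whoever tears down a task group is the updater, so a group observed mid-teardown is still safe to touch, merely pointless to update.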