Skip to content

Commit 3335d6a

Browse files
committed
sched/uclamp: Optimize sched_uclamp_used static key enabling
JIRA: https://issues.redhat.com/browse/RHEL-110301 commit 4bc4582 Author: Xuewen Yan <[email protected]> Date: Wed Feb 19 17:37:47 2025 +0800 sched/uclamp: Optimize sched_uclamp_used static key enabling Repeat calls of static_branch_enable() to an already enabled static key introduce overhead, because it calls cpus_read_lock(). Users may frequently set the uclamp value of tasks, triggering the repeat enabling of the sched_uclamp_used static key. Optimize this and avoid repeat calls to static_branch_enable() by checking whether it's enabled already. [ mingo: Rewrote the changelog for legibility ] Signed-off-by: Xuewen Yan <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Reviewed-by: Christian Loehle <[email protected]> Reviewed-by: Vincent Guittot <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Phil Auld <[email protected]>
1 parent 411440d commit 3335d6a

File tree

3 files changed

+18
-4
lines changed

3 files changed

+18
-4
lines changed

kernel/sched/core.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1918,12 +1918,12 @@ static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
19181918
}
19191919

19201920
if (update_root_tg) {
1921-
static_branch_enable(&sched_uclamp_used);
1921+
sched_uclamp_enable();
19221922
uclamp_update_root_tg();
19231923
}
19241924

19251925
if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1926-
static_branch_enable(&sched_uclamp_used);
1926+
sched_uclamp_enable();
19271927
uclamp_sync_util_min_rt_default();
19281928
}
19291929

@@ -9235,7 +9235,7 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
92359235
if (req.ret)
92369236
return req.ret;
92379237

9238-
static_branch_enable(&sched_uclamp_used);
9238+
sched_uclamp_enable();
92399239

92409240
guard(mutex)(&uclamp_mutex);
92419241
guard(rcu)();

kernel/sched/sched.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3253,6 +3253,18 @@ static inline bool uclamp_is_used(void)
32533253
return static_branch_likely(&sched_uclamp_used);
32543254
}
32553255

3256+
/*
3257+
 * Enabling a static branch takes the cpus_read_lock(), so
3258+
 * check uclamp_is_used() before enabling the key to avoid
3259+
 * always calling cpus_read_lock(). This is safe because we
3260+
 * never disable this static key once it has been enabled.
3261+
 */
3262+
static inline void sched_uclamp_enable(void)
3263+
{
3264+
if (!uclamp_is_used())
3265+
static_branch_enable(&sched_uclamp_used);
3266+
}
3267+
32563268
static inline unsigned long uclamp_rq_get(struct rq *rq,
32573269
enum uclamp_id clamp_id)
32583270
{
@@ -3332,6 +3344,8 @@ static inline bool uclamp_is_used(void)
33323344
return false;
33333345
}
33343346

3347+
static inline void sched_uclamp_enable(void) {}
3348+
33353349
static inline unsigned long
33363350
uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
33373351
{

kernel/sched/syscalls.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -365,7 +365,7 @@ static int uclamp_validate(struct task_struct *p,
365365
* blocking operation which obviously cannot be done while holding
366366
* scheduler locks.
367367
*/
368-
static_branch_enable(&sched_uclamp_used);
368+
sched_uclamp_enable();
369369

370370
return 0;
371371
}

0 commit comments

Comments
 (0)