Skip to content

Commit 70b4cf8

Browse files
anna-marialx authored and KAGA-KOKO committed
timers: Split out "get next timer interrupt" functionality
The functionality for getting the next timer interrupt in get_next_timer_interrupt() is split into a separate function fetch_next_timer_interrupt() to be usable by other call sites.

This is preparatory work for the conversion of the NOHZ timer placement to a pull at expiry time model.

No functional change.

Signed-off-by: Anna-Maria Behnsen <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Frederic Weisbecker <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 21927fc commit 70b4cf8

File tree

1 file changed

+38
-26
lines changed

1 file changed

+38
-26
lines changed

kernel/time/timer.c

Lines changed: 38 additions & 26 deletions
Original file line number | Diff line number | Diff line change
@@ -2033,30 +2033,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base,
20332033
return base->next_expiry;
20342034
}
20352035

2036-
static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
2037-
bool *idle)
2036+
static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem,
2037+
struct timer_base *base_local,
2038+
struct timer_base *base_global,
2039+
struct timer_events *tevt)
20382040
{
2039-
struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
20402041
unsigned long nextevt, nextevt_local, nextevt_global;
2041-
struct timer_base *base_local, *base_global;
20422042
bool local_first;
2043-
u64 expires;
2044-
2045-
/*
2046-
* Pretend that there is no timer pending if the cpu is offline.
2047-
* Possible pending timers will be migrated later to an active cpu.
2048-
*/
2049-
if (cpu_is_offline(smp_processor_id())) {
2050-
if (idle)
2051-
*idle = true;
2052-
return tevt.local;
2053-
}
2054-
2055-
base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
2056-
base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
2057-
2058-
raw_spin_lock(&base_local->lock);
2059-
raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
20602043

20612044
nextevt_local = next_timer_interrupt(base_local, basej);
20622045
nextevt_global = next_timer_interrupt(base_global, basej);
@@ -2074,8 +2057,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
20742057
/* If we missed a tick already, force 0 delta */
20752058
if (time_before(nextevt, basej))
20762059
nextevt = basej;
2077-
tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC;
2078-
goto forward;
2060+
tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC;
2061+
return nextevt;
20792062
}
20802063

20812064
/*
@@ -2085,12 +2068,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
20852068
* ignored. If the global queue is empty, nothing to do either.
20862069
*/
20872070
if (!local_first && base_global->timers_pending)
2088-
tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
2071+
tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC;
20892072

20902073
if (base_local->timers_pending)
2091-
tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
2074+
tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC;
2075+
2076+
return nextevt;
2077+
}
2078+
2079+
static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
2080+
bool *idle)
2081+
{
2082+
struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
2083+
struct timer_base *base_local, *base_global;
2084+
unsigned long nextevt;
2085+
u64 expires;
2086+
2087+
/*
2088+
* Pretend that there is no timer pending if the cpu is offline.
2089+
* Possible pending timers will be migrated later to an active cpu.
2090+
*/
2091+
if (cpu_is_offline(smp_processor_id())) {
2092+
if (idle)
2093+
*idle = true;
2094+
return tevt.local;
2095+
}
2096+
2097+
base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
2098+
base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]);
2099+
2100+
raw_spin_lock(&base_local->lock);
2101+
raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
2102+
2103+
nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
2104+
base_global, &tevt);
20922105

2093-
forward:
20942106
/*
20952107
* We have a fresh next event. Check whether we can forward the
20962108
* base.

0 commit comments

Comments (0)