@@ -635,11 +635,16 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 
 	/*
 	 * We might have to IPI the remote CPU if the base is idle and the
-	 * timer is not deferrable. If the other CPU is on the way to idle
-	 * then it can't set base->is_idle as we hold the base lock:
+	 * timer is pinned. If it is a non-pinned timer, it is only queued
+	 * on the remote CPU when the timer was running there during
+	 * enqueue; then everything is handled by the remote CPU anyway.
+	 * If the other CPU is on the way to idle then it can't set
+	 * base->is_idle as we hold the base lock:
 	 */
-	if (base->is_idle)
+	if (base->is_idle) {
+		WARN_ON_ONCE(!(timer->flags & TIMER_PINNED));
 		wake_up_nohz_cpu(base->cpu);
+	}
 }
 
 /*
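
For orientation, this is roughly how trigger_dyntick_cpu() reads once the hunk is applied. It is a simplified sketch assembled from the lines above, not the full upstream function (the early-out for deferrable/nohz-inactive timers is paraphrased):

    static void
    trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
    {
    	/* Sketch: deferrable timers never warrant waking a nohz CPU. */
    	if (timer->flags & TIMER_DEFERRABLE)
    		return;

    	/*
    	 * Only a pinned timer can be remotely enqueued while the target
    	 * base is idle, so only then is the wake-up IPI sent.
    	 */
    	if (base->is_idle) {
    		WARN_ON_ONCE(!(timer->flags & TIMER_PINNED));
    		wake_up_nohz_cpu(base->cpu);
    	}
    }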
@@ -986,17 +991,6 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }
 
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-	if (static_branch_likely(&timers_migration_enabled) &&
-	    !(tflags & TIMER_PINNED))
-		return get_timer_cpu_base(tflags, get_nohz_timer_target());
-#endif
-	return get_timer_this_cpu_base(tflags);
-}
-
 static inline void __forward_timer_base(struct timer_base *base,
 					unsigned long basej)
 {
@@ -1151,7 +1145,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
 	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
 		goto out_unlock;
 
-	new_base = get_target_base(base, timer->flags);
+	new_base = get_timer_this_cpu_base(timer->flags);
 
 	if (base != new_base) {
 		/*
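
The effect at the API level: a plain mod_timer()/add_timer() from any CPU now always enqueues on that CPU, and a non-pinned timer may only be expired elsewhere later via the timer migration (pull) machinery. A hedged usage sketch with hypothetical names (my_timer, my_timer_fn and arm_example are illustrations, not from this patch):

    static void my_timer_fn(struct timer_list *t)
    {
    	/* Runs on whichever CPU ends up expiring the timer. */
    }

    static DEFINE_TIMER(my_timer, my_timer_fn);	/* no TIMER_PINNED */

    static void arm_example(void)
    {
    	/*
    	 * Always lands in this CPU's BASE_GLOBAL now, even if another
    	 * CPU looked like a better nohz target at enqueue time.
    	 */
    	mod_timer(&my_timer, jiffies + HZ);
    }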
@@ -2297,7 +2291,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	 * granularity skew (by design).
 	 */
 	if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
-		base_local->is_idle = base_global->is_idle = true;
+		base_local->is_idle = true;
 		trace_timer_base_idle(true, base_local->cpu);
 	}
 	*idle = base_local->is_idle;
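
Since non-pinned timers are now always enqueued locally, a remote CPU can only ever queue into this CPU's BASE_LOCAL (via a pinned timer), so BASE_LOCAL's is_idle is the only flag the remote-enqueue IPI logic still needs. The basej + 1 guard is the "granularity skew" the comment mentions; a small illustration (time_after() is the wrap-safe jiffies comparison from <linux/jiffies.h>):

    /*
     * Sketch: nextevt == basej + 1 means the next timer fires on the
     * very next tick, which the regular tick handles anyway, so
     * time_after(nextevt, basej + 1) is false and the base is not
     * marked idle. Only events at least two ticks out allow idling.
     */
    bool mark_idle = time_after(nextevt, basej + 1);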
@@ -2363,13 +2357,13 @@ u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle)
 void timer_clear_idle(void)
 {
 	/*
-	 * We do this unlocked. The worst outcome is a remote enqueue sending
-	 * a pointless IPI, but taking the lock would just make the window for
-	 * sending the IPI a few instructions smaller for the cost of taking
-	 * the lock in the exit from idle path.
+	 * We do this unlocked. The worst outcome is a remote pinned timer
+	 * enqueue sending a pointless IPI, but taking the lock would just
+	 * make the window for sending the IPI a few instructions smaller
+	 * for the cost of taking the lock in the exit from idle path.
+	 * Required for BASE_LOCAL only.
 	 */
 	__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
-	__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
 	trace_timer_base_idle(false, smp_processor_id());
 
 	/* Activate without holding the timer_base->lock */
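
Taken together, the exit-from-idle side now looks roughly like this (a sketch assembled from the hunk above; the trailing activation step, only hinted at by the last context line, is elided):

    void timer_clear_idle(void)
    {
    	/*
    	 * Unlocked on purpose, as per the comment in the hunk: a racing
    	 * remote pinned-timer enqueue may cost one pointless IPI. Only
    	 * BASE_LOCAL needs clearing now.
    	 */
    	__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
    	trace_timer_base_idle(false, smp_processor_id());

    	/* ... re-activation in the migration hierarchy follows ... */
    }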