@@ -546,8 +546,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
546546}
547547
548548/*
549- * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
550- * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
549+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next
550+ * but does not set cpu_base::*expires_next, that is done by
551+ * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
552+ * cpu_base::*expires_next right away, reprogramming logic would no longer
553+ * work.
551554 *
552555 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
553556 * those timers will get run whenever the softirq gets handled, at the end of
@@ -588,6 +591,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
588591 return expires_next ;
589592}
590593
594+ static ktime_t hrtimer_update_next_event (struct hrtimer_cpu_base * cpu_base )
595+ {
596+ ktime_t expires_next , soft = KTIME_MAX ;
597+
598+ /*
599+ * If the soft interrupt has already been activated, ignore the
600+ * soft bases. They will be handled in the already raised soft
601+ * interrupt.
602+ */
603+ if (!cpu_base -> softirq_activated ) {
604+ soft = __hrtimer_get_next_event (cpu_base , HRTIMER_ACTIVE_SOFT );
605+ /*
606+ * Update the soft expiry time. clock_settime() might have
607+ * affected it.
608+ */
609+ cpu_base -> softirq_expires_next = soft ;
610+ }
611+
612+ expires_next = __hrtimer_get_next_event (cpu_base , HRTIMER_ACTIVE_HARD );
613+ /*
614+ * If a softirq timer is expiring first, update cpu_base->next_timer
615+ * and program the hardware with the soft expiry time.
616+ */
617+ if (expires_next > soft ) {
618+ cpu_base -> next_timer = cpu_base -> softirq_next_timer ;
619+ expires_next = soft ;
620+ }
621+
622+ return expires_next ;
623+ }
624+
591625static inline ktime_t hrtimer_update_base (struct hrtimer_cpu_base * base )
592626{
593627 ktime_t * offs_real = & base -> clock_base [HRTIMER_BASE_REALTIME ].offset ;
@@ -628,23 +662,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
628662{
629663 ktime_t expires_next ;
630664
631- /*
632- * Find the current next expiration time.
633- */
634- expires_next = __hrtimer_get_next_event (cpu_base , HRTIMER_ACTIVE_ALL );
635-
636- if (cpu_base -> next_timer && cpu_base -> next_timer -> is_soft ) {
637- /*
638- * When the softirq is activated, hrtimer has to be
639- * programmed with the first hard hrtimer because soft
640- * timer interrupt could occur too late.
641- */
642- if (cpu_base -> softirq_activated )
643- expires_next = __hrtimer_get_next_event (cpu_base ,
644- HRTIMER_ACTIVE_HARD );
645- else
646- cpu_base -> softirq_expires_next = expires_next ;
647- }
665+ expires_next = hrtimer_update_next_event (cpu_base );
648666
649667 if (skip_equal && expires_next == cpu_base -> expires_next )
650668 return ;
@@ -1644,8 +1662,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
16441662
16451663 __hrtimer_run_queues (cpu_base , now , flags , HRTIMER_ACTIVE_HARD );
16461664
1647- /* Reevaluate the clock bases for the next expiry */
1648- expires_next = __hrtimer_get_next_event (cpu_base , HRTIMER_ACTIVE_ALL );
1665+ /* Reevaluate the clock bases for the [soft] next expiry */
1666+ expires_next = hrtimer_update_next_event (cpu_base );
16491667 /*
16501668 * Store the new expiry value so the migration code can verify
16511669 * against it.