 
 #include "tick-internal.h"
 
+/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active_bases
+ */
+#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
+#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
+#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
 /*
  * The timer bases:
  *
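To make the mask arithmetic concrete: with the clock base layout this series uses (four hard bases followed by their soft twins, so HRTIMER_BASE_MONOTONIC_SOFT == 4), the defines come out to HRTIMER_ACTIVE_HARD == 0x0f, HRTIMER_ACTIVE_SOFT == 0xf0 and HRTIMER_ACTIVE_ALL == 0xff. A standalone sketch of the same arithmetic (the enum below is an illustrative stand-in, not the kernel header):

/* Illustrative stand-in for the kernel's clock base enumeration:
 * four hard bases followed by their soft twins. */
#include <stdio.h>

enum base_type {
	BASE_MONOTONIC,		/* hard bases occupy bits 0..3 */
	BASE_REALTIME,
	BASE_BOOTTIME,
	BASE_TAI,
	BASE_MONOTONIC_SOFT,	/* first soft base: bit 4 == MASK_SHIFT */
	BASE_REALTIME_SOFT,
	BASE_BOOTTIME_SOFT,
	BASE_TAI_SOFT,
};

#define MASK_SHIFT	(BASE_MONOTONIC_SOFT)
#define ACTIVE_HARD	((1U << MASK_SHIFT) - 1)	/* 0x0f */
#define ACTIVE_SOFT	(ACTIVE_HARD << MASK_SHIFT)	/* 0xf0 */

int main(void)
{
	/* MONOTONIC (hard) and REALTIME_SOFT armed. */
	unsigned int active_bases = (1U << BASE_MONOTONIC) |
				    (1U << BASE_REALTIME_SOFT);

	printf("hard subset: 0x%02x\n", active_bases & ACTIVE_HARD); /* 0x01 */
	printf("soft subset: 0x%02x\n", active_bases & ACTIVE_SOFT); /* 0x20 */
	return 0;
}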
@@ -507,13 +516,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 	return expires_next;
 }
 
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next,
+ * but does not set cpu_base::*expires_next; that is done by hrtimer_reprogram.
+ *
+ * @active_mask must be one of:
+ *  - HRTIMER_ACTIVE_ALL,
+ *  - HRTIMER_ACTIVE_SOFT, or
+ *  - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
+					unsigned int active_mask)
 {
-	unsigned int active = cpu_base->active_bases;
+	unsigned int active;
 	ktime_t expires_next = KTIME_MAX;
 
 	cpu_base->next_timer = NULL;
 
+	active = cpu_base->active_bases & active_mask;
 	expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
 
 	return expires_next;
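The practical effect of @active_mask: a hard-only scan ignores an armed soft timer even when that soft timer expires earlier, since the soft bases are evaluated separately in softirq context. A toy model of the filtered min-scan (names and types here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

/* Toy stand-in for the per-base expiry bookkeeping. */
struct toy_base { int64_t next_expiry; };

static int64_t toy_next_event(const struct toy_base *bases,
			      unsigned int active_bases,
			      unsigned int active_mask)
{
	unsigned int active = active_bases & active_mask;
	int64_t expires_next = KTIME_MAX;

	/* Walk the set bits, mimicking for_each_active_base(). */
	for (; active; active &= active - 1) {
		unsigned int idx = __builtin_ctz(active);

		if (bases[idx].next_expiry < expires_next)
			expires_next = bases[idx].next_expiry;
	}
	return expires_next;
}

int main(void)
{
	struct toy_base bases[8] = { [0] = {1000}, [5] = {500} };
	unsigned int armed = (1U << 0) | (1U << 5); /* hard base 0, soft base 5 */

	/* The hard-only scan skips the earlier soft timer. */
	printf("%lld\n", (long long)toy_next_event(bases, armed, 0x0fU)); /* 1000 */
	printf("%lld\n", (long long)toy_next_event(bases, armed, 0xf0U)); /*  500 */
	return 0;
}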
@@ -553,7 +573,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
 	ktime_t expires_next;
 
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 
 	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
@@ -1074,7 +1094,7 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base);
+		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1248,10 +1268,10 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
 }
 
 static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
-				 unsigned long flags)
+				 unsigned long flags, unsigned int active_mask)
 {
 	struct hrtimer_clock_base *base;
-	unsigned int active = cpu_base->active_bases;
+	unsigned int active = cpu_base->active_bases & active_mask;
 
 	for_each_active_base(base, cpu_base, active) {
 		struct timerqueue_node *node;
@@ -1314,10 +1334,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->expires_next = KTIME_MAX;
 
-	__hrtimer_run_queues(cpu_base, now, flags);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 
 	/* Reevaluate the clock bases for the next expiry */
-	expires_next = __hrtimer_get_next_event(cpu_base);
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.
@@ -1421,7 +1441,7 @@ void hrtimer_run_queues(void)
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now, flags);
+	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 }
 
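Every caller converted here passes HRTIMER_ACTIVE_HARD, so this patch is behaviour-neutral on its own; the mask parameter pays off later in the series, when a softirq handler runs only the soft bases. Roughly, and simplified from the follow-up patches (the expiry and softirq-activation bookkeeping is omitted):

/* Simplified sketch of the softirq consumer added later in this
 * series; not part of this patch. */
static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	now = hrtimer_update_base(cpu_base);
	/* Same helper as above, now selecting only the soft bases. */
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}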