summaryrefslogtreecommitdiff
path: root/kernel/time/hrtimer.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@kernel.org>2026-02-24 17:38:28 +0100
committerPeter Zijlstra <peterz@infradead.org>2026-02-27 16:40:14 +0100
commiteddffab8282e388dddf032f3295fcec87eb08095 (patch)
treee90eefaeb90373f178d265a2ce7b4d52734c3e2b /kernel/time/hrtimer.c
parentb95c4442b02162904e9012e670b602ebeb3c6c1b (diff)
hrtimer: Keep track of first expiring timer per clock base
Evaluating the next expiry time of all clock bases is cache line expensive as the expiry time of the first expiring timer is not cached in the base and requires accessing the timer itself, which is definitely in a different cache line. It's way more efficient to keep track of the expiry time on enqueue and dequeue operations as the relevant data is already in the cache at that point. Signed-off-by: Thomas Gleixner <tglx@kernel.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://patch.msgid.link/20260224163431.404839710@kernel.org
Diffstat (limited to 'kernel/time/hrtimer.c')
-rw-r--r--kernel/time/hrtimer.c37
1 file changed, 34 insertions, 3 deletions
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e9592cb1e39a..d70899a9ddc1 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1107,7 +1107,18 @@ static bool enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *ba
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, HRTIMER_STATE_ENQUEUED);
- return timerqueue_add(&base->active, &timer->node);
+ if (!timerqueue_add(&base->active, &timer->node))
+ return false;
+
+ base->expires_next = hrtimer_get_expires(timer);
+ return true;
+}
+
+static inline void base_update_next_timer(struct hrtimer_clock_base *base)
+{
+ struct timerqueue_node *next = timerqueue_getnext(&base->active);
+
+ base->expires_next = next ? next->expires : KTIME_MAX;
}
/*
@@ -1122,6 +1133,7 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *b
bool newstate, bool reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+ bool was_first;
lockdep_assert_held(&cpu_base->lock);
@@ -1131,9 +1143,17 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *b
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, newstate);
+ was_first = &timer->node == timerqueue_getnext(&base->active);
+
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
+ /* Nothing to update if this was not the first timer in the base */
+ if (!was_first)
+ return;
+
+ base_update_next_timer(base);
+
/*
* If reprogram is false don't update cpu_base->next_timer and do not
* touch the clock event device.
@@ -1182,9 +1202,12 @@ static inline bool
remove_and_enqueue_same_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
const enum hrtimer_mode mode, ktime_t expires, u64 delta_ns)
{
+ bool was_first = false;
+
/* Remove it from the timer queue if active */
if (timer->is_queued) {
debug_hrtimer_deactivate(timer);
+ was_first = &timer->node == timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
}
@@ -1197,8 +1220,16 @@ remove_and_enqueue_same_base(struct hrtimer *timer, struct hrtimer_clock_base *b
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, HRTIMER_STATE_ENQUEUED);
- /* Returns true if this is the first expiring timer */
- return timerqueue_add(&base->active, &timer->node);
+ /* If it's the first expiring timer now or again, update base */
+ if (timerqueue_add(&base->active, &timer->node)) {
+ base->expires_next = expires;
+ return true;
+ }
+
+ if (was_first)
+ base_update_next_timer(base);
+
+ return false;
}
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,