From 3d48c9fd78dd0b1809669ec49c4d0997b8127512 Mon Sep 17 00:00:00 2001
From: Mikhail Gavrilov
Date: Fri, 27 Mar 2026 17:41:56 +0500
Subject: dma-debug: suppress cacheline overlap warning when arch has no DMA
 alignment requirement

When CONFIG_DMA_API_DEBUG is enabled, the DMA debug infrastructure tracks
active mappings per cacheline and warns if two different DMA mappings
share the same cacheline ("cacheline tracking EEXIST, overlapping mappings
aren't supported").

On x86_64, ARCH_KMALLOC_MINALIGN defaults to 8, so small kmalloc
allocations (e.g. the 8-byte hub->buffer and hub->status in the USB hub
driver) frequently land in the same 64-byte cacheline. When both are
DMA-mapped, this triggers a false positive warning.

This has been reported repeatedly since v5.14 (when the EEXIST check was
added) across various USB host controllers and devices including xhci_hcd
with USB hubs, USB audio devices, and USB ethernet adapters.

The cacheline overlap is only a real concern on architectures that require
DMA buffer alignment to cacheline boundaries (i.e. where ARCH_DMA_MINALIGN
>= L1_CACHE_BYTES). On architectures like x86_64 where
dma_get_cache_alignment() returns 1, the hardware is cache-coherent and
overlapping cacheline mappings are harmless.

Suppress the EEXIST warning when dma_get_cache_alignment() is less than
L1_CACHE_BYTES, indicating the architecture does not require
cacheline-aligned DMA buffers.

Verified with a kernel module reproducer that performs two kmalloc(8)
allocations back-to-back and DMA-maps both:

Before: allocations share a cacheline, EEXIST fires within ~50 pairs
After:  same cacheline pair found, but no warning emitted

Fixes: 2b4bbc6231d7 ("dma-debug: report -EEXIST errors in add_dma_entry")
Link: https://bugzilla.kernel.org/show_bug.cgi?id=215740
Suggested-by: Harry Yoo
Tested-by: Mikhail Gavrilov
Signed-off-by: Mikhail Gavrilov
Signed-off-by: Marek Szyprowski
Link: https://lore.kernel.org/r/20260327124156.24820-1-mikhail.v.gavrilov@gmail.com
---
 kernel/dma/debug.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel')

diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 0677918f06a8..1a725edbbbf6 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -615,6 +615,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 	} else if (rc == -EEXIST &&
 		   !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 		   !(entry->is_cache_clean && overlap_cache_clean) &&
+		   dma_get_cache_alignment() >= L1_CACHE_BYTES &&
 		   !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
 		     is_swiotlb_active(entry->dev))) {
 		err_printk(entry->dev, entry,
-- cgit v1.2.3

From 703ccb63ae9f7444d6ff876d024e17f628103c69 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Tue, 31 Mar 2026 18:07:39 -0700
Subject: workqueue: Add pool_workqueue to pending_pwqs list when unplugging
 multiple inactive works
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In unplug_oldest_pwq(), the first inactive work item on the
pool_workqueue is activated correctly. However, if multiple inactive
works exist on the same pool_workqueue, subsequent works fail to activate
because wq_node_nr_active.pending_pwqs is empty — the list insertion is
skipped when the pool_workqueue is plugged.

Fix this by checking for additional inactive works in unplug_oldest_pwq()
and updating wq_node_nr_active.pending_pwqs accordingly.
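For illustration, the precondition for this bug (several works queued on
an ordered unbound workqueue so that all but the first sit on the inactive
list) can be staged with a module along the lines of the sketch below.
This is not the reproducer used for this fix, and the unbound cpumask
update that plugs/unplugs pool_workqueues still has to be triggered
separately, e.g. by writing to /sys/devices/virtual/workqueue/cpumask:

  #include <linux/module.h>
  #include <linux/workqueue.h>
  #include <linux/delay.h>

  static struct workqueue_struct *test_wq;
  static struct work_struct works[4];

  static void slow_work_fn(struct work_struct *work)
  {
  	/* Keep the pwq busy so the later works stay on the inactive list. */
  	msleep(1000);
  }

  static int __init unplug_test_init(void)
  {
  	int i;

  	test_wq = alloc_ordered_workqueue("unplug_test", 0);
  	if (!test_wq)
  		return -ENOMEM;

  	for (i = 0; i < ARRAY_SIZE(works); i++) {
  		INIT_WORK(&works[i], slow_work_fn);
  		queue_work(test_wq, &works[i]);
  	}
  	return 0;
  }

  static void __exit unplug_test_exit(void)
  {
  	destroy_workqueue(test_wq);
  }

  module_init(unplug_test_init);
  module_exit(unplug_test_exit);
  MODULE_LICENSE("GPL");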
Fixes: 4c065dbce1e8 ("workqueue: Enable unbound cpumask update on ordered workqueues")
Cc: stable@vger.kernel.org
Cc: Carlos Santa
Cc: Ryan Neph
Cc: Lai Jiangshan
Cc: Waiman Long
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Matthew Brost
Signed-off-by: Tejun Heo
Acked-by: Waiman Long
---
 kernel/workqueue.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eda756556341..c6ea96d5b716 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1849,8 +1849,20 @@ static void unplug_oldest_pwq(struct workqueue_struct *wq)
 	raw_spin_lock_irq(&pwq->pool->lock);
 	if (pwq->plugged) {
 		pwq->plugged = false;
-		if (pwq_activate_first_inactive(pwq, true))
+		if (pwq_activate_first_inactive(pwq, true)) {
+			/*
+			 * While plugged, queueing skips activation which
+			 * includes bumping the nr_active count and adding the
+			 * pwq to nna->pending_pwqs if the count can't be
+			 * obtained. We need to restore both for the pwq being
+			 * unplugged. The first call activates the first
+			 * inactive work item and the second, if there are more
+			 * inactive, puts the pwq on pending_pwqs.
+			 */
+			pwq_activate_first_inactive(pwq, false);
+			kick_pool(pwq->pool);
+		}
 	}
 	raw_spin_unlock_irq(&pwq->pool->lock);
 }
-- cgit v1.2.3

From adfc80dd0d7831335b5105fb3d8747094bf42878 Mon Sep 17 00:00:00 2001
From: Paul Walmsley
Date: Sat, 4 Apr 2026 18:40:58 -0600
Subject: prctl: rename branch landing pad implementation functions to be more
 explicit

Per Linus' comments about the unreadability of abbreviations such as
"indir_br_lp", rename the three prctl() implementation functions to be
more explicit. This involves renaming "indir_br_lp_status" in the
function names to "branch_landing_pad_state". While here, add _prctl_
into the function names, following the speculation control prctl
implementation functions.
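For reference, an architecture implementing the feature overrides these
weak hooks; under the new names a skeleton override might look like the
following (placeholder bodies only, not the actual riscv implementation):

  #include <linux/sched.h>
  #include <linux/uaccess.h>

  int arch_prctl_get_branch_landing_pad_state(struct task_struct *t,
  					      unsigned long __user *state)
  {
  	unsigned long s = 0;	/* placeholder: derive from t's thread state */

  	return put_user(s, state);
  }

  int arch_prctl_set_branch_landing_pad_state(struct task_struct *t, unsigned long state)
  {
  	/* placeholder: update t's thread state, reject unknown bits */
  	return 0;
  }

  int arch_prctl_lock_branch_landing_pad_state(struct task_struct *t)
  {
  	/* placeholder: latch a "locked" flag so later changes are refused */
  	return 0;
  }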
Link: https://lore.kernel.org/linux-riscv/CAHk-=whhSLGZAx3N5jJpb4GLFDqH_QvS07D+6BnkPWmCEzTAgw@mail.gmail.com/
Cc: Deepak Gupta
Cc: Linus Torvalds
Cc: Mark Brown
Signed-off-by: Paul Walmsley
---
 kernel/sys.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sys.c b/kernel/sys.c
index c86eba9aa7e9..a5e0c187bbbf 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2388,17 +2388,18 @@ int __weak arch_lock_shadow_stack_status(struct task_struct *t, unsigned long st
 	return -EINVAL;
 }
 
-int __weak arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status)
+int __weak arch_prctl_get_branch_landing_pad_state(struct task_struct *t,
+						   unsigned long __user *state)
 {
 	return -EINVAL;
 }
 
-int __weak arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status)
+int __weak arch_prctl_set_branch_landing_pad_state(struct task_struct *t, unsigned long state)
 {
 	return -EINVAL;
 }
 
-int __weak arch_lock_indir_br_lp_status(struct task_struct *t, unsigned long status)
+int __weak arch_prctl_lock_branch_landing_pad_state(struct task_struct *t)
 {
 	return -EINVAL;
 }
@@ -2891,17 +2892,17 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 	case PR_GET_INDIR_BR_LP_STATUS:
 		if (arg3 || arg4 || arg5)
 			return -EINVAL;
-		error = arch_get_indir_br_lp_status(me, (unsigned long __user *)arg2);
+		error = arch_prctl_get_branch_landing_pad_state(me, (unsigned long __user *)arg2);
 		break;
 	case PR_SET_INDIR_BR_LP_STATUS:
 		if (arg3 || arg4 || arg5)
 			return -EINVAL;
-		error = arch_set_indir_br_lp_status(me, arg2);
+		error = arch_prctl_set_branch_landing_pad_state(me, arg2);
 		break;
 	case PR_LOCK_INDIR_BR_LP_STATUS:
-		if (arg3 || arg4 || arg5)
+		if (arg2 || arg3 || arg4 || arg5)
 			return -EINVAL;
-		error = arch_lock_indir_br_lp_status(me, arg2);
+		error = arch_prctl_lock_branch_landing_pad_state(me);
 		break;
 	default:
 		trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5);
-- cgit v1.2.3

From 08ee1559052be302f1d3752f48360b89517d9f8d Mon Sep 17 00:00:00 2001
From: Paul Walmsley
Date: Sat, 4 Apr 2026 18:40:58 -0600
Subject: prctl: cfi: change the branch landing pad prctl()s to be more
 descriptive

Per Linus' comments requesting the replacement of "INDIR_BR_LP" in the
indirect branch tracking prctl()s with something more readable, and
suggesting the use of the speculation control prctl()s as an exemplar,
reimplement the prctl()s and related constants that control per-task
forward-edge control flow integrity.

This primarily involves two changes. First, the prctls are restructured
to resemble the style of the speculative execution workaround control
prctls PR_{GET,SET}_SPECULATION_CTRL, to make them easier to extend in
the future. Second, the "indir_br_lp" abbreviation is expanded to
"branch_landing_pads" to be less telegraphic.

The kselftest and documentation are adjusted accordingly.
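For illustration, the restructured interface can be exercised from
userspace roughly as follows. This is a hedged sketch: it uses only the
PR_* constants visible in the hunk below, assumes uapi headers that
already define them, and the exact flag and lock semantics are left to
the architecture implementation:

  #include <stdio.h>
  #include <sys/prctl.h>

  int main(void)
  {
  	unsigned long state;

  	/* Query the current landing-pad state of the calling task. */
  	if (prctl(PR_GET_CFI, PR_CFI_BRANCH_LANDING_PADS, &state, 0, 0))
  		perror("PR_GET_CFI");
  	else
  		printf("landing pad state: %#lx\n", state);

  	/*
  	 * Lock the current state. Note that locking is skipped when
  	 * PR_CFI_DISABLE is also set, per the PR_SET_CFI handling below.
  	 */
  	if (prctl(PR_SET_CFI, PR_CFI_BRANCH_LANDING_PADS, PR_CFI_LOCK, 0, 0))
  		perror("PR_SET_CFI");

  	return 0;
  }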
Link: https://lore.kernel.org/linux-riscv/CAHk-=whhSLGZAx3N5jJpb4GLFDqH_QvS07D+6BnkPWmCEzTAgw@mail.gmail.com/
Cc: Deepak Gupta
Cc: Linus Torvalds
Cc: Mark Brown
Signed-off-by: Paul Walmsley
---
 kernel/sys.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sys.c b/kernel/sys.c
index a5e0c187bbbf..62e842055cc9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2889,20 +2889,23 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 			return -EINVAL;
 		error = rseq_slice_extension_prctl(arg2, arg3);
 		break;
-	case PR_GET_INDIR_BR_LP_STATUS:
-		if (arg3 || arg4 || arg5)
+	case PR_GET_CFI:
+		if (arg2 != PR_CFI_BRANCH_LANDING_PADS)
 			return -EINVAL;
-		error = arch_prctl_get_branch_landing_pad_state(me, (unsigned long __user *)arg2);
-		break;
-	case PR_SET_INDIR_BR_LP_STATUS:
-		if (arg3 || arg4 || arg5)
+		if (arg4 || arg5)
 			return -EINVAL;
-		error = arch_prctl_set_branch_landing_pad_state(me, arg2);
+		error = arch_prctl_get_branch_landing_pad_state(me, (unsigned long __user *)arg3);
 		break;
-	case PR_LOCK_INDIR_BR_LP_STATUS:
-		if (arg2 || arg3 || arg4 || arg5)
+	case PR_SET_CFI:
+		if (arg2 != PR_CFI_BRANCH_LANDING_PADS)
 			return -EINVAL;
-		error = arch_prctl_lock_branch_landing_pad_state(me);
+		if (arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_set_branch_landing_pad_state(me, arg3);
+		if (error)
+			break;
+		if (arg3 & PR_CFI_LOCK && !(arg3 & PR_CFI_DISABLE))
+			error = arch_prctl_lock_branch_landing_pad_state(me);
 		break;
 	default:
 		trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5);
-- cgit v1.2.3

From 4346be6577aaa04586167402ae87bbdbe32484a4 Mon Sep 17 00:00:00 2001
From: Pengpeng Hou
Date: Thu, 2 Apr 2026 00:03:15 +0800
Subject: tracing/probe: reject non-closed empty immediate strings

parse_probe_arg() accepts quoted immediate strings and passes the body
after the opening quote to __parse_imm_string(). That helper currently
computes strlen(str) and immediately dereferences str[len - 1], which
reads out of bounds (str[-1]) when the body is empty and there is no
closing double quote.

Reject empty, unterminated immediate strings before checking for the
closing quote.

Link: https://lore.kernel.org/all/20260401160315.88518-1-pengpeng@iscas.ac.cn/
Fixes: a42e3c4de964 ("tracing/probe: Add immediate string parameter support")
Signed-off-by: Pengpeng Hou
Reviewed-by: Steven Rostedt (Google)
Signed-off-by: Masami Hiramatsu (Google)
---
 kernel/trace/trace_probe.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index e0a5dc86c07e..e1c73065dae5 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1068,7 +1068,7 @@ static int __parse_imm_string(char *str, char **pbuf, int offs)
 {
 	size_t len = strlen(str);
 
-	if (str[len - 1] != '"') {
+	if (!len || str[len - 1] != '"') {
 		trace_probe_log_err(offs + len, IMMSTR_NO_CLOSE);
 		return -EINVAL;
 	}
-- cgit v1.2.3

From 307e0c5859b0aecc34180468b1aa76684adcf539 Mon Sep 17 00:00:00 2001
From: Leo Timmins
Date: Wed, 25 Mar 2026 12:46:07 +0800
Subject: liveupdate: propagate file deserialization failures

luo_session_deserialize() ignored the return value from
luo_file_deserialize(). As a result, a session could be left partially
restored even though the /dev/liveupdate open path treats
deserialization failures as fatal.

Propagate the error so a failed file deserialization aborts session
deserialization instead of silently continuing.
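For context, a hypothetical sketch of what the restore path expects (the
actual open-path function in luo differs; this only illustrates why a
swallowed error is a problem):

  /* Hypothetical caller: restore errors must abort the open. */
  static int liveupdate_dev_open(struct inode *inode, struct file *file)
  {
  	int err = luo_session_deserialize();

  	if (err)
  		return err;	/* nothing partially restored is exposed */

  	return 0;
  }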
Link: https://lkml.kernel.org/r/20260325044608.8407-1-leotimmins1974@gmail.com
Link: https://lkml.kernel.org/r/20260325044608.8407-2-leotimmins1974@gmail.com
Fixes: 16cec0d26521 ("liveupdate: luo_session: add ioctls for file preservation")
Signed-off-by: Leo Timmins
Reviewed-by: Pasha Tatashin
Reviewed-by: Pratyush Yadav
Cc: Mike Rapoport
Cc:
Signed-off-by: Andrew Morton
---
 kernel/liveupdate/luo_session.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/liveupdate/luo_session.c b/kernel/liveupdate/luo_session.c
index 783677295640..25ae704d7787 100644
--- a/kernel/liveupdate/luo_session.c
+++ b/kernel/liveupdate/luo_session.c
@@ -558,8 +558,13 @@ int luo_session_deserialize(void)
 		}
 
 		scoped_guard(mutex, &session->mutex) {
-			luo_file_deserialize(&session->file_set,
-					     &sh->ser[i].file_set_ser);
+			err = luo_file_deserialize(&session->file_set,
+						   &sh->ser[i].file_set_ser);
+		}
+		if (err) {
+			pr_warn("Failed to deserialize files for session [%s] %pe\n",
+				session->name, ERR_PTR(err));
+			return err;
 		}
 	}
-- cgit v1.2.3

From 82b915051d32a68ea3bbe261c93f5620699ff047 Mon Sep 17 00:00:00 2001
From: Josh Snyder
Date: Thu, 2 Apr 2026 16:23:38 -0700
Subject: tick/nohz: Fix inverted return value in check_tick_dependency() fast
 path

Commit 56534673cea7f ("tick/nohz: Optimize check_tick_dependency() with
early return") added a fast path that returns !val when the tick_stop
tracepoint is disabled. This is inverted: the slow path returns true when
a dependency IS found (val != 0), but !val returns true when val is zero
(no dependency).

The result is that can_stop_full_tick() sees "dependency found" when
there are none, and the tick never stops on nohz_full CPUs.

Fix this by returning !!val instead of !val, matching the slow-path
semantics.

Fixes: 56534673cea7f ("tick/nohz: Optimize check_tick_dependency() with early return")
Signed-off-by: Josh Snyder
Signed-off-by: Thomas Gleixner
Assisted-by: Claude:claude-opus-4-6
Link: https://patch.msgid.link/20260402-fix-idle-tick2-v1-1-eecb589649d3@code406.com
---
 kernel/time/tick-sched.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f7907fadd63f..36449f0010a4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -345,7 +345,7 @@ static bool check_tick_dependency(atomic_t *dep)
 	int val = atomic_read(dep);
 
 	if (likely(!tracepoint_enabled(tick_stop)))
-		return !val;
+		return !!val;
 
 	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
-- cgit v1.2.3

From 14a857056466be9d3d907a94e92a704ac1be149b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Sat, 4 Apr 2026 12:22:44 +0200
Subject: sched/deadline: Use revised wakeup rule for dl_server

John noted that commit 115135422562 ("sched/deadline: Fix 'stuck'
dl_server") undid the fix from commit a3a70caf7906 ("sched/deadline: Fix
dl_server behaviour").

The issue in commit 115135422562 was for wakeups of the server after the
deadline, in which case you *have* to start a new period. The case for
a3a70caf7906 is wakeups before the deadline.

Because the server is effectively running a least-laxity policy, any
wakeup during the runnable phase means dl_entity_overflow() will be true.
This means the runtime needs to be adjusted so the server can still run
until the existing deadline expires.

Use the revised wakeup rule for dl_defer entities.
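As a worked example (numbers invented for illustration): a dl_server with
dl_runtime = 3ms out of a dl_deadline/dl_period of 10ms that wakes up 4ms
before its current deadline is, under the revised wakeup rule, given
roughly (3/10) * 4ms = 1.2ms of runtime instead of a full 3ms
replenishment. That keeps its demand within the reserved bandwidth while
still letting it run against the existing deadline rather than being
pushed into a new period.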
Fixes: 115135422562 ("sched/deadline: Fix 'stuck' dl_server")
Reported-by: John Stultz
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Juri Lelli
Tested-by: John Stultz
Link: https://patch.msgid.link/20260404102244.GB22575@noisy.programming.kicks-ass.net
---
 kernel/sched/deadline.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d08b00429323..674de6a48551 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1027,7 +1027,7 @@ static void update_dl_entity(struct sched_dl_entity *dl_se)
 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 	    dl_entity_overflow(dl_se, rq_clock(rq))) {
 
-		if (unlikely(!dl_is_implicit(dl_se) &&
+		if (unlikely((!dl_is_implicit(dl_se) || dl_se->dl_defer) &&
 			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
 			     !is_dl_boosted(dl_se))) {
 			update_dl_revised_wakeup(dl_se, rq);
-- cgit v1.2.3

From d6e152d905bdb1f32f9d99775e2f453350399a6a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 7 Apr 2026 10:54:17 +0200
Subject: clockevents: Prevent timer interrupt starvation

Calvin reported an odd NMI watchdog lockup which claims that the CPU
locked up in user space. He provided a reproducer, which sets up a
timerfd based timer and then rearms it in a loop with an absolute expiry
time of 1ns.

As the expiry time is in the past, the timer ends up as the first
expiring timer in the per CPU hrtimer base and the clockevent device is
programmed with the minimum delta value. If the machine is fast enough,
this ends up in an endless loop of reprogramming the clockevent device
with the minimum delta before the timer interrupt can fire, which starves
the interrupt and consequently triggers the lockup detector because the
hrtimer callback of the lockup mechanism is never invoked.

As a first step to prevent this, avoid reprogramming the clock event
device when:

  - a forced minimum delta event is pending

  - the new expiry delta is less than or equal to the minimum delta

Thanks to Calvin for providing the reproducer and to Borislav for testing
and providing data from his Zen5 machine. The problem is not limited to
Zen5, but depending on the underlying clock event device (e.g. the TSC
deadline timer on Intel) and the CPU speed it is not necessarily
observable.

This change serves only as the last resort and further changes will be
made to prevent this scenario earlier in the call chain as far as
possible.

[ tglx: Updated to restore the old behaviour vs.
  !force and delta <= 0 and fixed up the tick-broadcast handlers as
  pointed out by Borislav ]

Fixes: d316c57ff6bf ("[PATCH] clockevents: add core functionality")
Reported-by: Calvin Owens
Signed-off-by: Thomas Gleixner
Tested-by: Calvin Owens
Tested-by: Borislav Petkov
Link: https://lore.kernel.org/lkml/acMe-QZUel-bBYUh@mozart.vkv.me/
Link: https://patch.msgid.link/20260407083247.562657657@kernel.org
---
 kernel/time/clockevents.c    | 27 +++++++++++++++++++--------
 kernel/time/hrtimer.c        |  1 +
 kernel/time/tick-broadcast.c |  8 +++++++-
 kernel/time/tick-common.c    |  1 +
 kernel/time/tick-sched.c     |  1 +
 5 files changed, 29 insertions(+), 9 deletions(-)

(limited to 'kernel')

diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index eaae1ce9f060..38570998a19b 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -172,6 +172,7 @@ void clockevents_shutdown(struct clock_event_device *dev)
 {
 	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 	dev->next_event = KTIME_MAX;
+	dev->next_event_forced = 0;
 }
 
 /**
@@ -305,7 +306,6 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 {
 	unsigned long long clc;
 	int64_t delta;
-	int rc;
 
 	if (WARN_ON_ONCE(expires < 0))
 		return -ETIME;
@@ -324,16 +324,27 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 		return dev->set_next_ktime(expires, dev);
 
 	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
-	if (delta <= 0)
-		return force ? clockevents_program_min_delta(dev) : -ETIME;
-	delta = min(delta, (int64_t) dev->max_delta_ns);
-	delta = max(delta, (int64_t) dev->min_delta_ns);
+	/* Required for tick_periodic() during early boot */
+	if (delta <= 0 && !force)
+		return -ETIME;
+
+	if (delta > (int64_t)dev->min_delta_ns) {
+		delta = min(delta, (int64_t) dev->max_delta_ns);
+		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
+		if (!dev->set_next_event((unsigned long) clc, dev))
+			return 0;
+	}
 
-	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
-	rc = dev->set_next_event((unsigned long) clc, dev);
+	if (dev->next_event_forced)
+		return 0;
 
-	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
+	if (dev->set_next_event(dev->min_delta_ticks, dev)) {
+		if (!force || clockevents_program_min_delta(dev))
+			return -ETIME;
+	}
+	dev->next_event_forced = 1;
+	return 0;
 }
 
 /*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 860af7a58428..1e37142fe52f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1888,6 +1888,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
 	dev->next_event = KTIME_MAX;
+	dev->next_event_forced = 0;
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 	entry_time = now = hrtimer_update_base(cpu_base);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f63c65881364..7e57fa31ee26 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -76,8 +76,10 @@ const struct clock_event_device *tick_get_wakeup_device(int cpu)
  */
 static void tick_broadcast_start_periodic(struct clock_event_device *bc)
 {
-	if (bc)
+	if (bc) {
+		bc->next_event_forced = 0;
 		tick_setup_periodic(bc, 1);
+	}
 }
 
 /*
@@ -403,6 +405,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
+	tick_broadcast_device.evtdev->next_event_forced = 0;
 
 	/* Handle spurious interrupts gracefully */
 	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
@@ -696,6 +699,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 
 	raw_spin_lock(&tick_broadcast_lock);
 	dev->next_event = KTIME_MAX;
+	tick_broadcast_device.evtdev->next_event_forced = 0;
 	next_event = KTIME_MAX;
 	cpumask_clear(tmpmask);
 	now = ktime_get();
@@ -1063,6 +1067,7 @@ static void tick_broadcast_setup_oneshot(struct clock_event_device *bc,
 
 	bc->event_handler = tick_handle_oneshot_broadcast;
+	bc->next_event_forced = 0;
 	bc->next_event = KTIME_MAX;
 
 	/*
@@ -1175,6 +1180,7 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
 		}
 
 		/* This moves the broadcast assignment to this CPU: */
+		bc->next_event_forced = 0;
 		clockevents_program_event(bc, bc->next_event, 1);
 	}
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d305d8521896..6a9198a4279b 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -110,6 +110,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
 	int cpu = smp_processor_id();
 	ktime_t next = dev->next_event;
 
+	dev->next_event_forced = 0;
 	tick_periodic(cpu);
 
 	/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 36449f0010a4..d1f27df1e60e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1513,6 +1513,7 @@ static void tick_nohz_lowres_handler(struct clock_event_device *dev)
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	dev->next_event = KTIME_MAX;
+	dev->next_event_forced = 0;
 
 	if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART))
 		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
-- cgit v1.2.3
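For reference, the timerfd rearm loop described in the clockevents change
above can be approximated from userspace with a sketch like the following
(illustrative only, not the exact reproducer that was reported):

  #include <time.h>
  #include <sys/timerfd.h>
  #include <err.h>

  int main(void)
  {
  	struct itimerspec its = {
  		/* absolute expiry of 1ns, i.e. always in the past */
  		.it_value = { .tv_sec = 0, .tv_nsec = 1 },
  	};
  	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

  	if (fd < 0)
  		err(1, "timerfd_create");

  	for (;;) {
  		/* rearm with an absolute time that has already expired */
  		if (timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL))
  			err(1, "timerfd_settime");
  	}
  }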