diff options
| author | Tejun Heo <tj@kernel.org> | 2026-03-06 07:58:04 -1000 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2026-03-06 07:58:04 -1000 |
| commit | 54be8de4236a52b301825cb51c6d5fdecb2fd6b8 (patch) | |
| tree | 155e97f1dea40dd6ba382be093dc89594a0c2409 /kernel | |
| parent | 0d8c551dd5de1c157600da05a01e3147115dfbb4 (diff) | |
sched_ext: Factor out scx_link_sched() and scx_unlink_sched()
Factor out scx_link_sched() and scx_unlink_sched() functions to reduce
code duplication in the scheduler enable/disable paths.
No functional change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/ext.c | 53 |
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index f10a9667b491..8674a5fa5437 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -4830,6 +4830,33 @@ static void refresh_watchdog(void) cancel_delayed_work_sync(&scx_watchdog_work); } +static void scx_link_sched(struct scx_sched *sch) +{ + scoped_guard(raw_spinlock_irq, &scx_sched_lock) { +#ifdef CONFIG_EXT_SUB_SCHED + struct scx_sched *parent = scx_parent(sch); + if (parent) + list_add_tail(&sch->sibling, &parent->children); +#endif /* CONFIG_EXT_SUB_SCHED */ + list_add_tail_rcu(&sch->all, &scx_sched_all); + } + + refresh_watchdog(); +} + +static void scx_unlink_sched(struct scx_sched *sch) +{ + scoped_guard(raw_spinlock_irq, &scx_sched_lock) { +#ifdef CONFIG_EXT_SUB_SCHED + if (scx_parent(sch)) + list_del_init(&sch->sibling); +#endif /* CONFIG_EXT_SUB_SCHED */ + list_del_rcu(&sch->all); + } + + refresh_watchdog(); +} + #ifdef CONFIG_EXT_SUB_SCHED static DECLARE_WAIT_QUEUE_HEAD(scx_unlink_waitq); @@ -4979,12 +5006,7 @@ static void scx_sub_disable(struct scx_sched *sch) synchronize_rcu_expedited(); disable_bypass_dsp(sch); - raw_spin_lock_irq(&scx_sched_lock); - list_del_init(&sch->sibling); - list_del_rcu(&sch->all); - raw_spin_unlock_irq(&scx_sched_lock); - - refresh_watchdog(); + scx_unlink_sched(sch); mutex_unlock(&scx_enable_mutex); @@ -5120,11 +5142,7 @@ static void scx_root_disable(struct scx_sched *sch) if (sch->ops.exit) SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei); - raw_spin_lock_irq(&scx_sched_lock); - list_del_rcu(&sch->all); - raw_spin_unlock_irq(&scx_sched_lock); - - refresh_watchdog(); + scx_unlink_sched(sch); /* * scx_root clearing must be inside cpus_read_lock(). 
See @@ -5888,11 +5906,7 @@ static void scx_root_enable_workfn(struct kthread_work *work) */ rcu_assign_pointer(scx_root, sch); - raw_spin_lock_irq(&scx_sched_lock); - list_add_tail_rcu(&sch->all, &scx_sched_all); - raw_spin_unlock_irq(&scx_sched_lock); - - refresh_watchdog(); + scx_link_sched(sch); scx_idle_enable(ops); @@ -6157,12 +6171,7 @@ static void scx_sub_enable_workfn(struct kthread_work *work) goto out_put_cgrp; } - raw_spin_lock_irq(&scx_sched_lock); - list_add_tail(&sch->sibling, &parent->children); - list_add_tail_rcu(&sch->all, &scx_sched_all); - raw_spin_unlock_irq(&scx_sched_lock); - - refresh_watchdog(); + scx_link_sched(sch); if (sch->level >= SCX_SUB_MAX_DEPTH) { scx_error(sch, "max nesting depth %d violated", |
