diff options
| author | K Prateek Nayak <kprateek.nayak@amd.com> | 2026-03-12 04:44:33 +0000 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2026-03-18 09:06:50 +0100 |
| commit | f1320a8dd8ba6518ddb53ea4e3efcb49dc41d257 (patch) | |
| tree | 8d15268ec9592f5ac582154e692ae8aea4f8e456 /kernel | |
| parent | fa6874dfeee06352ce7c4c271be6a25d84a38b54 (diff) | |
sched/fair: Simplify the entry condition for update_idle_cpu_scan()
Only the topmost SD_SHARE_LLC domain has the "sd->shared" assigned.
Simply use "sd->shared" as an indicator for load balancing at the highest
SD_SHARE_LLC domain in update_idle_cpu_scan() instead of relying on
llc_size.
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chen Yu <yu.c.chen@intel.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://patch.msgid.link/20260312044434.1974-9-kprateek.nayak@amd.com
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 10 |
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e24d3e16522..85c22f0f8de8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11234,6 +11234,7 @@ static void update_idle_cpu_scan(struct lb_env *env,
 				  unsigned long sum_util)
 {
 	struct sched_domain_shared *sd_share;
+	struct sched_domain *sd = env->sd;
 	int llc_weight, pct;
 	u64 x, y, tmp;
 	/*
@@ -11247,11 +11248,7 @@ static void update_idle_cpu_scan(struct lb_env *env,
 	if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
 		return;
 
-	llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
-	if (env->sd->span_weight != llc_weight)
-		return;
-
-	sd_share = rcu_dereference_all(per_cpu(sd_llc_shared, env->dst_cpu));
+	sd_share = sd->shared;
 	if (!sd_share)
 		return;
@@ -11285,10 +11282,11 @@ static void update_idle_cpu_scan(struct lb_env *env,
 	 */
 	/* equation [3] */
 	x = sum_util;
+	llc_weight = sd->span_weight;
 	do_div(x, llc_weight);
 	/* equation [4] */
-	pct = env->sd->imbalance_pct;
+	pct = sd->imbalance_pct;
 	tmp = x * x * pct * pct;
 	do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
 	tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
