author    K Prateek Nayak <kprateek.nayak@amd.com>    2026-03-12 04:44:28 +0000
committer Peter Zijlstra <peterz@infradead.org>       2026-03-18 09:06:48 +0100
commit    1cc8a33ca7e8d38f962b64ece2a42c411a67bc76 (patch)
tree      718c48c584e00b1eadbe857b914cb4ec613270d0
parent    5a7b576b3ec1acc2694c5b58f80cd1d44a11b2c1 (diff)
sched/topology: Allocate per-CPU sched_domain_shared in s_data
The "sched_domain_shared" object is allocated for every topology level in __sdt_alloc() and is freed post sched domain rebuild if they aren't assigned during sd_init(). "sd->shared" is only assigned for SD_SHARE_LLC domains and out of all the assigned objects, only "sd_llc_shared" is ever used by the scheduler. Since only "sd_llc_shared" is ever used, and since SD_SHARE_LLC domains never overlap, allocate only a single range of per-CPU "sched_domain_shared" object with s_data instead of doing it per topology level. The subsequent commit uses the degeneration path to correctly assign the "sd->shared" to the topmost SD_SHARE_LLC domain. No functional changes are expected at this point. Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Valentin Schneider <vschneid@redhat.com> Reviewed-by: Chen Yu <yu.c.chen@intel.com> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Link: https://patch.msgid.link/20260312044434.1974-4-kprateek.nayak@amd.com
-rw-r--r--  kernel/sched/topology.c  48
1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6303790a4143..9006586720bf 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -782,6 +782,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 }
 
 struct s_data {
+	struct sched_domain_shared * __percpu *sds;
 	struct sched_domain * __percpu *sd;
 	struct root_domain *rd;
 };
@@ -789,6 +790,7 @@ struct s_data {
 enum s_alloc {
 	sa_rootdomain,
 	sa_sd,
+	sa_sd_shared,
 	sa_sd_storage,
 	sa_none,
 };
@@ -1535,6 +1537,9 @@ static void set_domain_attribute(struct sched_domain *sd,
 static void __sdt_free(const struct cpumask *cpu_map);
 static int __sdt_alloc(const struct cpumask *cpu_map);
 
+static void __sds_free(struct s_data *d, const struct cpumask *cpu_map);
+static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map);
+
 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 				 const struct cpumask *cpu_map)
 {
@@ -1546,6 +1551,9 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	case sa_sd:
 		free_percpu(d->sd);
 		fallthrough;
+	case sa_sd_shared:
+		__sds_free(d, cpu_map);
+		fallthrough;
 	case sa_sd_storage:
 		__sdt_free(cpu_map);
 		fallthrough;
@@ -1561,9 +1569,11 @@ __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
 
 	if (__sdt_alloc(cpu_map))
 		return sa_sd_storage;
+	if (__sds_alloc(d, cpu_map))
+		return sa_sd_shared;
 	d->sd = alloc_percpu(struct sched_domain *);
 	if (!d->sd)
-		return sa_sd_storage;
+		return sa_sd_shared;
 	d->rd = alloc_rootdomain();
 	if (!d->rd)
 		return sa_sd;
@@ -2464,6 +2474,42 @@ static void __sdt_free(const struct cpumask *cpu_map)
 	}
 }
 
+static int __sds_alloc(struct s_data *d, const struct cpumask *cpu_map)
+{
+	int j;
+
+	d->sds = alloc_percpu(struct sched_domain_shared *);
+	if (!d->sds)
+		return -ENOMEM;
+
+	for_each_cpu(j, cpu_map) {
+		struct sched_domain_shared *sds;
+
+		sds = kzalloc_node(sizeof(struct sched_domain_shared),
+				   GFP_KERNEL, cpu_to_node(j));
+		if (!sds)
+			return -ENOMEM;
+
+		*per_cpu_ptr(d->sds, j) = sds;
+	}
+
+	return 0;
+}
+
+static void __sds_free(struct s_data *d, const struct cpumask *cpu_map)
+{
+	int j;
+
+	if (!d->sds)
+		return;
+
+	for_each_cpu(j, cpu_map)
+		kfree(*per_cpu_ptr(d->sds, j));
+
+	free_percpu(d->sds);
+	d->sds = NULL;
+}
+
 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
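
[Editor's note] The error handling above leans on the staged unwind in
__free_domain_allocs(): __visit_domain_allocation_hell() reports how far
allocation got, and freeing falls through from that stage downward, which is
why __sds_alloc() can bail mid-loop and leave cleanup to __sds_free(). Below
is a compilable userspace sketch of that pattern; the stage and resource
names (st_*, struct ctx, ctx_alloc, ctx_free) are invented for illustration.

    #include <stdlib.h>

    /* Invented stages, ordered like enum s_alloc: topmost first. */
    enum stage { st_all, st_b, st_a, st_none };

    struct ctx { void *a, *b; };

    /*
     * Like __visit_domain_allocation_hell(): on failure, return the stage
     * whose case in ctx_free() cleans up everything allocated so far.
     */
    static enum stage ctx_alloc(struct ctx *c)
    {
            c->a = malloc(16);
            if (!c->a)
                    return st_none;        /* nothing to unwind yet */
            c->b = malloc(16);
            if (!c->b)
                    return st_a;           /* unwind 'a' and below */
            return st_all;
    }

    /*
     * Like __free_domain_allocs(): start at the reported stage and fall
     * through, freeing each stage's resource on the way down.
     */
    static void ctx_free(struct ctx *c, enum stage what)
    {
            switch (what) {
            case st_all:
            case st_b:
                    free(c->b);
                    /* fallthrough */
            case st_a:
                    free(c->a);
                    /* fallthrough */
            case st_none:
                    break;
            }
    }

    int main(void)
    {
            struct ctx c = { 0 };

            ctx_free(&c, ctx_alloc(&c));   /* correct for success or any failure */
            return 0;
    }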