summary refs log tree commit diff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-19 14:45:37 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-19 14:45:37 -0700
commitc1f49dea2b8f335813d3b348fd39117fb8efb428 (patch)
treeb3ce612d118f663cf5dd4755f01a65f42bfcaf5c
parent8c2bf4a2e5cb4b325e328cc8808858a68616067c (diff)
parent95093e5cb4c5b50a5b1a4b79f2942b62744bd66a (diff)
Merge tag 'mm-hotfixes-stable-2026-04-19-00-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm (HEAD, master)
Pull MM fixes from Andrew Morton:
 "7 hotfixes. 6 are cc:stable and all are for MM. Please see the
  individual changelogs for details"

* tag 'mm-hotfixes-stable-2026-04-19-00-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/damon/core: disallow non-power of two min_region_sz on damon_start()
  mm/vmalloc: take vmap_purge_lock in shrinker
  mm: call ->free_folio() directly in folio_unmap_invalidate()
  mm: blk-cgroup: fix use-after-free in cgwb_release_workfn()
  mm/zone_device: do not touch device folio after calling ->folio_free()
  mm/damon/core: disallow time-quota setting zero esz
  mm/mempolicy: fix weighted interleave auto sysfs name
-rw-r--r--mm/backing-dev.c5
-rw-r--r--mm/damon/core.c13
-rw-r--r--mm/filemap.c3
-rw-r--r--mm/internal.h1
-rw-r--r--mm/mempolicy.c8
-rw-r--r--mm/memremap.c2
-rw-r--r--mm/truncate.c6
-rw-r--r--mm/vmalloc.c1
8 files changed, 27 insertions, 12 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7a18fa6c7272..cecbcf9060a6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -618,12 +618,13 @@ static void cgwb_release_workfn(struct work_struct *work)
wb_shutdown(wb);
css_put(wb->memcg_css);
- css_put(wb->blkcg_css);
- mutex_unlock(&wb->bdi->cgwb_release_mutex);
/* triggers blkg destruction if no online users left */
blkcg_unpin_online(wb->blkcg_css);
+ css_put(wb->blkcg_css);
+ mutex_unlock(&wb->bdi->cgwb_release_mutex);
+
fprop_local_destroy_percpu(&wb->memcg_completions);
spin_lock_irq(&cgwb_lock);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index fa9531d8e7f8..3dbbbfdeff71 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1477,6 +1477,11 @@ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive)
int i;
int err = 0;
+ for (i = 0; i < nr_ctxs; i++) {
+ if (!is_power_of_2(ctxs[i]->min_region_sz))
+ return -EINVAL;
+ }
+
mutex_lock(&damon_lock);
if ((exclusive && nr_running_ctxs) ||
(!exclusive && running_exclusive_ctxs)) {
@@ -2384,7 +2389,8 @@ static void damos_goal_tune_esz_bp_temporal(struct damos_quota *quota)
/*
* Called only if quota->ms, or quota->sz are set, or quota->goals is not empty
*/
-static void damos_set_effective_quota(struct damos_quota *quota)
+static void damos_set_effective_quota(struct damos_quota *quota,
+ struct damon_ctx *ctx)
{
unsigned long throughput;
unsigned long esz = ULONG_MAX;
@@ -2409,6 +2415,7 @@ static void damos_set_effective_quota(struct damos_quota *quota)
else
throughput = PAGE_SIZE * 1024;
esz = min(throughput * quota->ms, esz);
+ esz = max(ctx->min_region_sz, esz);
}
if (quota->sz && quota->sz < esz)
@@ -2445,7 +2452,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
/* First charge window */
if (!quota->total_charged_sz && !quota->charged_from) {
quota->charged_from = jiffies;
- damos_set_effective_quota(quota);
+ damos_set_effective_quota(quota, c);
}
/* New charge window starts */
@@ -2460,7 +2467,7 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
quota->charged_sz = 0;
if (trace_damos_esz_enabled())
cached_esz = quota->esz;
- damos_set_effective_quota(quota);
+ damos_set_effective_quota(quota, c);
if (trace_damos_esz_enabled() && quota->esz != cached_esz)
damos_trace_esz(c, s, quota);
}
diff --git a/mm/filemap.c b/mm/filemap.c
index c568d9058ff8..4e636647100c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -228,7 +228,8 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
page_cache_delete(mapping, folio, shadow);
}
-void filemap_free_folio(struct address_space *mapping, struct folio *folio)
+static void filemap_free_folio(const struct address_space *mapping,
+ struct folio *folio)
{
void (*free_folio)(struct folio *);
diff --git a/mm/internal.h b/mm/internal.h
index 41398ecb2201..5a2ddcf68e0b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -557,7 +557,6 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
-void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
loff_t end);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 99179314a444..4e4421b22b59 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -3788,9 +3788,11 @@ static void wi_state_free(void)
}
}
-static struct kobj_attribute wi_auto_attr =
- __ATTR(auto, 0664, weighted_interleave_auto_show,
- weighted_interleave_auto_store);
+static struct kobj_attribute wi_auto_attr = {
+ .attr = { .name = "auto", .mode = 0664 },
+ .show = weighted_interleave_auto_show,
+ .store = weighted_interleave_auto_store,
+};
static void wi_cleanup(void) {
sysfs_remove_file(&wi_group->wi_kobj, &wi_auto_attr.attr);
diff --git a/mm/memremap.c b/mm/memremap.c
index ac7be07e3361..053842d45cb1 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -454,7 +454,7 @@ void free_zone_device_folio(struct folio *folio)
if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->folio_free))
break;
pgmap->ops->folio_free(folio);
- percpu_ref_put_many(&folio->pgmap->ref, nr);
+ percpu_ref_put_many(&pgmap->ref, nr);
break;
case MEMORY_DEVICE_GENERIC:
diff --git a/mm/truncate.c b/mm/truncate.c
index 2931d66c16d0..12cc89f89afc 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -622,6 +622,7 @@ static int folio_launder(struct address_space *mapping, struct folio *folio)
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
gfp_t gfp)
{
+ void (*free_folio)(struct folio *);
int ret;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -648,9 +649,12 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
xa_unlock_irq(&mapping->i_pages);
if (mapping_shrinkable(mapping))
inode_lru_list_add(mapping->host);
+ free_folio = mapping->a_ops->free_folio;
spin_unlock(&mapping->host->i_lock);
- filemap_free_folio(mapping, folio);
+ if (free_folio)
+ free_folio(folio);
+ folio_put_refs(folio, folio_nr_pages(folio));
return 1;
failed:
xa_unlock_irq(&mapping->i_pages);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b31b208f6ecb..aa08651ec0df 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5416,6 +5416,7 @@ vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct vmap_node *vn;
+ guard(mutex)(&vmap_purge_lock);
for_each_vmap_node(vn)
decay_va_pool_node(vn, true);