diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-04-15 10:15:39 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-04-15 10:15:39 -0700 |
| commit | 05cef13fa80de8cec481ae5a015e58bc6340ca2d (patch) | |
| tree | 2a50a6b66121d25addb8f6b1b8faf5e936ad8324 /lib | |
| parent | a8e7ef3cec99ba2487110e01d77a8a278593b3e9 (diff) | |
| parent | 44e0ebe4accd67c67134cf3b805917153041a300 (diff) | |
Merge tag 'slab-for-7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
- Sheaves performance improvements for systems with memoryless NUMA
nodes, developed in response to regression reports.
These mainly ensure that percpu sheaves exist and are used on cpus
that belong to these memoryless nodes (Vlastimil Babka, Hao Li).
- Cleanup API usage and constify sysfs attributes (Thomas Weißschuh)
- Disable kfree_rcu() batching on builds intended for fuzzing/debugging
that enable CONFIG_RCU_STRICT_GRACE_PERIOD (Jann Horn)
- Add a kunit test for kmalloc_nolock()/kfree_nolock() (Harry Yoo)
* tag 'slab-for-7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
slub: clarify kmem_cache_refill_sheaf() comments
lib/tests/slub_kunit: add a test case for {kmalloc,kfree}_nolock
MAINTAINERS: add lib/tests/slub_kunit.c to SLAB ALLOCATOR section
slub: use N_NORMAL_MEMORY in can_free_to_pcs to handle remote frees
slab,rcu: disable KVFREE_RCU_BATCHED for strict grace period
slab: free remote objects to sheaves on memoryless nodes
slab: create barns for online memoryless nodes
slab: decouple pointer to barn from kmem_cache_node
slab: remove alloc_full_sheaf()
mm/slab: constify sysfs attributes
mm/slab: create sysfs attribute through default_groups
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/tests/slub_kunit.c | 92 |
1 file changed, 92 insertions, 0 deletions
diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c index 848b682a2d70..fa6d31dbca16 100644 --- a/lib/tests/slub_kunit.c +++ b/lib/tests/slub_kunit.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/rcupdate.h> #include <linux/delay.h> +#include <linux/perf_event.h> #include "../mm/slab.h" static struct kunit_resource resource; @@ -291,6 +292,94 @@ static void test_krealloc_redzone_zeroing(struct kunit *test) kmem_cache_destroy(s); } +#ifdef CONFIG_PERF_EVENTS +#define NR_ITERATIONS 1000 +#define NR_OBJECTS 1000 +static void *objects[NR_OBJECTS]; + +struct test_nolock_context { + struct kunit *test; + int callback_count; + int alloc_ok; + int alloc_fail; + struct perf_event *event; +}; + +static struct perf_event_attr hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, + .freq = 1, + .sample_freq = 100000, +}; + +static void overflow_handler_test_kmalloc_kfree_nolock(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + void *objp; + gfp_t gfp; + struct test_nolock_context *ctx = event->overflow_handler_context; + + /* __GFP_ACCOUNT to test kmalloc_nolock() in alloc_slab_obj_exts() */ + gfp = (ctx->callback_count % 2) ? 
0 : __GFP_ACCOUNT; + objp = kmalloc_nolock(64, gfp, NUMA_NO_NODE); + + if (objp) + ctx->alloc_ok++; + else + ctx->alloc_fail++; + + kfree_nolock(objp); + ctx->callback_count++; +} + +static void test_kmalloc_kfree_nolock(struct kunit *test) +{ + int i, j; + struct test_nolock_context ctx = { .test = test }; + struct perf_event *event; + bool alloc_fail = false; + + event = perf_event_create_kernel_counter(&hw_attr, -1, current, + overflow_handler_test_kmalloc_kfree_nolock, + &ctx); + if (IS_ERR(event)) + kunit_skip(test, "Failed to create perf event"); + ctx.event = event; + perf_event_enable(ctx.event); + for (i = 0; i < NR_ITERATIONS; i++) { + for (j = 0; j < NR_OBJECTS; j++) { + gfp_t gfp = (i % 2) ? GFP_KERNEL : GFP_KERNEL_ACCOUNT; + + objects[j] = kmalloc(64, gfp); + if (!objects[j]) { + j--; + while (j >= 0) + kfree(objects[j--]); + alloc_fail = true; + goto cleanup; + } + } + for (j = 0; j < NR_OBJECTS; j++) + kfree(objects[j]); + } + +cleanup: + perf_event_disable(ctx.event); + perf_event_release_kernel(ctx.event); + + kunit_info(test, "callback_count: %d, alloc_ok: %d, alloc_fail: %d\n", + ctx.callback_count, ctx.alloc_ok, ctx.alloc_fail); + + if (alloc_fail) + kunit_skip(test, "Allocation failed"); + KUNIT_EXPECT_EQ(test, 0, slab_errors); +} +#endif + static int test_init(struct kunit *test) { slab_errors = 0; @@ -315,6 +404,9 @@ static struct kunit_case test_cases[] = { KUNIT_CASE(test_kfree_rcu_wq_destroy), KUNIT_CASE(test_leak_destroy), KUNIT_CASE(test_krealloc_redzone_zeroing), +#ifdef CONFIG_PERF_EVENTS + KUNIT_CASE_SLOW(test_kmalloc_kfree_nolock), +#endif {} }; |
