| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-01-15 15:13:05 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-01-15 15:13:05 -0800 |
| commit | 88e490913f079f0c0f9535e844077f9d31c369aa | |
| tree | 808d0d336752cd7ae68c197e8714cba2defe64fd | |
| parent | 603c05a1639f60e0c52c5fdd25cf5e0b44b9bd8e | |
| parent | be55257fab181b93af38f8c4b1b3cb453a78d742 | |
Merge tag 'ftrace-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull ftrace fix from Steven Rostedt:
- Fix allocation accounting on boot up
The ftrace records for each function that ftrace can attach to are stored in groups of pages (a reduced sketch of the bookkeeping structure appears after this summary). At boot up, the number of pages needed is calculated and the pages are allocated; after that they are filled in with data. More pages than needed may be allocated because some functions end up not being recorded (they are unused weak functions), and the number of entries skipped this way is recorded as well.
After the data is filled in, a check is made to make sure the right number of pages was allocated. That check was off because it assumed that the same number of entries fits on every page. Since the size of an entry does not evenly divide PAGE_SIZE, a rounding error accumulates when a large number of pages is allocated to hold the records (the arithmetic is sketched after this summary). This causes the check to fail and triggers a warning.
Fix the accounting by having the functions that allocate the pages report how many pages they actually allocated, and use that number to verify that every allocated page was used and that the unused ones are properly freed.
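For orientation, here is a reduced sketch of the per-group bookkeeping referred to above. It is not a verbatim copy of the kernel definition: the field set is limited to what the diff below actually touches (next, index, order), with records included for context, and the comments are editorial.

```c
/* Reduced sketch only; see struct ftrace_page in kernel/trace/ftrace.c. */
struct dyn_ftrace;                      /* one record per traceable function   */

struct ftrace_page {
        struct ftrace_page *next;       /* groups are chained in a list        */
        struct dyn_ftrace  *records;    /* 2^order contiguous pages of records */
        int                 index;      /* entries actually filled in          */
        int                 order;      /* allocation order of this group      */
};
```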
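And a stand-alone illustration of the rounding error, plus the byte-based page count that the fix switches to. This is plain user-space C, not kernel code; the 24-byte ENTRY_SIZE is an assumed example of a dyn_ftrace size that does not divide PAGE_SIZE evenly (on configurations where it does divide evenly, the drift never shows up).

```c
/* Illustrative arithmetic only; ENTRY_SIZE is an assumed example value. */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define ENTRY_SIZE       24UL                     /* assumed, for illustration */
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) /* 170, rounded down         */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long order = 3;  /* one order-3 group = 8 contiguous pages */
        unsigned long count = (PAGE_SIZE << order) / ENTRY_SIZE; /* 1365 fit */

        /* The per-page estimate loses the leftover bytes of every page. */
        printf("group capacity: %lu entries, per-page estimate: %lu entries\n",
               count, ENTRIES_PER_PAGE << order);

        /* Old per-entry rounding vs. the byte-based count the fix uses. */
        printf("pages (old, per-entry rounding): %lu\n",
               DIV_ROUND_UP(count, ENTRIES_PER_PAGE));      /* 9: one too many */
        printf("pages (new, byte-based):         %lu\n",
               DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE)); /* 8: exact       */
        return 0;
}
```

With these numbers, an order-3 group really holds 1365 records, five more than the per-page estimate of 1360, and the old page count asks for 9 pages where 8 are enough.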
* tag 'ftrace-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
ftrace: Do not over-allocate ftrace memory
| -rw-r--r-- | kernel/trace/ftrace.c | 29 |
1 file changed, 15 insertions(+), 14 deletions(-)
```diff
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ef2d5dca6f70..aa758efc3731 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1148,7 +1148,6 @@ struct ftrace_page {
 };
 
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
-#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
 static struct ftrace_page *ftrace_pages_start;
 static struct ftrace_page *ftrace_pages;
@@ -3834,7 +3833,8 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
         return 0;
 }
 
-static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+static int ftrace_allocate_records(struct ftrace_page *pg, int count,
+                                   unsigned long *num_pages)
 {
         int order;
         int pages;
@@ -3844,7 +3844,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
                 return -EINVAL;
 
         /* We want to fill as much as possible, with no empty pages */
-        pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+        pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
         order = fls(pages) - 1;
 
 again:
@@ -3859,6 +3859,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
         }
 
         ftrace_number_of_pages += 1 << order;
+        *num_pages += 1 << order;
         ftrace_number_of_groups++;
 
         cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
@@ -3887,12 +3888,14 @@ static void ftrace_free_pages(struct ftrace_page *pages)
 }
 
 static struct ftrace_page *
-ftrace_allocate_pages(unsigned long num_to_init)
+ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages)
 {
         struct ftrace_page *start_pg;
         struct ftrace_page *pg;
         int cnt;
 
+        *num_pages = 0;
+
         if (!num_to_init)
                 return NULL;
 
@@ -3906,7 +3909,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
          * waste as little space as possible.
          */
         for (;;) {
-                cnt = ftrace_allocate_records(pg, num_to_init);
+                cnt = ftrace_allocate_records(pg, num_to_init, num_pages);
                 if (cnt < 0)
                         goto free_pages;
 
@@ -7192,8 +7195,6 @@ static int ftrace_process_locs(struct module *mod,
         if (!count)
                 return 0;
 
-        pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
-
         /*
          * Sorting mcount in vmlinux at build time depend on
          * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
@@ -7206,7 +7207,7 @@ static int ftrace_process_locs(struct module *mod,
                 test_is_sorted(start, count);
         }
 
-        start_pg = ftrace_allocate_pages(count);
+        start_pg = ftrace_allocate_pages(count, &pages);
         if (!start_pg)
                 return -ENOMEM;
 
@@ -7305,27 +7306,27 @@ static int ftrace_process_locs(struct module *mod,
         /* We should have used all pages unless we skipped some */
         if (pg_unuse) {
                 unsigned long pg_remaining, remaining = 0;
-                unsigned long skip;
+                long skip;
 
                 /* Count the number of entries unused and compare it to skipped. */
-                pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
+                pg_remaining = (PAGE_SIZE << pg->order) / ENTRY_SIZE - pg->index;
 
                 if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
 
                         skip = skipped - pg_remaining;
 
-                        for (pg = pg_unuse; pg; pg = pg->next)
+                        for (pg = pg_unuse; pg && skip > 0; pg = pg->next) {
                                 remaining += 1 << pg->order;
+                                skip -= (PAGE_SIZE << pg->order) / ENTRY_SIZE;
+                        }
 
                         pages -= remaining;
 
-                        skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
-
                         /*
                          * Check to see if the number of pages remaining would
                          * just fit the number of entries skipped.
                          */
-                        WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
+                        WARN(pg || skip > 0, "Extra allocated pages for ftrace: %lu with %lu skipped",
                              remaining, skipped);
                 }
                 /* Need to synchronize with ftrace_location_range() */
```
