author    Chenghao Duan <duanchenghao@kylinos.cn>  2025-12-31 15:19:21 +0800
committer Huacai Chen <chenhuacai@loongson.cn>     2025-12-31 15:19:21 +0800
commit    26138762d9a27a7f1c33f467c4123c600f64a36e (patch)
tree      98b9e628e67bf7638208bfa47abf48a46c4a0266 /arch
parent    61319d15a56093358c6822d30659fe2941f589f1 (diff)
LoongArch: BPF: Enable trampoline-based tracing for module functions
Remove the previous restriction that blocked trampoline-based tracing of kernel module functions, and fix the issue that caused kernel lockups when such functions were traced.

Before entering the trampoline code, the return address register ra must hold the address of the next instruction after the 'bl trampoline' instruction, i.e. the traced function body, and register t0 must hold the parent function's return address. Refine the trampoline return logic so that the register contents are correct both when returning to the traced function and when returning to the parent function.

Before this patch, the module_attach test in selftests/bpf hit a deadlock: the jump address taken after trampoline execution was wrong, resulting in an infinite loop within the module function.

Cc: stable@vger.kernel.org
Fixes: 677e6123e3d2 ("LoongArch: BPF: Disable trampoline for kernel module function trace")
Signed-off-by: Chenghao Duan <duanchenghao@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
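As context for the change below, a minimal sketch (not part of the patch) of the assumed ftrace patch site at the entry of a traced kernel or module function:

/*
 * A hedged sketch: the two fentry NOPs at the entry of a traced
 * kernel/module function are assumed to be rewritten by ftrace to
 *
 *   traced_func:
 *           move    t0, ra        # t0 <- return address in the parent
 *           bl      trampoline    # ra <- address of the next insn,
 *   body:                         #       i.e. the traced function body
 *           ...
 */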
Diffstat (limited to 'arch')
-rw-r--r--	arch/loongarch/net/bpf_jit.c	20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index e6aeaec46969..9f6e93343b6e 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1284,7 +1284,7 @@ static int emit_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
 		return 0;
 	}
 
-	return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
+	return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_RA : LOONGARCH_GPR_ZERO, (u64)target);
 }
 
 static int emit_call(struct jit_ctx *ctx, u64 addr)
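The hunk above makes a trampoline call link through ra rather than t0. For reference, the jirl semantics this relies on, stated here as an aside (not part of the patch):

/*
 * jirl rd, rj, offs16:
 *         GR[rd] = PC + 4;                            # link register
 *         PC     = GR[rj] + SignExtend(offs16 << 2);  # indirect target
 *
 * Linking through ra makes the emitted jump behave like a real 'bl',
 * so on trampoline entry ra holds the traced function body address
 * while t0 still holds the parent's return address.
 */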
@@ -1641,14 +1641,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 
 	/* To traced function */
 	/* Ftrace jump skips 2 NOP instructions */
-	if (is_kernel_text((unsigned long)orig_call))
+	if (is_kernel_text((unsigned long)orig_call) ||
+	    is_module_text_address((unsigned long)orig_call))
 		orig_call += LOONGARCH_FENTRY_NBYTES;
 	/* Direct jump skips 5 NOP instructions */
 	else if (is_bpf_text_address((unsigned long)orig_call))
 		orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
-	/* Module tracing not supported - cause kernel lockups */
-	else if (is_module_text_address((unsigned long)orig_call))
-		return -ENOTSUPP;
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
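The skip distances above follow from the fixed 4-byte LoongArch instruction size; a hedged reconstruction of what the two constants are assumed to expand to (not shown in this diff):

/* assumed definitions, matching the comments in the hunk above */
#define LOONGARCH_FENTRY_NBYTES		(2 * LOONGARCH_INSN_SIZE)	/* ftrace patch site: 2 NOPs */
#define LOONGARCH_BPF_FENTRY_NBYTES	(5 * LOONGARCH_INSN_SIZE)	/* BPF prog entry: 5 NOPs */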
@@ -1741,12 +1739,16 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
 		emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
 
-		if (flags & BPF_TRAMP_F_SKIP_FRAME)
+		if (flags & BPF_TRAMP_F_SKIP_FRAME) {
 			/* return to parent function */
-			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
-		else
-			/* return to traced function */
+			move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
 			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
+		} else {
+			/* return to traced function */
+			move_reg(ctx, LOONGARCH_GPR_T1, LOONGARCH_GPR_RA);
+			move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
+			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T1, 0);
+		}
 	}
 
 	ret = ctx->idx;
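Decoded, the two return sequences emitted by the final hunk are assumed to behave as follows (with ra holding the traced function body address and t0 the parent's return address, per the commit message):

/*
 * BPF_TRAMP_F_SKIP_FRAME: skip the traced function entirely
 *         move    ra, t0        # parent sees the ra a normal return leaves
 *         jirl    zero, t0, 0   # jump straight back to the parent
 *
 * otherwise: fall through into the traced function body
 *         move    t1, ra        # stash the body address
 *         move    ra, t0        # the body's own return must reach the parent
 *         jirl    zero, t1, 0   # enter the traced function body
 */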