Hi,
Here is the 3rd version of the series to convert the kprobes selftest
to KUnit and add test cases for stacktrace on kretprobes, which has
been fixed recently on x86. The previous version is here:
https://lore.kernel.org/all/163430224341.459050.2369208860773018092.stgit@devnote2/T/#u
In this version, I fixed the ARM trampoline code and added the version tag.
I also dropped the RFC patch; it may be discussed in another series.
Thank you,
---
Masami Hiramatsu (8):
kprobes: Add a test case for stacktrace from kretprobe handler
x86/unwind: Compile kretprobe fixup code only if CONFIG_KRETPROBES=y
arm64: kprobes: Record frame pointer with kretprobe instance
arm64: kprobes: Make a frame pointer on __kretprobe_trampoline
arm64: Recover kretprobe modified return address in stacktrace
ARM: clang: Do not rely on lr register for stacktrace
ARM: kprobes: Make a frame pointer on __kretprobe_trampoline
ARM: Recover kretprobe modified return address in stacktrace
Sven Schnelle (1):
kprobes: convert tests to kunit
arch/Kconfig | 8 +
arch/arm/Kconfig | 1
arch/arm/include/asm/stacktrace.h | 9 +
arch/arm/kernel/return_address.c | 4
arch/arm/kernel/stacktrace.c | 17 +
arch/arm/probes/kprobes/core.c | 28 ++
arch/arm64/Kconfig | 1
arch/arm64/include/asm/stacktrace.h | 4
arch/arm64/kernel/probes/kprobes.c | 4
arch/arm64/kernel/probes/kprobes_trampoline.S | 4
arch/arm64/kernel/stacktrace.c | 7
arch/x86/Kconfig | 1
arch/x86/include/asm/unwind.h | 6
kernel/kprobes.c | 3
kernel/test_kprobes.c | 374 ++++++++++++++-----------
lib/Kconfig.debug | 3
16 files changed, 302 insertions(+), 172 deletions(-)
--
Masami Hiramatsu (Linaro) <[email protected]>
From: Sven Schnelle <[email protected]>
This converts the kprobes testcases to use the kunit framework.
It adds a dependency on CONFIG_KUNIT, and the output will change
to TAP:
TAP version 14
1..1
# Subtest: kprobes_test
1..4
random: crng init done
ok 1 - test_kprobe
ok 2 - test_kprobes
ok 3 - test_kretprobe
ok 4 - test_kretprobes
ok 1 - kprobes_test
Note that the kprobes testcases are no longer run immediately after
kprobes initialization, but as a late initcall when kunit is
initialized. kprobes itself is initialized with an early initcall,
so the order is still correct.
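For reference, below is a minimal sketch (with made-up names, not part
of this patch) of how a built-in KUnit suite coexists with a subsystem
that uses an early initcall: kunit_test_suites() runs the suite when
KUnit initializes (a late initcall), so the probed code is always set
up first.

  #include <kunit/test.h>
  #include <linux/init.h>

  static int __init my_subsys_init(void)
  {
  	/* Ready at early_initcall time, long before the tests run. */
  	return 0;
  }
  early_initcall(my_subsys_init);

  static void my_subsys_smoke_test(struct kunit *test)
  {
  	KUNIT_EXPECT_TRUE(test, true);
  }

  static struct kunit_case my_subsys_test_cases[] = {
  	KUNIT_CASE(my_subsys_smoke_test),
  	{}
  };

  static struct kunit_suite my_subsys_test_suite = {
  	.name = "my_subsys_test",
  	.test_cases = my_subsys_test_cases,
  };
  kunit_test_suites(&my_subsys_test_suite);	/* run at KUnit init */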
Signed-off-by: Sven Schnelle <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>
Signed-off-by: Masami Hiramatsu <[email protected]>
---
kernel/kprobes.c | 3 -
kernel/test_kprobes.c | 222 +++++++++++++------------------------------------
lib/Kconfig.debug | 3 -
3 files changed, 61 insertions(+), 167 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b62af9fc3607..4676627cb066 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2581,9 +2581,6 @@ static int __init init_kprobes(void)
err = register_module_notifier(&kprobe_module_nb);
kprobes_initialized = (err == 0);
-
- if (!err)
- init_test_probes();
return err;
}
early_initcall(init_kprobes);
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 76c997fdbc9d..e78f18144145 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -5,18 +5,17 @@
* Copyright IBM Corp. 2008
*/
-#define pr_fmt(fmt) "Kprobe smoke test: " fmt
-
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
+#include <kunit/test.h>
#define div_factor 3
static u32 rand1, preh_val, posth_val;
-static int errors, handler_errors, num_tests;
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
+static struct kunit *current_test;
static noinline u32 kprobe_target(u32 value)
{
@@ -25,10 +24,7 @@ static noinline u32 kprobe_target(u32 value)
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
- if (preemptible()) {
- handler_errors++;
- pr_err("pre-handler is preemptible\n");
- }
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
preh_val = (rand1 / div_factor);
return 0;
}
@@ -36,14 +32,8 @@ static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
- if (preemptible()) {
- handler_errors++;
- pr_err("post-handler is preemptible\n");
- }
- if (preh_val != (rand1 / div_factor)) {
- handler_errors++;
- pr_err("incorrect value in post_handler\n");
- }
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor));
posth_val = preh_val + div_factor;
}
@@ -53,30 +43,14 @@ static struct kprobe kp = {
.post_handler = kp_post_handler
};
-static int test_kprobe(void)
+static void test_kprobe(struct kunit *test)
{
- int ret;
-
- ret = register_kprobe(&kp);
- if (ret < 0) {
- pr_err("register_kprobe returned %d\n", ret);
- return ret;
- }
-
- ret = target(rand1);
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
+ target(rand1);
unregister_kprobe(&kp);
-
- if (preh_val == 0) {
- pr_err("kprobe pre_handler not called\n");
- handler_errors++;
- }
-
- if (posth_val == 0) {
- pr_err("kprobe post_handler not called\n");
- handler_errors++;
- }
-
- return 0;
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
}
static noinline u32 kprobe_target2(u32 value)
@@ -93,10 +67,7 @@ static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
- if (preh_val != (rand1 / div_factor) + 1) {
- handler_errors++;
- pr_err("incorrect value in post_handler2\n");
- }
+ KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
posth_val = preh_val + div_factor;
}
@@ -106,51 +77,31 @@ static struct kprobe kp2 = {
.post_handler = kp_post_handler2
};
-static int test_kprobes(void)
+static void test_kprobes(struct kunit *test)
{
- int ret;
struct kprobe *kps[2] = {&kp, &kp2};
+ current_test = test;
+
/* addr and flags should be cleard for reusing kprobe. */
kp.addr = NULL;
kp.flags = 0;
- ret = register_kprobes(kps, 2);
- if (ret < 0) {
- pr_err("register_kprobes returned %d\n", ret);
- return ret;
- }
+ KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
preh_val = 0;
posth_val = 0;
- ret = target(rand1);
+ target(rand1);
- if (preh_val == 0) {
- pr_err("kprobe pre_handler not called\n");
- handler_errors++;
- }
-
- if (posth_val == 0) {
- pr_err("kprobe post_handler not called\n");
- handler_errors++;
- }
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
preh_val = 0;
posth_val = 0;
- ret = target2(rand1);
-
- if (preh_val == 0) {
- pr_err("kprobe pre_handler2 not called\n");
- handler_errors++;
- }
-
- if (posth_val == 0) {
- pr_err("kprobe post_handler2 not called\n");
- handler_errors++;
- }
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
unregister_kprobes(kps, 2);
- return 0;
-
}
#ifdef CONFIG_KRETPROBES
@@ -158,10 +109,7 @@ static u32 krph_val;
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
- if (preemptible()) {
- handler_errors++;
- pr_err("kretprobe entry handler is preemptible\n");
- }
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
krph_val = (rand1 / div_factor);
return 0;
}
@@ -170,19 +118,9 @@ static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
- if (preemptible()) {
- handler_errors++;
- pr_err("kretprobe return handler is preemptible\n");
- }
- if (ret != (rand1 / div_factor)) {
- handler_errors++;
- pr_err("incorrect value in kretprobe handler\n");
- }
- if (krph_val == 0) {
- handler_errors++;
- pr_err("call to kretprobe entry handler failed\n");
- }
-
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
krph_val = rand1;
return 0;
}
@@ -193,39 +131,21 @@ static struct kretprobe rp = {
.kp.symbol_name = "kprobe_target"
};
-static int test_kretprobe(void)
+static void test_kretprobe(struct kunit *test)
{
- int ret;
-
- ret = register_kretprobe(&rp);
- if (ret < 0) {
- pr_err("register_kretprobe returned %d\n", ret);
- return ret;
- }
-
- ret = target(rand1);
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
+ target(rand1);
unregister_kretprobe(&rp);
- if (krph_val != rand1) {
- pr_err("kretprobe handler not called\n");
- handler_errors++;
- }
-
- return 0;
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
}
static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
- if (ret != (rand1 / div_factor) + 1) {
- handler_errors++;
- pr_err("incorrect value in kretprobe handler2\n");
- }
- if (krph_val == 0) {
- handler_errors++;
- pr_err("call to kretprobe entry handler failed\n");
- }
-
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
krph_val = rand1;
return 0;
}
@@ -236,78 +156,54 @@ static struct kretprobe rp2 = {
.kp.symbol_name = "kprobe_target2"
};
-static int test_kretprobes(void)
+static void test_kretprobes(struct kunit *test)
{
- int ret;
struct kretprobe *rps[2] = {&rp, &rp2};
+ current_test = test;
/* addr and flags should be cleard for reusing kprobe. */
rp.kp.addr = NULL;
rp.kp.flags = 0;
- ret = register_kretprobes(rps, 2);
- if (ret < 0) {
- pr_err("register_kretprobe returned %d\n", ret);
- return ret;
- }
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
krph_val = 0;
- ret = target(rand1);
- if (krph_val != rand1) {
- pr_err("kretprobe handler not called\n");
- handler_errors++;
- }
+ target(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
krph_val = 0;
- ret = target2(rand1);
- if (krph_val != rand1) {
- pr_err("kretprobe handler2 not called\n");
- handler_errors++;
- }
+ target2(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
unregister_kretprobes(rps, 2);
- return 0;
}
#endif /* CONFIG_KRETPROBES */
-int init_test_probes(void)
+static int kprobes_test_init(struct kunit *test)
{
- int ret;
-
target = kprobe_target;
target2 = kprobe_target2;
do {
rand1 = prandom_u32();
} while (rand1 <= div_factor);
+ return 0;
+}
- pr_info("started\n");
- num_tests++;
- ret = test_kprobe();
- if (ret < 0)
- errors++;
-
- num_tests++;
- ret = test_kprobes();
- if (ret < 0)
- errors++;
-
+static struct kunit_case kprobes_testcases[] = {
+ KUNIT_CASE(test_kprobe),
+ KUNIT_CASE(test_kprobes),
#ifdef CONFIG_KRETPROBES
- num_tests++;
- ret = test_kretprobe();
- if (ret < 0)
- errors++;
-
- num_tests++;
- ret = test_kretprobes();
- if (ret < 0)
- errors++;
-#endif /* CONFIG_KRETPROBES */
+ KUNIT_CASE(test_kretprobe),
+ KUNIT_CASE(test_kretprobes),
+#endif
+ {}
+};
- if (errors)
- pr_err("BUG: %d out of %d tests failed\n", errors, num_tests);
- else if (handler_errors)
- pr_err("BUG: %d error(s) running handlers\n", handler_errors);
- else
- pr_info("passed successfully\n");
+static struct kunit_suite kprobes_test_suite = {
+ .name = "kprobes_test",
+ .init = kprobes_test_init,
+ .test_cases = kprobes_testcases,
+};
- return 0;
-}
+kunit_test_suites(&kprobes_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2a9b6dcdac4f..6ceb11a43e4c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2080,9 +2080,10 @@ config TEST_DIV64
If unsure, say N.
config KPROBES_SANITY_TEST
- bool "Kprobes sanity tests"
+ tristate "Kprobes sanity tests"
depends on DEBUG_KERNEL
depends on KPROBES
+ depends on KUNIT
help
This option provides for testing basic kprobes functionality on
boot. Samples of kprobe and kretprobe are inserted and
Add a test case for stacktrace from kretprobe handler and
nested kretprobe handlers.
This test checks both the stack trace inside a kretprobe handler
and the stack trace from pt_regs. Both stack traces must include
the actual function return address instead of the kretprobe trampoline.
The nested kretprobe stacktrace test checks whether the unwinder
can correctly unwind the call frame on the stack which has been
modified by the kretprobe.
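For reference, the nested test exercises a call chain like this (names
taken from the patch below):

  kprobe_stacktrace_driver()
    -> kprobe_stacktrace_target()               <- kretprobe rp3
         -> kprobe_stacktrace_internal_target()     <- kretprobe rp4

Both return addresses on this chain are replaced with
__kretprobe_trampoline, and the handlers verify that stack_trace_save()
and stack_trace_save_regs() still report the two real return addresses
in the right order.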
Since the stacktrace on kretprobe is correctly fixed only on x86,
this introduces a meta Kconfig option, ARCH_CORRECT_STACKTRACE_ON_KRETPROBE,
which tells the user whether the stacktrace on kretprobe is correct or not.
The test results will be shown like below:
TAP version 14
1..1
# Subtest: kprobes_test
1..6
ok 1 - test_kprobe
ok 2 - test_kprobes
ok 3 - test_kretprobe
ok 4 - test_kretprobes
ok 5 - test_stacktrace_on_kretprobe
ok 6 - test_stacktrace_on_nested_kretprobe
# kprobes_test: pass:6 fail:0 skip:0 total:6
# Totals: pass:6 fail:0 skip:0 total:6
ok 1 - kprobes_test
Signed-off-by: Masami Hiramatsu <[email protected]>
---
arch/Kconfig | 8 ++
arch/x86/Kconfig | 1
kernel/test_kprobes.c | 158 +++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 167 insertions(+)
diff --git a/arch/Kconfig b/arch/Kconfig
index 8df1c7102643..8378f83b462c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -191,6 +191,14 @@ config HAVE_OPTPROBES
config HAVE_KPROBES_ON_FTRACE
bool
+config ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ bool
+ help
+ Since kretprobes modifies return address on the stack, the
+ stacktrace may see the kretprobe trampoline address instead
+ of correct one. If the architecture stacktrace code and
+ unwinder can adjust such entries, select this configuration.
+
config HAVE_FUNCTION_ERROR_INJECTION
bool
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ab83c22d274e..2049364b3981 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -61,6 +61,7 @@ config X86
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_INIT
+ select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM)
select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index e78f18144145..a902be1f4a96 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -17,6 +17,11 @@ static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
static struct kunit *current_test;
+static unsigned long (*internal_target)(void);
+static unsigned long (*stacktrace_target)(void);
+static unsigned long (*stacktrace_driver)(void);
+static unsigned long target_return_address[2];
+
static noinline u32 kprobe_target(u32 value)
{
return (value / div_factor);
@@ -58,6 +63,33 @@ static noinline u32 kprobe_target2(u32 value)
return (value / div_factor) + 1;
}
+static noinline unsigned long kprobe_stacktrace_internal_target(void)
+{
+ if (!target_return_address[0])
+ target_return_address[0] = (unsigned long)__builtin_return_address(0);
+ return target_return_address[0];
+}
+
+static noinline unsigned long kprobe_stacktrace_target(void)
+{
+ if (!target_return_address[1])
+ target_return_address[1] = (unsigned long)__builtin_return_address(0);
+
+ if (internal_target)
+ internal_target();
+
+ return target_return_address[1];
+}
+
+static noinline unsigned long kprobe_stacktrace_driver(void)
+{
+ if (stacktrace_target)
+ stacktrace_target();
+
+ /* This is for preventing inlining the function */
+ return (unsigned long)__builtin_return_address(0);
+}
+
static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
preh_val = (rand1 / div_factor) + 1;
@@ -175,12 +207,134 @@ static void test_kretprobes(struct kunit *test)
KUNIT_EXPECT_EQ(test, krph_val, rand1);
unregister_kretprobes(rps, 2);
}
+
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+#define STACK_BUF_SIZE 16
+static unsigned long stack_buf[STACK_BUF_SIZE];
+
+static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);
+
+ /*
+ * Test stacktrace inside the kretprobe handler, this will involves
+ * kretprobe trampoline, but must include correct return address
+ * of the target function.
+ */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret; i++) {
+ if (stack_buf[i] == target_return_address[1])
+ break;
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+ /*
+ * Test stacktrace from pt_regs at the return address. Thus the stack
+ * trace must start from the target return address.
+ */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
+
+ return 0;
+}
+
+static struct kretprobe rp3 = {
+ .handler = stacktrace_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_target"
+};
+
+static void test_stacktrace_on_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+ /*
+ * Run the stacktrace_driver() to record correct return address in
+ * stacktrace_target() and ensure stacktrace_driver() call is not
+ * inlined by checking the return address of stacktrace_driver()
+ * and the return address of this function is different.
+ */
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobe(&rp3);
+}
+
+static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);
+
+ /*
+ * Test stacktrace inside the kretprobe handler for nested case.
+ * The unwinder will find the kretprobe_trampoline address on the
+ * return address, and kretprobe must solve that.
+ */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret - 1; i++) {
+ if (stack_buf[i] == target_return_address[0]) {
+ KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
+ break;
+ }
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+ /* Ditto for the regs version. */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
+
+ return 0;
+}
+
+static struct kretprobe rp4 = {
+ .handler = stacktrace_internal_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_internal_target"
+};
+
+static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+ struct kretprobe *rps[2] = {&rp3, &rp4};
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+ //KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobes(rps, 2);
+}
+#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
+
#endif /* CONFIG_KRETPROBES */
static int kprobes_test_init(struct kunit *test)
{
target = kprobe_target;
target2 = kprobe_target2;
+ stacktrace_target = kprobe_stacktrace_target;
+ internal_target = kprobe_stacktrace_internal_target;
+ stacktrace_driver = kprobe_stacktrace_driver;
do {
rand1 = prandom_u32();
@@ -194,6 +348,10 @@ static struct kunit_case kprobes_testcases[] = {
#ifdef CONFIG_KRETPROBES
KUNIT_CASE(test_kretprobe),
KUNIT_CASE(test_kretprobes),
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ KUNIT_CASE(test_stacktrace_on_kretprobe),
+ KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
+#endif
#endif
{}
};
Record the frame pointer instead of the stack address in the kretprobe
instance, as the identifier on the instance list.
Since arm64 always enables CONFIG_FRAME_POINTER, we can use the
actual frame pointer (x29).
This will allow the stacktrace code to find the original return
address from the FP alone.
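With ri->fp holding the real frame pointer, an unwinder can map a
trampoline entry back to the recorded return address roughly like the
sketch below (this is what the stacktrace patches later in this series
do; the field names follow those patches):

  if (is_kretprobe_trampoline(frame->pc))
  	frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp,
  					    &frame->kr_cur);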
Signed-off-by: Masami Hiramatsu <[email protected]>
Acked-by: Will Deacon <[email protected]>
Acked-by: Mark Rutland <[email protected]>
---
Changes in v2:
- Update changelog according to Mark's comment.
---
arch/arm64/kernel/probes/kprobes.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index e7ad6da980e8..d9dfa82c1f18 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -401,14 +401,14 @@ int __init arch_populate_kprobe_blacklist(void)
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
- return (void *)kretprobe_trampoline_handler(regs, (void *)kernel_stack_pointer(regs));
+ return (void *)kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
- ri->fp = (void *)kernel_stack_pointer(regs);
+ ri->fp = (void *)regs->regs[29];
/* replace return addr (x30) with trampoline */
regs->regs[30] = (long)&__kretprobe_trampoline;
Since kretprobe replaces the function return address with
the kretprobe_trampoline address on the stack, the ARM unwinder shows
it instead of the correct return address.
This finds the correct return address from the per-task
kretprobe_instances list and verifies that it lies between the
caller fp and the callee fp.
Note that this supports both GCC and Clang if CONFIG_FRAME_POINTER=y
and CONFIG_ARM_UNWIND=n. For the ARM unwinder, this is still
not working correctly.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v2:
- Compile this code only when CONFIG_KRETPROBES=y
---
arch/arm/Kconfig | 1 +
arch/arm/include/asm/stacktrace.h | 9 +++++++++
arch/arm/kernel/return_address.c | 4 ++++
arch/arm/kernel/stacktrace.c | 14 ++++++++++++++
4 files changed, 28 insertions(+)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fc196421b2ce..bb4f1872967c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -3,6 +3,7 @@ config ARM
bool
default y
select ARCH_32BIT_OFF_T
+ select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE if HAVE_KRETPROBES && FRAME_POINTER && !ARM_UNWIND
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 2d76a2e29f05..8f54f9ad8a9b 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -3,6 +3,7 @@
#define __ASM_STACKTRACE_H
#include <asm/ptrace.h>
+#include <linux/llist.h>
struct stackframe {
/*
@@ -13,6 +14,10 @@ struct stackframe {
unsigned long sp;
unsigned long lr;
unsigned long pc;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+ struct task_struct *tsk;
+#endif
};
static __always_inline
@@ -22,6 +27,10 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
frame->sp = regs->ARM_sp;
frame->lr = regs->ARM_lr;
frame->pc = regs->ARM_pc;
+#ifdef CONFIG_KRETPROBES
+ frame->kr_cur = NULL;
+ frame->tsk = current;
+#endif
}
extern int unwind_frame(struct stackframe *frame);
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 7b42ac010fdf..00c11579406c 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -42,6 +42,10 @@ void *return_address(unsigned int level)
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)return_address;
+#ifdef CONFIG_KRETPROBES
+ frame.kr_cur = NULL;
+ frame.tsk = current;
+#endif
walk_stackframe(&frame, save_return_addr, &data);
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index db798eac7431..75e905508f27 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
@@ -65,6 +66,11 @@ int notrace unwind_frame(struct stackframe *frame)
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
#endif
+#ifdef CONFIG_KRETPROBES
+ if (is_kretprobe_trampoline(frame->pc))
+ frame->pc = kretprobe_find_ret_addr(frame->tsk,
+ (void *)frame->fp, &frame->kr_cur);
+#endif
return 0;
}
@@ -156,6 +162,10 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)__save_stack_trace;
}
+#ifdef CONFIG_KRETPROBES
+ frame.kr_cur = NULL;
+ frame.tsk = tsk;
+#endif
walk_stackframe(&frame, save_trace, &data);
}
@@ -173,6 +183,10 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
frame.sp = regs->ARM_sp;
frame.lr = regs->ARM_lr;
frame.pc = regs->ARM_pc;
+#ifdef CONFIG_KRETPROBES
+ frame.kr_cur = NULL;
+ frame.tsk = current;
+#endif
walk_stackframe(&frame, save_trace, &data);
}
Since kretprobe replaces the function return address with
the kretprobe_trampoline address on the stack, the stack unwinder
shows it instead of the correct return address.
This checks whether the next return address is
__kretprobe_trampoline(), and if so, tries to find the correct
return address from the kretprobe instance list. For this purpose,
this adds a 'kr_cur' loop cursor to remember the current kretprobe
instance.
With this fix, arm64 can now enable
CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE and pass the
kprobe self tests.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v2:
- Add comment for kr_cur.
- Make the kretprobe related code depends on CONFIG_KRETPROBES.
- Initialize "kr_cur" directly in start_backtrace() instead
of clearing "frame" data structure by memset().
---
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/stacktrace.h | 4 ++++
arch/arm64/kernel/stacktrace.c | 7 +++++++
3 files changed, 12 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5c7ae4c3954b..edde5171ffb2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -11,6 +11,7 @@ config ARM64
select ACPI_PPTT if ACPI
select ARCH_HAS_DEBUG_WX
select ARCH_BINFMT_ELF_STATE
+ select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 8aebc00c1718..a4e046ef4568 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
+#include <linux/llist.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
@@ -59,6 +60,9 @@ struct stackframe {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph;
#endif
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+#endif
};
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 8982a2b78acf..c30624fff6ac 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -41,6 +41,9 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame->graph = 0;
#endif
+#ifdef CONFIG_KRETPROBES
+ frame->kr_cur = NULL;
+#endif
/*
* Prime the first unwind.
@@ -129,6 +132,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = ret_stack->ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifdef CONFIG_KRETPROBES
+ if (is_kretprobe_trampoline(frame->pc))
+ frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
+#endif
frame->pc = ptrauth_strip_insn_pac(frame->pc);
Compile the kretprobe-related stacktrace entry recovery code and the
unwind_state::kr_cur field only when CONFIG_KRETPROBES=y.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
arch/x86/include/asm/unwind.h | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index fca2e783e3ce..2a1f8734416d 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -16,7 +16,9 @@ struct unwind_state {
unsigned long stack_mask;
struct task_struct *task;
int graph_idx;
+#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
+#endif
bool error;
#if defined(CONFIG_UNWINDER_ORC)
bool signal, full_regs;
@@ -105,9 +107,13 @@ static inline
unsigned long unwind_recover_kretprobe(struct unwind_state *state,
unsigned long addr, unsigned long *addr_p)
{
+#ifdef CONFIG_KRETPROBES
return is_kretprobe_trampoline(addr) ?
kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) :
addr;
+#else
+ return addr;
+#endif
}
/* Recover the return address modified by kretprobe and ftrace_graph. */
Currently, the stacktrace on a Clang-compiled ARM kernel uses the 'lr'
register to find the first frame address from pt_regs. However, that
is wrong after calling another function, because the 'lr' register
is clobbered by the 'bl' instruction and never recovered.
As with the GCC-built ARM kernel, directly use the frame pointer (r11)
of the pt_regs to find the first frame address.
Note that this fixes the kretprobe stacktrace issue only with
CONFIG_UNWINDER_FRAME_POINTER=y. For CONFIG_UNWINDER_ARM,
we need another fix.
Signed-off-by: Masami Hiramatsu <[email protected]>
Reviewed-by: Nick Desaulniers <[email protected]>
---
Changes in v2:
- Fix typos in changelog.
---
arch/arm/kernel/stacktrace.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 76ea4178a55c..db798eac7431 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -54,8 +54,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->sp = frame->fp;
frame->fp = *(unsigned long *)(fp);
- frame->pc = frame->lr;
- frame->lr = *(unsigned long *)(fp + 4);
+ frame->pc = *(unsigned long *)(fp + 4);
#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
Make a frame pointer (make the x29 register point to the
address of pt_regs->regs[29]) on __kretprobe_trampoline.
This frame pointer will be used by the stacktracer when it is
called from the kretprobe handlers. In this case, the stack
tracer will unwind the stack to trampoline_probe_handler() and
find the next frame pointer in the stack frame of
__kretprobe_trampoline().
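In other words, after save_all_base_regs and the new
'add x29, sp, #S_FP', the unwinder sees an ordinary frame record
(a sketch derived from the code below, not additional code):

  x29               == &pt_regs->regs[29]
  pt_regs->regs[29] == the frame pointer at the probed function's return
  pt_regs->regs[30] == &__kretprobe_trampoline

so the next frame's return address is __kretprobe_trampoline, which the
stacktrace code can then map back to the real return address.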
Signed-off-by: Masami Hiramatsu <[email protected]>
Acked-by: Will Deacon <[email protected]>
---
arch/arm64/kernel/probes/kprobes_trampoline.S | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 520ee8711db1..9a6499bed58b 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -66,6 +66,9 @@ SYM_CODE_START(__kretprobe_trampoline)
save_all_base_regs
+ /* Setup a frame pointer. */
+ add x29, sp, #S_FP
+
mov x0, sp
bl trampoline_probe_handler
/*
@@ -74,6 +77,7 @@ SYM_CODE_START(__kretprobe_trampoline)
*/
mov lr, x0
+ /* The frame pointer (x29) is restored with other registers. */
restore_all_base_regs
add sp, sp, #PT_REGS_SIZE
Currently, a kretprobe on ARM just fills r0-r11 of pt_regs, but
that is not enough for the stacktrace. Moreover, taking a stacktrace
from a user's kretprobe handler requires a frame pointer on
__kretprobe_trampoline.
This adds a frame pointer on __kretprobe_trampoline for both the
GCC and Clang cases. They use different frame pointer layouts, so
we need different but similar stack setups on pt_regs.
GCC makes the frame pointer (fp) point to the 'pc' slot of the
{fp, ip (=sp), lr, pc} record, which means {r11, r13, r14, r15}.
Thus, if we save r11 (fp) in the pt_regs->r12 slot, we can build
this record at the end of pt_regs.
On the other hand, Clang makes the frame pointer point to the 'fp'
slot of the {fp, lr} record on the stack. Since the field next to
pt_regs->lr is pt_regs->sp rather than a saved fp, I reused the
adjacent pair of pt_regs->fp and pt_regs->ip instead. So this
stores the 'lr' in pt_regs->ip and makes the fp point to
pt_regs->fp.
In both cases, the __kretprobe_trampoline address is saved to
pt_regs->lr, so that the stack tracer can identify that this frame
pointer has been made by __kretprobe_trampoline.
Note that if CONFIG_FRAME_POINTER is not set, this keeps the fp
as is.
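To summarize the resulting pt_regs layout (an illustrative sketch
derived from the assembly below; offsets are from the final sp,
i.e. &pt_regs):

  GCC (fp = sp + 60, pointing at the pc slot):
    +60  pc (inside the trampoline)
    +56  lr  = &__kretprobe_trampoline
    +52  sp at trampoline entry
    +48  original fp (saved in the ip/r12 slot)
    +44..+0  r11..r0

  Clang (fp = sp + 44, pointing at the r11/fp slot):
    +60  pc (inside the trampoline)
    +56  lr  = &__kretprobe_trampoline
    +52  sp at trampoline entry
    +48  lr  = &__kretprobe_trampoline (saved in the ip/r12 slot)
    +44..+0  r11..r0 (r11 still holds the original fp)

The GCC unwinder reads {fp, sp, pc} from fp-12/fp-8/fp-4 and the Clang
unwinder reads {fp, lr} from fp/fp+4, so in both cases the recovered
'pc' is __kretprobe_trampoline, which the stacktrace code can then map
back to the real return address.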
Signed-off-by: Masami Hiramatsu <[email protected]>
Reviewed-by: Nick Desaulniers <[email protected]>
---
Changes in v3:
- Avoid using !sp when storing sp itself.
- Unify stmdb register list.
- Keep the current assembly code when CONFIG_FRAME_POINTER=n.
---
arch/arm/probes/kprobes/core.c | 28 ++++++++++++++++++++++++----
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 95f23b47ba27..4848404ba51b 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -368,16 +368,36 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
/*
* When a retprobed function returns, trampoline_handler() is called,
* calling the kretprobe's handler. We construct a struct pt_regs to
- * give a view of registers r0-r11 to the user return-handler. This is
- * not a complete pt_regs structure, but that should be plenty sufficient
- * for kretprobe handlers which should normally be interested in r0 only
- * anyway.
+ * give a view of registers r0-r11, sp, lr, and pc to the user
+ * return-handler. This is not a complete pt_regs structure, but that
+ * should be enough for stacktrace from the return handler with or
+ * without pt_regs.
*/
void __naked __kprobes __kretprobe_trampoline(void)
{
__asm__ __volatile__ (
+#ifdef CONFIG_FRAME_POINTER
+ "ldr lr, =__kretprobe_trampoline \n\t"
+ /* __kretprobe_trampoline makes a framepointer on pt_regs. */
+#ifdef CONFIG_CC_IS_CLANG
+ "stmdb sp, {sp, lr, pc} \n\t"
+ "sub sp, sp, #12 \n\t"
+ /* In clang case, pt_regs->ip = lr. */
+ "stmdb sp!, {r0 - r11, lr} \n\t"
+ /* fp points regs->r11 (fp) */
+ "add fp, sp, #44 \n\t"
+#else /* !CONFIG_CC_IS_CLANG */
+ /* In gcc case, pt_regs->ip = fp. */
+ "stmdb sp, {fp, sp, lr, pc} \n\t"
"sub sp, sp, #16 \n\t"
"stmdb sp!, {r0 - r11} \n\t"
+ /* fp points regs->r15 (pc) */
+ "add fp, sp, #60 \n\t"
+#endif /* CONFIG_CC_IS_CLANG */
+#else /* !CONFIG_FRAME_POINTER */
+ "sub sp, sp, #16 \n\t"
+ "stmdb sp!, {r0 - r11} \n\t"
+#endif /* CONFIG_FRAME_POINTER */
"mov r0, sp \n\t"
"bl trampoline_handler \n\t"
"mov lr, r0 \n\t"
On Thu, Oct 21, 2021 at 09:55:09AM +0900, Masami Hiramatsu wrote:
> Since the kretprobe replaces the function return address with
> the kretprobe_trampoline on the stack, stack unwinder shows it
> instead of the correct return address.
>
> This checks whether the next return address is the
> __kretprobe_trampoline(), and if so, try to find the correct
> return address from the kretprobe instance list. For this purpose
> this adds 'kr_cur' loop cursor to memorize the current kretprobe
> instance.
>
> With this fix, now arm64 can enable
> CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE, and pass the
> kprobe self tests.
>
> Signed-off-by: Masami Hiramatsu <[email protected]>
> ---
> Changes in v2:
> - Add comment for kr_cur.
> - Make the kretprobe related code depends on CONFIG_KRETPROBES.
> - Initialize "kr_cur" directly in start_backtrace() instead
> of clearing "frame" data structure by memset().
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/include/asm/stacktrace.h | 4 ++++
> arch/arm64/kernel/stacktrace.c | 7 +++++++
> 3 files changed, 12 insertions(+)
Acked-by: Will Deacon <[email protected]>
I'm not sure how you're planning to merge this, so please let me know if
you want me to queue any of the arm64 bits.
Will
On Thu, 21 Oct 2021 11:15:12 +0100
Will Deacon <[email protected]> wrote:
> On Thu, Oct 21, 2021 at 09:55:09AM +0900, Masami Hiramatsu wrote:
> > Since the kretprobe replaces the function return address with
> > the kretprobe_trampoline on the stack, stack unwinder shows it
> > instead of the correct return address.
> >
> > This checks whether the next return address is the
> > __kretprobe_trampoline(), and if so, try to find the correct
> > return address from the kretprobe instance list. For this purpose
> > this adds 'kr_cur' loop cursor to memorize the current kretprobe
> > instance.
> >
> > With this fix, now arm64 can enable
> > CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE, and pass the
> > kprobe self tests.
> >
> > Signed-off-by: Masami Hiramatsu <[email protected]>
> > ---
> > Changes in v2:
> > - Add comment for kr_cur.
> > - Make the kretprobe related code depends on CONFIG_KRETPROBES.
> > - Initialize "kr_cur" directly in start_backtrace() instead
> > of clearing "frame" data structure by memset().
> > ---
> > arch/arm64/Kconfig | 1 +
> > arch/arm64/include/asm/stacktrace.h | 4 ++++
> > arch/arm64/kernel/stacktrace.c | 7 +++++++
> > 3 files changed, 12 insertions(+)
>
> Acked-by: Will Deacon <[email protected]>
Thank you!
>
> I'm not sure how you're planning to merge this, so please let me know if
> you want me to queue any of the arm64 bits.
Ah, good question. Since this part depends on the first 3 patches and
Steve's tracing tree, these should go through the tracing tree. Is that
OK for you?
(Or, wait for the current tracing tree to be merged and then merge the
rest of them, but this will take a long time.)
Thank you,
--
Masami Hiramatsu <[email protected]>
On Thu, 21 Oct 2021 23:26:30 +0900
Masami Hiramatsu <[email protected]> wrote:
> >
> > I'm not sure how you're planning to merge this, so please let me know if
> > you want me to queue any of the arm64 bits.
>
> Ah, good question. Since this part depends on the first 3 patches and
> Steve's tracing tree, these should go through the tracing tree. Is that
> OK for you?
>
I'm OK with merging this.
> (Or, wait for merging the current tracing tree and merge rest of them.
> but this will take a long time.)
And my linux-next is behind because my tests triggered a bug on one of my
arcane configs, and I'm still debugging it. :-p
-- Steve
On Thu, Oct 21, 2021 at 10:49:02AM -0400, Steven Rostedt wrote:
> On Thu, 21 Oct 2021 23:26:30 +0900
> Masami Hiramatsu <[email protected]> wrote:
>
> > >
> > > I'm not sure how you're planning to merge this, so please let me know if
> > > you want me to queue any of the arm64 bits.
> >
> > Ah, good question. Since this part depends on the first 3 patches and
> > Steve's tracing tree, these should go through the tracing tree. Is that
> > OK for you?
> >
>
> I'm OK with merging this.
Ok, cool. I've acked the arm64 bits so I'll leave it in your hands!
> > (Or, wait for merging the current tracing tree and merge rest of them.
> > but this will take a long time.)
>
> And my linux-next is behind because my tests triggered a bug on one of my
> arcane configs, and I'm still debugging it. :-p
Happy debugging :)
Will
On Thu, 21 Oct 2021 17:52:42 +0100
Will Deacon <[email protected]> wrote:
> > I'm OK with merging this.
>
> Ok, cool. I've acked the arm64 bits so I'll leave it in your hands!
Thanks! I'll pull them in.
>
> > > (Or, wait for merging the current tracing tree and merge rest of them.
> > > but this will take a long time.)
> >
> > And my linux-next is behind because my tests triggered a bug on one of my
> > arcane configs, and I'm still debugging it. :-p
>
> Happy debugging :)
Found the bug. I'll restart my tests (they take around 13 hours to
complete), and when/if they succeed, I'll push it for inclusion in
linux-next.
-- Steve
On Thu, 21 Oct 2021 12:59:43 -0400
Steven Rostedt <[email protected]> wrote:
> > Happy debugging :)
>
> Found the bug. I'll restart my tests (takes around 13 hours more or less to
> complete) and when/if they succeed, I'll push it for inclusion in
> linux-next.
And of course, more bugs appear. Nothing (yet) to do with this patch
series, but as I started running tests I hadn't run before, they're
triggering bugs in other places that I need to go sort out :-p
-- Steve
On Thu, 21 Oct 2021 09:54:32 +0900
Masami Hiramatsu <[email protected]> wrote:
> Add a test case for stacktrace from kretprobe handler and
> nested kretprobe handlers.
>
> This test checks both of stack trace inside kretprobe handler
> and stack trace from pt_regs. Those stack trace must include
> actual function return address instead of kretprobe trampoline.
> The nested kretprobe stacktrace test checks whether the unwinder
> can correctly unwind the call frame on the stack which has been
> modified by the kretprobe.
>
> Since the stacktrace on kretprobe is correctly fixed only on x86,
> this introduces a meta kconfig ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
> which tells user that the stacktrace on kretprobe is correct or not.
>
> The test results will be shown like below;
>
> TAP version 14
> 1..1
> # Subtest: kprobes_test
> 1..6
> ok 1 - test_kprobe
> ok 2 - test_kprobes
> ok 3 - test_kretprobe
> ok 4 - test_kretprobes
> ok 5 - test_stacktrace_on_kretprobe
> ok 6 - test_stacktrace_on_nested_kretprobe
> # kprobes_test: pass:6 fail:0 skip:0 total:6
> # Totals: pass:6 fail:0 skip:0 total:6
> ok 1 - kprobes_test
So my allmodconfig test failed on this:
ERROR: modpost: "stack_trace_save_regs" [kernel/test_kprobes.ko] undefined!
> + /*
> + * Test stacktrace from pt_regs at the return address. Thus the stack
> + * trace must start from the target return address.
> + */
> + ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
> + KUNIT_EXPECT_NE(current_test, ret, 0);
> + KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
> +
> + return 0;
> +}
It appears that "stack_trace_save_regs" is not exported, and this code
can be compiled as a module.
I'm going to continue testing my code, as I have over 40 patches that need
to go into next. I'll just rebase removing this commit only (hopefully
nothing else breaks), and if everything then passes, I'll push to next.
-- Steve
On Fri, 22 Oct 2021 12:15:37 -0400
Steven Rostedt <[email protected]> wrote:
> I'm going to continue testing my code, as I have over 40 patches that need
> to go into next. I'll just rebase removing this commit only (hopefully
> nothing else breaks), and if everything then passes, I'll push to next.
My tests are now back at the allmodconfig stage (I did most of the tests,
but not all, to save time); hopefully it will pass this time ;-)
What I plan on posting soon is located here (if you want to see them).
git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git
branch: ftrace/core
-- Steve
On Fri, 22 Oct 2021 12:15:37 -0400
Steven Rostedt <[email protected]> wrote:
> On Thu, 21 Oct 2021 09:54:32 +0900
> Masami Hiramatsu <[email protected]> wrote:
>
> > Add a test case for stacktrace from kretprobe handler and
> > nested kretprobe handlers.
> >
> > This test checks both of stack trace inside kretprobe handler
> > and stack trace from pt_regs. Those stack trace must include
> > actual function return address instead of kretprobe trampoline.
> > The nested kretprobe stacktrace test checks whether the unwinder
> > can correctly unwind the call frame on the stack which has been
> > modified by the kretprobe.
> >
> > Since the stacktrace on kretprobe is correctly fixed only on x86,
> > this introduces a meta kconfig ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
> > which tells user that the stacktrace on kretprobe is correct or not.
> >
> > The test results will be shown like below;
> >
> > TAP version 14
> > 1..1
> > # Subtest: kprobes_test
> > 1..6
> > ok 1 - test_kprobe
> > ok 2 - test_kprobes
> > ok 3 - test_kretprobe
> > ok 4 - test_kretprobes
> > ok 5 - test_stacktrace_on_kretprobe
> > ok 6 - test_stacktrace_on_nested_kretprobe
> > # kprobes_test: pass:6 fail:0 skip:0 total:6
> > # Totals: pass:6 fail:0 skip:0 total:6
> > ok 1 - kprobes_test
>
> So my allmodconfig test failed on this:
>
> ERROR: modpost: "stack_trace_save_regs" [kernel/test_kprobes.ko] undefined!
Oops.
> > + /*
> > + * Test stacktrace from pt_regs at the return address. Thus the stack
> > + * trace must start from the target return address.
> > + */
> > + ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
> > + KUNIT_EXPECT_NE(current_test, ret, 0);
> > + KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
> > +
> > + return 0;
> > +}
>
> It appears that that "stack_trace_save_regs" is not exported. And this code
> can be compiled as a module.
Yes, if the selftest is compiled as a module, it has to avoid calling
stack_trace_save_regs().
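One way to do that (a sketch of the direction only, not the actual
fix) would be to build the pt_regs-based check only when the test is
built in:

  #if IS_BUILTIN(CONFIG_KPROBES_SANITY_TEST)
  	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
  	KUNIT_EXPECT_NE(current_test, ret, 0);
  	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
  #endif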
>
> I'm going to continue testing my code, as I have over 40 patches that need
> to go into next. I'll just rebase removing this commit only (hopefully
> nothing else breaks), and if everything then passes, I'll push to next.
OK, let me fix that.
Thank you,
>
> -- Steve
--
Masami Hiramatsu <[email protected]>
On Thu, Oct 21, 2021 at 2:55 AM Masami Hiramatsu <[email protected]> wrote:
>
> Currently kretprobe on ARM just fills r0-r11 of pt_regs, but
> that is not enough for the stacktrace. Moreover, from the user
> kretprobe handler, stacktrace needs a frame pointer on the
> __kretprobe_trampoline.
>
> This adds a frame pointer on __kretprobe_trampoline for both gcc
> and clang case. Those have different frame pointer so we need
> different but similar stack on pt_regs.
>
> Gcc makes the frame pointer (fp) to point the 'pc' address of
> the {fp, ip (=sp), lr, pc}, this means {r11, r13, r14, r15}.
> Thus if we save the r11 (fp) on pt_regs->r12, we can make this
> set on the end of pt_regs.
>
> On the other hand, Clang makes the frame pointer to point the
> 'fp' address of {fp, lr} on stack. Since the next to the
> pt_regs->lr is pt_regs->sp, I reused the pair of pt_regs->fp
> and pt_regs->ip.
> So this stores the 'lr' on pt_regs->ip and make the fp to point
> pt_regs->fp.
>
> For both cases, saves __kretprobe_trampoline address to
> pt_regs->lr, so that the stack tracer can identify this frame
> pointer has been made by the __kretprobe_trampoline.
>
> Note that if the CONFIG_FRAME_POINTER is not set, this keeps
> fp as is.
>
> Signed-off-by: Masami Hiramatsu <[email protected]>
> Reviewed-by: Nick Desaulniers <[email protected]>
This causes a regression that I see in randconfig builds with gcc-11:
/tmp/ccovvQNw.s: Assembler messages:
/tmp/ccovvQNw.s:32: Error: invalid literal constant: pool needs to be closer
make[5]: *** [/git/arm-soc/scripts/Makefile.build:288:
arch/arm/probes/kprobes/core.o] Error 1
I have two randconfigs that reproduce it locally; here is a .config [1]
and assembly output [2] for reference. I have not done any further
analysis, but maybe you have an idea.
Arnd
[1] https://pastebin.com/y4rkH8qX
[2] https://pastebin.com/9mEVU8Rd
On Fri, 3 Dec 2021 at 21:38, Arnd Bergmann <[email protected]> wrote:
>
> On Thu, Oct 21, 2021 at 2:55 AM Masami Hiramatsu <[email protected]> wrote:
> >
> > Currently kretprobe on ARM just fills r0-r11 of pt_regs, but
> > that is not enough for the stacktrace. Moreover, from the user
> > kretprobe handler, stacktrace needs a frame pointer on the
> > __kretprobe_trampoline.
> >
> > This adds a frame pointer on __kretprobe_trampoline for both gcc
> > and clang case. Those have different frame pointer so we need
> > different but similar stack on pt_regs.
> >
> > Gcc makes the frame pointer (fp) to point the 'pc' address of
> > the {fp, ip (=sp), lr, pc}, this means {r11, r13, r14, r15}.
> > Thus if we save the r11 (fp) on pt_regs->r12, we can make this
> > set on the end of pt_regs.
> >
> > On the other hand, Clang makes the frame pointer to point the
> > 'fp' address of {fp, lr} on stack. Since the next to the
> > pt_regs->lr is pt_regs->sp, I reused the pair of pt_regs->fp
> > and pt_regs->ip.
> > So this stores the 'lr' on pt_regs->ip and make the fp to point
> > pt_regs->fp.
> >
> > For both cases, saves __kretprobe_trampoline address to
> > pt_regs->lr, so that the stack tracer can identify this frame
> > pointer has been made by the __kretprobe_trampoline.
> >
> > Note that if the CONFIG_FRAME_POINTER is not set, this keeps
> > fp as is.
> >
> > Signed-off-by: Masami Hiramatsu <[email protected]>
> > Reviewed-by: Nick Desaulniers <[email protected]>
>
> This causes a regression that I see in randconfig builds with gcc-11:
>
> /tmp/ccovvQNw.s: Assembler messages:
> /tmp/ccovvQNw.s:32: Error: invalid literal constant: pool needs to be closer
> make[5]: *** [/git/arm-soc/scripts/Makefile.build:288:
> arch/arm/probes/kprobes/core.o] Error 1
>
> I have two randconfigs that reproduce it locally, here is a .config
> [1] and assembly
> output[2] for reference. I have not done any further analysis, but
> maybe you have
> an idea.
Does this help?
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 9090c3a74dcc..88999ed2cfc4 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -408,6 +408,7 @@ void __naked __kprobes __kretprobe_trampoline(void)
#else
"mov pc, lr \n\t"
#endif
+ ".ltorg \n\t"
: : : "memory");
}
On Sat, Dec 4, 2021 at 9:45 AM Ard Biesheuvel <[email protected]> wrote:
>
> Does this help?
Yes, this fixes it, thanks for the quick help!
Arnd
On Sat, 4 Dec 2021 13:08:46 +0100
Arnd Bergmann <[email protected]> wrote:
> On Sat, Dec 4, 2021 at 9:45 AM Ard Biesheuvel <[email protected]> wrote:
> >
> > Does this help?
>
> Yes, this fixes it, thanks for the quick help!
Thanks Ard and Arnd!
BTW, do you know which kconfig item triggers this issue, or does it only
happen with gcc-11?
I would like to build the same environment.
Thank you,
>
> Arnd
--
Masami Hiramatsu <[email protected]>