2022-12-08 14:05:18

by Tiezhu Yang

[permalink] [raw]
Subject: [PATCH v9 0/4] Add kprobe and kretprobe support for LoongArch

v9:
-- Rename sign_extended() to sign_extend()
-- Modify kprobe_fault_handler() to handle all of kprobe_status

v8:
-- Put "regs->csr_prmd &= ~CSR_PRMD_PIE;" ahead to save one line
-- Add code comment of preempt_disable()
-- Put kprobe_page_fault() in __do_page_fault()
-- Modify the check condition of break insn in kprobe_breakpoint_handler()

v7:
-- Remove stop_machine_cpuslocked() related code

v6:
-- Add a new patch to redefine larch_insn_patch_text() with
stop_machine_cpuslocked()
-- Modify kprobe_breakpoint_handler() to consider the original
insn is break and return the correct value
-- Modify do_bp() to refresh bcode when original insn is break

v5:
-- Rebase on the latest code
-- Use stop_machine_cpuslocked() to modify insn to avoid CPU race

v4:
-- Remove kprobe_exceptions_notify() in kprobes.c
-- Call kprobe_breakpoint_handler() and kprobe_singlestep_handler()
in do_bp()

v3:
-- Rebase on the latest code
-- Check the alignment of PC in simu_branch() and simu_pc()
-- Add ibar in flush_insn_slot()
-- Rename kprobe_{pre,post}_handler() to {post_}kprobe_handler()
-- Add preempt_disable() and preempt_enable_no_resched()
-- Remove r0 save/restore and do some minor changes
in kprobes_trampoline.S
-- Do not enable CONFIG_KPROBES by default

v2:
-- Split simu_branch() and simu_pc() into a single patch
-- Call kprobe_page_fault() in do_page_fault()
-- Add kprobes_trampoline.S for kretprobe

Tiezhu Yang (4):
LoongArch: Simulate branch and PC instructions
LoongArch: Add kprobe support
LoongArch: Add kretprobe support
samples/kprobes: Add LoongArch support

arch/loongarch/Kconfig | 2 +
arch/loongarch/include/asm/inst.h | 27 +++
arch/loongarch/include/asm/kprobes.h | 59 +++++
arch/loongarch/include/asm/ptrace.h | 1 +
arch/loongarch/kernel/Makefile | 2 +
arch/loongarch/kernel/inst.c | 123 ++++++++++
arch/loongarch/kernel/kprobes.c | 356 +++++++++++++++++++++++++++++
arch/loongarch/kernel/kprobes_trampoline.S | 96 ++++++++
arch/loongarch/kernel/traps.c | 13 +-
arch/loongarch/mm/fault.c | 3 +
samples/kprobes/kprobe_example.c | 8 +
11 files changed, 686 insertions(+), 4 deletions(-)
create mode 100644 arch/loongarch/include/asm/kprobes.h
create mode 100644 arch/loongarch/kernel/kprobes.c
create mode 100644 arch/loongarch/kernel/kprobes_trampoline.S

--
2.1.0


2022-12-08 14:21:11

by Tiezhu Yang

[permalink] [raw]
Subject: [PATCH v9 2/4] LoongArch: Add kprobe support

Kprobes allows you to trap at almost any kernel address and
execute a callback function, this commit adds kprobe support
for LoongArch.

Signed-off-by: Tiezhu Yang <[email protected]>
---
arch/loongarch/Kconfig | 1 +
arch/loongarch/include/asm/inst.h | 15 ++
arch/loongarch/include/asm/kprobes.h | 59 +++++++
arch/loongarch/kernel/Makefile | 2 +
arch/loongarch/kernel/kprobes.c | 332 +++++++++++++++++++++++++++++++++++
arch/loongarch/kernel/traps.c | 13 +-
arch/loongarch/mm/fault.c | 3 +
7 files changed, 421 insertions(+), 4 deletions(-)
create mode 100644 arch/loongarch/include/asm/kprobes.h
create mode 100644 arch/loongarch/kernel/kprobes.c

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 16bf1b6..f6fc156 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -102,6 +102,7 @@ config LOONGARCH
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_KPROBES
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PCI
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 19cf692..e3f2378 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -24,6 +24,10 @@

#define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)

+enum reg0i15_op {
+ break_op = 0x54,
+};
+
enum reg0i26_op {
b_op = 0x14,
bl_op = 0x15,
@@ -180,6 +184,11 @@ enum reg3sa2_op {
alsld_op = 0x16,
};

+struct reg0i15_format {
+ unsigned int immediate : 15;
+ unsigned int opcode : 17;
+};
+
struct reg0i26_format {
unsigned int immediate_h : 10;
unsigned int immediate_l : 16;
@@ -265,6 +274,7 @@ struct reg3sa2_format {

union loongarch_instruction {
unsigned int word;
+ struct reg0i15_format reg0i15_format;
struct reg0i26_format reg0i26_format;
struct reg1i20_format reg1i20_format;
struct reg1i21_format reg1i21_format;
@@ -335,6 +345,11 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
ip->reg1i21_format.opcode <= bgeu_op;
}

+static inline bool is_break_ins(union loongarch_instruction *ip)
+{
+ return ip->reg0i15_format.opcode == break_op;
+}
+
static inline bool is_ra_save_ins(union loongarch_instruction *ip)
{
/* st.d $ra, $sp, offset */
diff --git a/arch/loongarch/include/asm/kprobes.h b/arch/loongarch/include/asm/kprobes.h
new file mode 100644
index 0000000..d3903f3
--- /dev/null
+++ b/arch/loongarch/include/asm/kprobes.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_LOONGARCH_KPROBES_H
+#define __ASM_LOONGARCH_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_KPROBES
+
+#include <asm/inst.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p) \
+do { \
+ if (p->addr) \
+ flush_icache_range((unsigned long)p->addr, \
+ (unsigned long)p->addr + \
+ (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
+} while (0)
+
+#define kretprobe_blacklist_size 0
+
+typedef union loongarch_instruction kprobe_opcode_t;
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+ /* copy of the original instruction */
+ kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+ unsigned long saved_irq;
+ unsigned long saved_era;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long kprobe_saved_irq;
+ unsigned long kprobe_saved_era;
+ struct prev_kprobe prev_kprobe;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+bool kprobe_breakpoint_handler(struct pt_regs *regs);
+bool kprobe_singlestep_handler(struct pt_regs *regs);
+
+#else /* !CONFIG_KPROBES */
+
+static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) { return 0; }
+static inline bool kprobe_singlestep_handler(struct pt_regs *regs) { return 0; }
+
+#endif /* CONFIG_KPROBES */
+#endif /* __ASM_LOONGARCH_KPROBES_H */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index fcaa024..6fe4a4e 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o

obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o

+obj-$(CONFIG_KPROBES) += kprobes.o
+
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
new file mode 100644
index 0000000..aadfc9ae
--- /dev/null
+++ b/arch/loongarch/kernel/kprobes.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/preempt.h>
+#include <asm/break.h>
+
+static const union loongarch_instruction breakpoint_insn = {
+ .reg0i15_format = {
+ .opcode = break_op,
+ .immediate = BRK_KPROBE_BP,
+ }
+};
+
+static const union loongarch_instruction singlestep_insn = {
+ .reg0i15_format = {
+ .opcode = break_op,
+ .immediate = BRK_KPROBE_SSTEPBP,
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static bool insns_are_not_supported(union loongarch_instruction insn)
+{
+ switch (insn.reg2i14_format.opcode) {
+ case llw_op:
+ case lld_op:
+ case scw_op:
+ case scd_op:
+ pr_notice("kprobe: ll and sc instructions are not supported\n");
+ return true;
+ }
+
+ switch (insn.reg1i21_format.opcode) {
+ case bceqz_op:
+ pr_notice("kprobe: bceqz and bcnez instructions are not supported\n");
+ return true;
+ }
+
+ return false;
+}
+NOKPROBE_SYMBOL(insns_are_not_supported);
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ union loongarch_instruction insn;
+
+ insn = p->addr[0];
+ if (insns_are_not_supported(insn))
+ return -EINVAL;
+
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+
+ p->ainsn.insn[0] = *p->addr;
+ p->ainsn.insn[1] = singlestep_insn;
+
+ p->opcode = *p->addr;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
+
+/* Install breakpoint in text */
+void arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
+
+/* Remove breakpoint from text */
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
+ kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
+}
+NOKPROBE_SYMBOL(save_previous_kprobe);
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
+ kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
+}
+NOKPROBE_SYMBOL(restore_previous_kprobe);
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
+ kcb->kprobe_saved_era = regs->csr_era;
+}
+NOKPROBE_SYMBOL(set_current_kprobe);
+
+static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs *regs)
+{
+ if (is_branch_ins(&p->opcode)) {
+ simu_branch(regs, p->opcode);
+ return false;
+ } else if (is_pc_ins(&p->opcode)) {
+ simu_pc(regs, p->opcode);
+ return false;
+ } else {
+ return true;
+ }
+}
+NOKPROBE_SYMBOL(insns_are_not_simulated);
+
+static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ regs->csr_prmd &= ~CSR_PRMD_PIE;
+
+ if (p->ainsn.insn->word == breakpoint_insn.word) {
+ regs->csr_prmd |= kcb->kprobe_saved_irq;
+ preempt_enable_no_resched();
+ return;
+ }
+
+ if (insns_are_not_simulated(p, regs)) {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ regs->csr_era = (unsigned long)&p->ainsn.insn[0];
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+}
+NOKPROBE_SYMBOL(setup_singlestep);
+
+static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Failed to recover from reentered kprobes.\n");
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ return true;
+}
+NOKPROBE_SYMBOL(reenter_kprobe);
+
+bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb;
+ struct kprobe *p, *cur_kprobe;
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing.
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe(addr);
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return true;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it will
+ * modify the execution path and no need to single
+ * stepping. Let's just reset current kprobe and exit.
+ *
+ * pre_handler can hit a breakpoint and can step thru
+ * before return.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs, kcb, 0);
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ }
+ return true;
+ }
+
+ if (!is_break_ins(addr)) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Return back to original instruction, and continue.
+ */
+ preempt_enable_no_resched();
+ return true;
+ }
+
+ preempt_enable_no_resched();
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
+
+bool kprobe_singlestep_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return false;
+
+ /* Restore back the original saved kprobes variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ /* Call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler)
+ cur->post_handler(cur, regs, 0);
+
+ regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
+ regs->csr_prmd |= kcb->kprobe_saved_irq;
+
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+ return true;
+}
+NOKPROBE_SYMBOL(kprobe_singlestep_handler);
+
+bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the ip points back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->csr_era = (unsigned long) cur->addr;
+ BUG_ON(!instruction_pointer(regs));
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ regs->csr_prmd |= kcb->kprobe_saved_irq;
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if (fixup_exception(regs))
+ return true;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_fault_handler);
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index a19bb32..4d9f775 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
*/
switch (bcode) {
case BRK_KPROBE_BP:
- if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
- current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ if (kprobe_breakpoint_handler(regs))
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
- if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
- current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ if (kprobe_singlestep_handler(regs))
goto out;
else
break;
@@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
break;
}

+ if (bcode == BRK_KPROBE_BP) {
+ if (__get_inst(&opcode, (u32 *)era, user))
+ goto out_sigsegv;
+
+ bcode = (opcode & 0x7fff);
+ }
+
switch (bcode) {
case BRK_BUG:
bug_handler(regs);
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index 1ccd536..449087b 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -135,6 +135,9 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
struct vm_area_struct *vma = NULL;
vm_fault_t fault;

+ if (kprobe_page_fault(regs, current->thread.trap_nr))
+ return;
+
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
--
2.1.0

2022-12-08 14:21:32

by Tiezhu Yang

[permalink] [raw]
Subject: [PATCH v9 3/4] LoongArch: Add kretprobe support

Use the generic kretprobe trampoline handler to add kretprobe
support for LoongArch.

Signed-off-by: Tiezhu Yang <[email protected]>
---
arch/loongarch/Kconfig | 1 +
arch/loongarch/kernel/Makefile | 2 +-
arch/loongarch/kernel/kprobes.c | 24 ++++++++
arch/loongarch/kernel/kprobes_trampoline.S | 96 ++++++++++++++++++++++++++++++
4 files changed, 122 insertions(+), 1 deletion(-)
create mode 100644 arch/loongarch/kernel/kprobes_trampoline.S

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index f6fc156..12571ee 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -103,6 +103,7 @@ config LOONGARCH
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KPROBES
+ select HAVE_KRETPROBES
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PCI
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 6fe4a4e..7ca6519 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -47,6 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o

obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o

-obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_KPROBES) += kprobes.o kprobes_trampoline.o

CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
index aadfc9ae..31dd75c 100644
--- a/arch/loongarch/kernel/kprobes.c
+++ b/arch/loongarch/kernel/kprobes.c
@@ -326,6 +326,30 @@ int __init arch_populate_kprobe_blacklist(void)
(unsigned long)__irqentry_text_end);
}

+/* Called from __kretprobe_trampoline */
+void __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ return (void *)kretprobe_trampoline_handler(regs, NULL);
+}
+NOKPROBE_SYMBOL(trampoline_probe_handler);
+
+void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->regs[1];
+ ri->fp = NULL;
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[1] = (unsigned long)&__kretprobe_trampoline;
+}
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
+
+int arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
+
int __init arch_init_kprobes(void)
{
return 0;
diff --git a/arch/loongarch/kernel/kprobes_trampoline.S b/arch/loongarch/kernel/kprobes_trampoline.S
new file mode 100644
index 0000000..af94b0d
--- /dev/null
+++ b/arch/loongarch/kernel/kprobes_trampoline.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/linkage.h>
+#include <asm/stackframe.h>
+
+ .text
+
+ .macro save_all_base_regs
+ cfi_st ra, PT_R1
+ cfi_st tp, PT_R2
+ cfi_st a0, PT_R4
+ cfi_st a1, PT_R5
+ cfi_st a2, PT_R6
+ cfi_st a3, PT_R7
+ cfi_st a4, PT_R8
+ cfi_st a5, PT_R9
+ cfi_st a6, PT_R10
+ cfi_st a7, PT_R11
+ cfi_st t0, PT_R12
+ cfi_st t1, PT_R13
+ cfi_st t2, PT_R14
+ cfi_st t3, PT_R15
+ cfi_st t4, PT_R16
+ cfi_st t5, PT_R17
+ cfi_st t6, PT_R18
+ cfi_st t7, PT_R19
+ cfi_st t8, PT_R20
+ cfi_st u0, PT_R21
+ cfi_st fp, PT_R22
+ cfi_st s0, PT_R23
+ cfi_st s1, PT_R24
+ cfi_st s2, PT_R25
+ cfi_st s3, PT_R26
+ cfi_st s4, PT_R27
+ cfi_st s5, PT_R28
+ cfi_st s6, PT_R29
+ cfi_st s7, PT_R30
+ cfi_st s8, PT_R31
+ csrrd t0, LOONGARCH_CSR_CRMD
+ andi t0, t0, 0x7 /* extract bit[1:0] PLV, bit[2] IE */
+ LONG_S t0, sp, PT_CRMD
+ .endm
+
+ .macro restore_all_base_regs
+ cfi_ld tp, PT_R2
+ cfi_ld a0, PT_R4
+ cfi_ld a1, PT_R5
+ cfi_ld a2, PT_R6
+ cfi_ld a3, PT_R7
+ cfi_ld a4, PT_R8
+ cfi_ld a5, PT_R9
+ cfi_ld a6, PT_R10
+ cfi_ld a7, PT_R11
+ cfi_ld t0, PT_R12
+ cfi_ld t1, PT_R13
+ cfi_ld t2, PT_R14
+ cfi_ld t3, PT_R15
+ cfi_ld t4, PT_R16
+ cfi_ld t5, PT_R17
+ cfi_ld t6, PT_R18
+ cfi_ld t7, PT_R19
+ cfi_ld t8, PT_R20
+ cfi_ld u0, PT_R21
+ cfi_ld fp, PT_R22
+ cfi_ld s0, PT_R23
+ cfi_ld s1, PT_R24
+ cfi_ld s2, PT_R25
+ cfi_ld s3, PT_R26
+ cfi_ld s4, PT_R27
+ cfi_ld s5, PT_R28
+ cfi_ld s6, PT_R29
+ cfi_ld s7, PT_R30
+ cfi_ld s8, PT_R31
+ LONG_L t0, sp, PT_CRMD
+ li.d t1, 0x7 /* mask bit[1:0] PLV, bit[2] IE */
+ csrxchg t0, t1, LOONGARCH_CSR_CRMD
+ .endm
+
+SYM_CODE_START(__kretprobe_trampoline)
+ addi.d sp, sp, -PT_SIZE
+ save_all_base_regs
+
+ addi.d t0, sp, PT_SIZE
+ LONG_S t0, sp, PT_R3
+
+ move a0, sp /* pt_regs */
+
+ bl trampoline_probe_handler
+
+ /* use the result as the return-address */
+ move ra, a0
+
+ restore_all_base_regs
+ addi.d sp, sp, PT_SIZE
+
+ jr ra
+SYM_CODE_END(__kretprobe_trampoline)
--
2.1.0

2022-12-08 14:37:35

by Tiezhu Yang

[permalink] [raw]
Subject: [PATCH v9 1/4] LoongArch: Simulate branch and PC instructions

According to LoongArch Reference Manual, simulate branch and
PC instructions, this is preparation for later patch.

Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#branch-instructions
Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_pcaddi_pcaddu12i_pcaddu18i_pcalau12i

Co-developed-by: Jinyang He <[email protected]>
Signed-off-by: Jinyang He <[email protected]>
Signed-off-by: Tiezhu Yang <[email protected]>
---
arch/loongarch/include/asm/inst.h | 12 ++++
arch/loongarch/include/asm/ptrace.h | 1 +
arch/loongarch/kernel/inst.c | 123 ++++++++++++++++++++++++++++++++++++
3 files changed, 136 insertions(+)

diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 6cd994d..19cf692 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -7,6 +7,7 @@

#include <linux/types.h>
#include <asm/asm.h>
+#include <asm/ptrace.h>

#define INSN_NOP 0x03400000
#define INSN_BREAK 0x002a0000
@@ -32,6 +33,7 @@ enum reg1i20_op {
lu12iw_op = 0x0a,
lu32id_op = 0x0b,
pcaddi_op = 0x0c,
+ pcalau12i_op = 0x0d,
pcaddu12i_op = 0x0e,
pcaddu18i_op = 0x0f,
};
@@ -366,6 +368,16 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
+void simu_branch(struct pt_regs *regs, union loongarch_instruction insn);
+void simu_pc(struct pt_regs *regs, union loongarch_instruction insn);
+
+static inline unsigned long sign_extend(unsigned long val, unsigned int idx)
+{
+ if (val & (1UL << idx))
+ return ~((1UL << (idx + 1)) - 1) | val;
+ else
+ return ((1UL << (idx + 1)) - 1) & val;
+}

static inline bool signed_imm_check(long val, unsigned int bit)
{
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
index 59c4608..58596c4 100644
--- a/arch/loongarch/include/asm/ptrace.h
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -6,6 +6,7 @@
#define _ASM_PTRACE_H

#include <asm/page.h>
+#include <asm/irqflags.h>
#include <asm/thread_info.h>
#include <uapi/asm/ptrace.h>

diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index 512579d..af48faf 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -165,3 +165,126 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned l

return insn.word;
}
+
+void simu_branch(struct pt_regs *regs, union loongarch_instruction insn)
+{
+ unsigned int imm, imm_l, imm_h, rd, rj;
+ unsigned long pc = regs->csr_era;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return;
+ }
+
+ imm_l = insn.reg0i26_format.immediate_l;
+ imm_h = insn.reg0i26_format.immediate_h;
+ switch (insn.reg0i26_format.opcode) {
+ case b_op:
+ regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 27);
+ return;
+ case bl_op:
+ regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 27);
+ regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
+ return;
+ }
+
+ imm_l = insn.reg1i21_format.immediate_l;
+ imm_h = insn.reg1i21_format.immediate_h;
+ rj = insn.reg1i21_format.rj;
+ switch (insn.reg1i21_format.opcode) {
+ case beqz_op:
+ if (regs->regs[rj] == 0)
+ regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 22);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ return;
+ case bnez_op:
+ if (regs->regs[rj] != 0)
+ regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 22);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ return;
+ }
+
+ imm = insn.reg2i16_format.immediate;
+ rj = insn.reg2i16_format.rj;
+ rd = insn.reg2i16_format.rd;
+ switch (insn.reg2i16_format.opcode) {
+ case beq_op:
+ if (regs->regs[rj] == regs->regs[rd])
+ regs->csr_era = pc + sign_extend(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bne_op:
+ if (regs->regs[rj] != regs->regs[rd])
+ regs->csr_era = pc + sign_extend(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case blt_op:
+ if ((long)regs->regs[rj] < (long)regs->regs[rd])
+ regs->csr_era = pc + sign_extend(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bge_op:
+ if ((long)regs->regs[rj] >= (long)regs->regs[rd])
+ regs->csr_era = pc + sign_extend(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bltu_op:
+ if (regs->regs[rj] < regs->regs[rd])
+ regs->csr_era = pc + sign_extend(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bgeu_op:
+ if (regs->regs[rj] >= regs->regs[rd])
+ regs->csr_era = pc + sign_extend(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case jirl_op:
+ regs->csr_era = regs->regs[rj] + sign_extend(imm << 2, 17);
+ regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
+ break;
+ default:
+ pr_info("%s: unknown opcode\n", __func__);
+ return;
+ }
+}
+
+void simu_pc(struct pt_regs *regs, union loongarch_instruction insn)
+{
+ unsigned long pc = regs->csr_era;
+ unsigned int rd = insn.reg1i20_format.rd;
+ unsigned int imm = insn.reg1i20_format.immediate;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return;
+ }
+
+ switch (insn.reg1i20_format.opcode) {
+ case pcaddi_op:
+ regs->regs[rd] = pc + sign_extend(imm << 2, 21);
+ break;
+ case pcaddu12i_op:
+ regs->regs[rd] = pc + sign_extend(imm << 12, 31);
+ break;
+ case pcaddu18i_op:
+ regs->regs[rd] = pc + sign_extend(imm << 18, 37);
+ break;
+ case pcalau12i_op:
+ regs->regs[rd] = pc + sign_extend(imm << 12, 31);
+ regs->regs[rd] &= ~((1 << 12) - 1);
+ break;
+ default:
+ pr_info("%s: unknown opcode\n", __func__);
+ return;
+ }
+
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+}
--
2.1.0

2022-12-08 14:41:38

by Tiezhu Yang

[permalink] [raw]
Subject: [PATCH v9 4/4] samples/kprobes: Add LoongArch support

Add LoongArch specific info in handler_pre() and handler_post().

Signed-off-by: Tiezhu Yang <[email protected]>
---
samples/kprobes/kprobe_example.c | 8 ++++++++
1 file changed, 8 insertions(+)

diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index fd346f5..ef44c61 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -55,6 +55,10 @@ static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs)
pr_info("<%s> p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->psw.addr, regs->flags);
#endif
+#ifdef CONFIG_LOONGARCH
+ pr_info("<%s> p->addr = 0x%p, era = 0x%lx, estat = 0x%lx\n",
+ p->symbol_name, p->addr, regs->csr_era, regs->csr_estat);
+#endif

/* A dump_stack() here will give a stack backtrace */
return 0;
@@ -92,6 +96,10 @@ static void __kprobes handler_post(struct kprobe *p, struct pt_regs *regs,
pr_info("<%s> p->addr, 0x%p, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->flags);
#endif
+#ifdef CONFIG_LOONGARCH
+ pr_info("<%s> p->addr = 0x%p, estat = 0x%lx\n",
+ p->symbol_name, p->addr, regs->csr_estat);
+#endif
}

static int __init kprobe_init(void)
--
2.1.0

2022-12-09 02:21:41

by Huacai Chen

[permalink] [raw]
Subject: Re: [PATCH v9 1/4] LoongArch: Simulate branch and PC instructions

Hi, Tiezhu,

On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]> wrote:
>
> According to LoongArch Reference Manual, simulate branch and
> PC instructions, this is preparation for later patch.
>
> Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#branch-instructions
> Link: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_pcaddi_pcaddu12i_pcaddu18i_pcalau12i
>
> Co-developed-by: Jinyang He <[email protected]>
> Signed-off-by: Jinyang He <[email protected]>
> Signed-off-by: Tiezhu Yang <[email protected]>
> ---
> arch/loongarch/include/asm/inst.h | 12 ++++
> arch/loongarch/include/asm/ptrace.h | 1 +
> arch/loongarch/kernel/inst.c | 123 ++++++++++++++++++++++++++++++++++++
> 3 files changed, 136 insertions(+)
>
> diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
> index 6cd994d..19cf692 100644
> --- a/arch/loongarch/include/asm/inst.h
> +++ b/arch/loongarch/include/asm/inst.h
> @@ -7,6 +7,7 @@
>
> #include <linux/types.h>
> #include <asm/asm.h>
> +#include <asm/ptrace.h>
>
> #define INSN_NOP 0x03400000
> #define INSN_BREAK 0x002a0000
> @@ -32,6 +33,7 @@ enum reg1i20_op {
> lu12iw_op = 0x0a,
> lu32id_op = 0x0b,
> pcaddi_op = 0x0c,
> + pcalau12i_op = 0x0d,
> pcaddu12i_op = 0x0e,
> pcaddu18i_op = 0x0f,
> };
> @@ -366,6 +368,16 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
> u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
> u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
> u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
> +void simu_branch(struct pt_regs *regs, union loongarch_instruction insn);
> +void simu_pc(struct pt_regs *regs, union loongarch_instruction insn);
> +
> +static inline unsigned long sign_extend(unsigned long val, unsigned int idx)
> +{
> + if (val & (1UL << idx))
> + return ~((1UL << (idx + 1)) - 1) | val;
> + else
> + return ((1UL << (idx + 1)) - 1) & val;
> +}
I have rebased https://github.com/loongson/linux/commits/loongarch-next,
this function is already there.

Huacai
>
> static inline bool signed_imm_check(long val, unsigned int bit)
> {
> diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
> index 59c4608..58596c4 100644
> --- a/arch/loongarch/include/asm/ptrace.h
> +++ b/arch/loongarch/include/asm/ptrace.h
> @@ -6,6 +6,7 @@
> #define _ASM_PTRACE_H
>
> #include <asm/page.h>
> +#include <asm/irqflags.h>
> #include <asm/thread_info.h>
> #include <uapi/asm/ptrace.h>
>
> diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
> index 512579d..af48faf 100644
> --- a/arch/loongarch/kernel/inst.c
> +++ b/arch/loongarch/kernel/inst.c
> @@ -165,3 +165,126 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned l
>
> return insn.word;
> }
> +
> +void simu_branch(struct pt_regs *regs, union loongarch_instruction insn)
> +{
> + unsigned int imm, imm_l, imm_h, rd, rj;
> + unsigned long pc = regs->csr_era;
> +
> + if (pc & 3) {
> + pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
> + return;
> + }
> +
> + imm_l = insn.reg0i26_format.immediate_l;
> + imm_h = insn.reg0i26_format.immediate_h;
> + switch (insn.reg0i26_format.opcode) {
> + case b_op:
> + regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 27);
> + return;
> + case bl_op:
> + regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 27);
> + regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
> + return;
> + }
> +
> + imm_l = insn.reg1i21_format.immediate_l;
> + imm_h = insn.reg1i21_format.immediate_h;
> + rj = insn.reg1i21_format.rj;
> + switch (insn.reg1i21_format.opcode) {
> + case beqz_op:
> + if (regs->regs[rj] == 0)
> + regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 22);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + return;
> + case bnez_op:
> + if (regs->regs[rj] != 0)
> + regs->csr_era = pc + sign_extend((imm_h << 16 | imm_l) << 2, 22);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + return;
> + }
> +
> + imm = insn.reg2i16_format.immediate;
> + rj = insn.reg2i16_format.rj;
> + rd = insn.reg2i16_format.rd;
> + switch (insn.reg2i16_format.opcode) {
> + case beq_op:
> + if (regs->regs[rj] == regs->regs[rd])
> + regs->csr_era = pc + sign_extend(imm << 2, 17);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + break;
> + case bne_op:
> + if (regs->regs[rj] != regs->regs[rd])
> + regs->csr_era = pc + sign_extend(imm << 2, 17);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + break;
> + case blt_op:
> + if ((long)regs->regs[rj] < (long)regs->regs[rd])
> + regs->csr_era = pc + sign_extend(imm << 2, 17);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + break;
> + case bge_op:
> + if ((long)regs->regs[rj] >= (long)regs->regs[rd])
> + regs->csr_era = pc + sign_extend(imm << 2, 17);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + break;
> + case bltu_op:
> + if (regs->regs[rj] < regs->regs[rd])
> + regs->csr_era = pc + sign_extend(imm << 2, 17);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + break;
> + case bgeu_op:
> + if (regs->regs[rj] >= regs->regs[rd])
> + regs->csr_era = pc + sign_extend(imm << 2, 17);
> + else
> + regs->csr_era = pc + LOONGARCH_INSN_SIZE;
> + break;
> + case jirl_op:
> + regs->csr_era = regs->regs[rj] + sign_extend(imm << 2, 17);
> + regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
> + break;
> + default:
> + pr_info("%s: unknown opcode\n", __func__);
> + return;
> + }
> +}
> +
> +void simu_pc(struct pt_regs *regs, union loongarch_instruction insn)
> +{
> + unsigned long pc = regs->csr_era;
> + unsigned int rd = insn.reg1i20_format.rd;
> + unsigned int imm = insn.reg1i20_format.immediate;
> +
> + if (pc & 3) {
> + pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
> + return;
> + }
> +
> + switch (insn.reg1i20_format.opcode) {
> + case pcaddi_op:
> + regs->regs[rd] = pc + sign_extend(imm << 2, 21);
> + break;
> + case pcaddu12i_op:
> + regs->regs[rd] = pc + sign_extend(imm << 12, 31);
> + break;
> + case pcaddu18i_op:
> + regs->regs[rd] = pc + sign_extend(imm << 18, 37);
> + break;
> + case pcalau12i_op:
> + regs->regs[rd] = pc + sign_extend(imm << 12, 31);
> + regs->regs[rd] &= ~((1 << 12) - 1);
> + break;
> + default:
> + pr_info("%s: unknown opcode\n", __func__);
> + return;
> + }
> +
> + regs->csr_era += LOONGARCH_INSN_SIZE;
> +}
> --
> 2.1.0
>
>

2022-12-09 03:23:45

by Huacai Chen

[permalink] [raw]
Subject: Re: [PATCH v9 2/4] LoongArch: Add kprobe support

cas

On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]> wrote:
>
> Kprobes allows you to trap at almost any kernel address and
> execute a callback function, this commit adds kprobe support
> for LoongArch.
>
> Signed-off-by: Tiezhu Yang <[email protected]>
> ---
> arch/loongarch/Kconfig | 1 +
> arch/loongarch/include/asm/inst.h | 15 ++
> arch/loongarch/include/asm/kprobes.h | 59 +++++++
> arch/loongarch/kernel/Makefile | 2 +
> arch/loongarch/kernel/kprobes.c | 332 +++++++++++++++++++++++++++++++++++
> arch/loongarch/kernel/traps.c | 13 +-
> arch/loongarch/mm/fault.c | 3 +
> 7 files changed, 421 insertions(+), 4 deletions(-)
> create mode 100644 arch/loongarch/include/asm/kprobes.h
> create mode 100644 arch/loongarch/kernel/kprobes.c
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index 16bf1b6..f6fc156 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -102,6 +102,7 @@ config LOONGARCH
> select HAVE_IOREMAP_PROT
> select HAVE_IRQ_EXIT_ON_IRQ_STACK
> select HAVE_IRQ_TIME_ACCOUNTING
> + select HAVE_KPROBES
> select HAVE_MOD_ARCH_SPECIFIC
> select HAVE_NMI
> select HAVE_PCI
> diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
> index 19cf692..e3f2378 100644
> --- a/arch/loongarch/include/asm/inst.h
> +++ b/arch/loongarch/include/asm/inst.h
> @@ -24,6 +24,10 @@
>
> #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
>
> +enum reg0i15_op {
> + break_op = 0x54,
> +};
> +
> enum reg0i26_op {
> b_op = 0x14,
> bl_op = 0x15,
> @@ -180,6 +184,11 @@ enum reg3sa2_op {
> alsld_op = 0x16,
> };
>
> +struct reg0i15_format {
> + unsigned int immediate : 15;
> + unsigned int opcode : 17;
> +};
> +
> struct reg0i26_format {
> unsigned int immediate_h : 10;
> unsigned int immediate_l : 16;
> @@ -265,6 +274,7 @@ struct reg3sa2_format {
>
> union loongarch_instruction {
> unsigned int word;
> + struct reg0i15_format reg0i15_format;
> struct reg0i26_format reg0i26_format;
> struct reg1i20_format reg1i20_format;
> struct reg1i21_format reg1i21_format;
> @@ -335,6 +345,11 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
> ip->reg1i21_format.opcode <= bgeu_op;
> }
>
> +static inline bool is_break_ins(union loongarch_instruction *ip)
> +{
> + return ip->reg0i15_format.opcode == break_op;
> +}
> +
> static inline bool is_ra_save_ins(union loongarch_instruction *ip)
> {
> /* st.d $ra, $sp, offset */
> diff --git a/arch/loongarch/include/asm/kprobes.h b/arch/loongarch/include/asm/kprobes.h
> new file mode 100644
> index 0000000..d3903f3
> --- /dev/null
> +++ b/arch/loongarch/include/asm/kprobes.h
> @@ -0,0 +1,59 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +#ifndef __ASM_LOONGARCH_KPROBES_H
> +#define __ASM_LOONGARCH_KPROBES_H
> +
> +#include <asm-generic/kprobes.h>
> +#include <asm/cacheflush.h>
> +
> +#ifdef CONFIG_KPROBES
> +
> +#include <asm/inst.h>
> +
> +#define __ARCH_WANT_KPROBES_INSN_SLOT
> +#define MAX_INSN_SIZE 2
> +
> +#define flush_insn_slot(p) \
> +do { \
> + if (p->addr) \
> + flush_icache_range((unsigned long)p->addr, \
> + (unsigned long)p->addr + \
> + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
> +} while (0)
> +
> +#define kretprobe_blacklist_size 0
> +
> +typedef union loongarch_instruction kprobe_opcode_t;
> +
> +/* Architecture specific copy of original instruction */
> +struct arch_specific_insn {
> + /* copy of the original instruction */
> + kprobe_opcode_t *insn;
> +};
> +
> +struct prev_kprobe {
> + struct kprobe *kp;
> + unsigned long status;
> + unsigned long saved_irq;
> + unsigned long saved_era;
> +};
> +
> +/* per-cpu kprobe control block */
> +struct kprobe_ctlblk {
> + unsigned long kprobe_status;
> + unsigned long kprobe_saved_irq;
> + unsigned long kprobe_saved_era;
> + struct prev_kprobe prev_kprobe;
> +};
> +
> +void arch_remove_kprobe(struct kprobe *p);
> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
> +bool kprobe_breakpoint_handler(struct pt_regs *regs);
> +bool kprobe_singlestep_handler(struct pt_regs *regs);
> +
> +#else /* !CONFIG_KPROBES */
> +
> +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) { return 0; }
> +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) { return 0; }
> +
> +#endif /* CONFIG_KPROBES */
> +#endif /* __ASM_LOONGARCH_KPROBES_H */
> diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
> index fcaa024..6fe4a4e 100644
> --- a/arch/loongarch/kernel/Makefile
> +++ b/arch/loongarch/kernel/Makefile
> @@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>
> obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
>
> +obj-$(CONFIG_KPROBES) += kprobes.o
> +
> CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
> diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
> new file mode 100644
> index 0000000..aadfc9ae
> --- /dev/null
> +++ b/arch/loongarch/kernel/kprobes.c
> @@ -0,0 +1,332 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +#include <linux/kprobes.h>
> +#include <linux/kdebug.h>
> +#include <linux/preempt.h>
> +#include <asm/break.h>
> +
> +static const union loongarch_instruction breakpoint_insn = {
> + .reg0i15_format = {
> + .opcode = break_op,
> + .immediate = BRK_KPROBE_BP,
> + }
> +};
> +
> +static const union loongarch_instruction singlestep_insn = {
> + .reg0i15_format = {
> + .opcode = break_op,
> + .immediate = BRK_KPROBE_SSTEPBP,
> + }
> +};
> +
> +DEFINE_PER_CPU(struct kprobe *, current_kprobe);
> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> +
> +static bool insns_are_not_supported(union loongarch_instruction insn)
Use insns_not_supported for short.

> +{
> + switch (insn.reg2i14_format.opcode) {
> + case llw_op:
> + case lld_op:
> + case scw_op:
> + case scd_op:
> + pr_notice("kprobe: ll and sc instructions are not supported\n");
> + return true;
> + }
> +
> + switch (insn.reg1i21_format.opcode) {
> + case bceqz_op:
> + pr_notice("kprobe: bceqz and bcnez instructions are not supported\n");
> + return true;
> + }
> +
> + return false;
> +}
> +NOKPROBE_SYMBOL(insns_are_not_supported);
> +
> +int arch_prepare_kprobe(struct kprobe *p)
> +{
> + union loongarch_instruction insn;
> +
> + insn = p->addr[0];
> + if (insns_are_not_supported(insn))
> + return -EINVAL;
> +
> + p->ainsn.insn = get_insn_slot();
> + if (!p->ainsn.insn)
> + return -ENOMEM;
> +
> + p->ainsn.insn[0] = *p->addr;
> + p->ainsn.insn[1] = singlestep_insn;
> +
> + p->opcode = *p->addr;
> +
> + return 0;
> +}
> +NOKPROBE_SYMBOL(arch_prepare_kprobe);
> +
> +/* Install breakpoint in text */
> +void arch_arm_kprobe(struct kprobe *p)
> +{
> + *p->addr = breakpoint_insn;
> + flush_insn_slot(p);
> +}
> +NOKPROBE_SYMBOL(arch_arm_kprobe);
> +
> +/* Remove breakpoint from text */
> +void arch_disarm_kprobe(struct kprobe *p)
> +{
> + *p->addr = p->opcode;
> + flush_insn_slot(p);
> +}
> +NOKPROBE_SYMBOL(arch_disarm_kprobe);
> +
> +void arch_remove_kprobe(struct kprobe *p)
> +{
> + if (p->ainsn.insn) {
> + free_insn_slot(p->ainsn.insn, 0);
> + p->ainsn.insn = NULL;
> + }
> +}
> +NOKPROBE_SYMBOL(arch_remove_kprobe);
> +
> +static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
> +{
> + kcb->prev_kprobe.kp = kprobe_running();
> + kcb->prev_kprobe.status = kcb->kprobe_status;
> + kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
> + kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
> +}
> +NOKPROBE_SYMBOL(save_previous_kprobe);
> +
> +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
> +{
> + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
> + kcb->kprobe_status = kcb->prev_kprobe.status;
> + kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
> + kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
> +}
> +NOKPROBE_SYMBOL(restore_previous_kprobe);
> +
> +static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
> + struct kprobe_ctlblk *kcb)
> +{
> + __this_cpu_write(current_kprobe, p);
> + kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
> + kcb->kprobe_saved_era = regs->csr_era;
> +}
> +NOKPROBE_SYMBOL(set_current_kprobe);
> +
> +static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs *regs)
Use insns_not_simulated for short.
> +{
> + if (is_branch_ins(&p->opcode)) {
> + simu_branch(regs, p->opcode);
> + return false;
> + } else if (is_pc_ins(&p->opcode)) {
> + simu_pc(regs, p->opcode);
> + return false;
> + } else {
> + return true;
> + }
> +}
> +NOKPROBE_SYMBOL(insns_are_not_simulated);
> +
> +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
> + struct kprobe_ctlblk *kcb, int reenter)
> +{
> + if (reenter) {
> + save_previous_kprobe(kcb);
> + set_current_kprobe(p, regs, kcb);
> + kcb->kprobe_status = KPROBE_REENTER;
> + } else {
> + kcb->kprobe_status = KPROBE_HIT_SS;
> + }
> +
> + regs->csr_prmd &= ~CSR_PRMD_PIE;
> +
> + if (p->ainsn.insn->word == breakpoint_insn.word) {
> + regs->csr_prmd |= kcb->kprobe_saved_irq;
> + preempt_enable_no_resched();
> + return;
> + }
> +
> + if (insns_are_not_simulated(p, regs)) {
> + kcb->kprobe_status = KPROBE_HIT_SS;
> + regs->csr_era = (unsigned long)&p->ainsn.insn[0];
> + } else {
> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> + if (p->post_handler)
> + p->post_handler(p, regs, 0);
> + reset_current_kprobe();
> + preempt_enable_no_resched();
> + }
> +}
> +NOKPROBE_SYMBOL(setup_singlestep);
> +
> +static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
> + struct kprobe_ctlblk *kcb)
> +{
> + switch (kcb->kprobe_status) {
> + case KPROBE_HIT_SSDONE:
> + case KPROBE_HIT_ACTIVE:
> + kprobes_inc_nmissed_count(p);
> + setup_singlestep(p, regs, kcb, 1);
> + break;
> + case KPROBE_HIT_SS:
> + case KPROBE_REENTER:
> + pr_warn("Failed to recover from reentered kprobes.\n");
> + dump_kprobe(p);
> + BUG();
> + break;
> + default:
> + WARN_ON(1);
> + return false;
> + }
> +
> + return true;
> +}
> +NOKPROBE_SYMBOL(reenter_kprobe);
> +
> +bool kprobe_breakpoint_handler(struct pt_regs *regs)
> +{
> + struct kprobe_ctlblk *kcb;
> + struct kprobe *p, *cur_kprobe;
> + kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
> +
> + /*
> + * We don't want to be preempted for the entire
> + * duration of kprobe processing.
> + */
> + preempt_disable();
> + kcb = get_kprobe_ctlblk();
> + cur_kprobe = kprobe_running();
> +
> + p = get_kprobe(addr);
> + if (p) {
> + if (cur_kprobe) {
> + if (reenter_kprobe(p, regs, kcb))
> + return true;
> + } else {
> + /* Probe hit */
> + set_current_kprobe(p, regs, kcb);
> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> +
> + /*
> + * If we have no pre-handler or it returned 0, we
> + * continue with normal processing. If we have a
> + * pre-handler and it returned non-zero, it will
> + * modify the execution path and no need to single
> + * stepping. Let's just reset current kprobe and exit.
> + *
> + * pre_handler can hit a breakpoint and can step thru
> + * before return.
> + */
> + if (!p->pre_handler || !p->pre_handler(p, regs)) {
> + setup_singlestep(p, regs, kcb, 0);
> + } else {
> + reset_current_kprobe();
> + preempt_enable_no_resched();
> + }
> + }
> + return true;
> + }
> +
> + if (!is_break_ins(addr)) {
> + /*
> + * The breakpoint instruction was removed right
> + * after we hit it. Another cpu has removed
> + * either a probepoint or a debugger breakpoint
> + * at this address. In either case, no further
> + * handling of this interrupt is appropriate.
> + * Return back to original instruction, and continue.
> + */
> + preempt_enable_no_resched();
> + return true;
> + }
> +
> + preempt_enable_no_resched();
> + return false;
> +}
> +NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
> +
> +bool kprobe_singlestep_handler(struct pt_regs *regs)
> +{
> + struct kprobe *cur = kprobe_running();
> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> +
> + if (!cur)
> + return false;
> +
> + /* Restore back the original saved kprobes variables and continue */
> + if (kcb->kprobe_status == KPROBE_REENTER) {
> + restore_previous_kprobe(kcb);
> + goto out;
> + }
> +
> + /* Call post handler */
> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> + if (cur->post_handler)
> + cur->post_handler(cur, regs, 0);
I'm not very familiar, but I think you should set KPROBE_HIT_SSDONE
only when post_handler is not NULL.

> +
> + regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
> + regs->csr_prmd |= kcb->kprobe_saved_irq;
> +
> + reset_current_kprobe();
> +out:
> + preempt_enable_no_resched();
> + return true;
> +}
> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
> +
> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
> +{
> + struct kprobe *cur = kprobe_running();
> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> +
> + switch (kcb->kprobe_status) {
> + case KPROBE_HIT_SS:
> + case KPROBE_REENTER:
> + /*
> + * We are here because the instruction being single
> + * stepped caused a page fault. We reset the current
> + * kprobe and the ip points back to the probe address
> + * and allow the page fault handler to continue as a
> + * normal page fault.
> + */
> + regs->csr_era = (unsigned long) cur->addr;
> + BUG_ON(!instruction_pointer(regs));
> +
> + if (kcb->kprobe_status == KPROBE_REENTER) {
> + restore_previous_kprobe(kcb);
> + } else {
> + regs->csr_prmd |= kcb->kprobe_saved_irq;
> + reset_current_kprobe();
> + preempt_enable_no_resched();
> + }
I think the KPROBE_REENTER case also needs to enable preemption.

> +
> + break;
> + case KPROBE_HIT_ACTIVE:
> + case KPROBE_HIT_SSDONE:
> + /*
> + * In case the user-specified fault handler returned
> + * zero, try to fix up.
> + */
> + if (fixup_exception(regs))
> + return true;
Does here need to enable preemption too?

> + }
> + return false;
> +}
> +NOKPROBE_SYMBOL(kprobe_fault_handler);
> +
> +/*
> + * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
> + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
> + */
> +int __init arch_populate_kprobe_blacklist(void)
> +{
> + return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
> + (unsigned long)__irqentry_text_end);
> +}
> +
> +int __init arch_init_kprobes(void)
> +{
> + return 0;
> +}
> diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
> index a19bb32..4d9f775 100644
> --- a/arch/loongarch/kernel/traps.c
> +++ b/arch/loongarch/kernel/traps.c
> @@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
> */
> switch (bcode) {
> case BRK_KPROBE_BP:
> - if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
> - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
> + if (kprobe_breakpoint_handler(regs))
> goto out;
> else
> break;
> case BRK_KPROBE_SSTEPBP:
> - if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
> - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
> + if (kprobe_singlestep_handler(regs))
> goto out;
> else
> break;
> @@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
> break;
> }
>
> + if (bcode == BRK_KPROBE_BP) {
> + if (__get_inst(&opcode, (u32 *)era, user))
> + goto out_sigsegv;
> +
> + bcode = (opcode & 0x7fff);
> + }
> +
Why? we already got bcode at the beginning.

Huacai
> switch (bcode) {
> case BRK_BUG:
> bug_handler(regs);
> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
> index 1ccd536..449087b 100644
> --- a/arch/loongarch/mm/fault.c
> +++ b/arch/loongarch/mm/fault.c
> @@ -135,6 +135,9 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
> struct vm_area_struct *vma = NULL;
> vm_fault_t fault;
>
> + if (kprobe_page_fault(regs, current->thread.trap_nr))
> + return;
> +
> /*
> * We fault-in kernel-space virtual memory on-demand. The
> * 'reference' page table is init_mm.pgd.
> --
> 2.1.0
>
>

2022-12-09 04:34:44

by Youling Tang

[permalink] [raw]
Subject: Re: [PATCH v9 2/4] LoongArch: Add kprobe support

Hi, Jinyang

On 12/09/2022 11:31 AM, Jinyang He wrote:
> On 2022-12-09 10:22, Huacai Chen wrote:
>
>> cas
>>
>> On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]>
>> wrote:
>>> Kprobes allows you to trap at almost any kernel address and
>>> execute a callback function, this commit adds kprobe support
>>> for LoongArch.
>>>
>>> Signed-off-by: Tiezhu Yang <[email protected]>
>>> ---
>>> arch/loongarch/Kconfig | 1 +
>>> arch/loongarch/include/asm/inst.h | 15 ++
>>> arch/loongarch/include/asm/kprobes.h | 59 +++++++
>>> arch/loongarch/kernel/Makefile | 2 +
>>> arch/loongarch/kernel/kprobes.c | 332
>>> +++++++++++++++++++++++++++++++++++
>>> arch/loongarch/kernel/traps.c | 13 +-
>>> arch/loongarch/mm/fault.c | 3 +
>>> 7 files changed, 421 insertions(+), 4 deletions(-)
>>> create mode 100644 arch/loongarch/include/asm/kprobes.h
>>> create mode 100644 arch/loongarch/kernel/kprobes.c
>>>
>>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>>> index 16bf1b6..f6fc156 100644
>>> --- a/arch/loongarch/Kconfig
>>> +++ b/arch/loongarch/Kconfig
>>> @@ -102,6 +102,7 @@ config LOONGARCH
>>> select HAVE_IOREMAP_PROT
>>> select HAVE_IRQ_EXIT_ON_IRQ_STACK
>>> select HAVE_IRQ_TIME_ACCOUNTING
>>> + select HAVE_KPROBES
>>> select HAVE_MOD_ARCH_SPECIFIC
>>> select HAVE_NMI
>>> select HAVE_PCI
>>> diff --git a/arch/loongarch/include/asm/inst.h
>>> b/arch/loongarch/include/asm/inst.h
>>> index 19cf692..e3f2378 100644
>>> --- a/arch/loongarch/include/asm/inst.h
>>> +++ b/arch/loongarch/include/asm/inst.h
>>> @@ -24,6 +24,10 @@
>>>
>>> #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >>
>>> ADDR_IMMSHIFT_##INSN)
>>>
>>> +enum reg0i15_op {
>>> + break_op = 0x54,
>>> +};
>>> +
>>> enum reg0i26_op {
>>> b_op = 0x14,
>>> bl_op = 0x15,
>>> @@ -180,6 +184,11 @@ enum reg3sa2_op {
>>> alsld_op = 0x16,
>>> };
>>>
>>> +struct reg0i15_format {
>>> + unsigned int immediate : 15;
>>> + unsigned int opcode : 17;
>>> +};
>>> +
>>> struct reg0i26_format {
>>> unsigned int immediate_h : 10;
>>> unsigned int immediate_l : 16;
>>> @@ -265,6 +274,7 @@ struct reg3sa2_format {
>>>
>>> union loongarch_instruction {
>>> unsigned int word;
>>> + struct reg0i15_format reg0i15_format;
>>> struct reg0i26_format reg0i26_format;
>>> struct reg1i20_format reg1i20_format;
>>> struct reg1i21_format reg1i21_format;
>>> @@ -335,6 +345,11 @@ static inline bool is_branch_ins(union
>>> loongarch_instruction *ip)
>>> ip->reg1i21_format.opcode <= bgeu_op;
>>> }
>>>
>>> +static inline bool is_break_ins(union loongarch_instruction *ip)
>>> +{
>>> + return ip->reg0i15_format.opcode == break_op;
>>> +}
>>> +
>>> static inline bool is_ra_save_ins(union loongarch_instruction *ip)
>>> {
>>> /* st.d $ra, $sp, offset */
>>> diff --git a/arch/loongarch/include/asm/kprobes.h
>>> b/arch/loongarch/include/asm/kprobes.h
>>> new file mode 100644
>>> index 0000000..d3903f3
>>> --- /dev/null
>>> +++ b/arch/loongarch/include/asm/kprobes.h
>>> @@ -0,0 +1,59 @@
>>> +/* SPDX-License-Identifier: GPL-2.0-only */
>>> +#ifndef __ASM_LOONGARCH_KPROBES_H
>>> +#define __ASM_LOONGARCH_KPROBES_H
>>> +
>>> +#include <asm-generic/kprobes.h>
>>> +#include <asm/cacheflush.h>
>>> +
>>> +#ifdef CONFIG_KPROBES
>>> +
>>> +#include <asm/inst.h>
>>> +
>>> +#define __ARCH_WANT_KPROBES_INSN_SLOT
>>> +#define MAX_INSN_SIZE 2
>>> +
>>> +#define flush_insn_slot(p) \
>>> +do { \
>>> + if (p->addr) \
>>> + flush_icache_range((unsigned long)p->addr, \
>>> + (unsigned long)p->addr + \
>>> + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
>>> +} while (0)
>>> +
>>> +#define kretprobe_blacklist_size 0
>>> +
>>> +typedef union loongarch_instruction kprobe_opcode_t;
>>> +
>>> +/* Architecture specific copy of original instruction */
>>> +struct arch_specific_insn {
>>> + /* copy of the original instruction */
>>> + kprobe_opcode_t *insn;
>>> +};
>>> +
>>> +struct prev_kprobe {
>>> + struct kprobe *kp;
>>> + unsigned long status;
>>> + unsigned long saved_irq;
>>> + unsigned long saved_era;
>>> +};
>>> +
>>> +/* per-cpu kprobe control block */
>>> +struct kprobe_ctlblk {
>>> + unsigned long kprobe_status;
>>> + unsigned long kprobe_saved_irq;
>>> + unsigned long kprobe_saved_era;
>>> + struct prev_kprobe prev_kprobe;
>>> +};
>>> +
>>> +void arch_remove_kprobe(struct kprobe *p);
>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs);
>>> +bool kprobe_singlestep_handler(struct pt_regs *regs);
>>> +
>>> +#else /* !CONFIG_KPROBES */
>>> +
>>> +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) {
>>> return 0; }
>>> +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) {
>>> return 0; }
>>> +
>>> +#endif /* CONFIG_KPROBES */
>>> +#endif /* __ASM_LOONGARCH_KPROBES_H */
>>> diff --git a/arch/loongarch/kernel/Makefile
>>> b/arch/loongarch/kernel/Makefile
>>> index fcaa024..6fe4a4e 100644
>>> --- a/arch/loongarch/kernel/Makefile
>>> +++ b/arch/loongarch/kernel/Makefile
>>> @@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>>>
>>> obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
>>>
>>> +obj-$(CONFIG_KPROBES) += kprobes.o
>>> +
>>> CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
>>> diff --git a/arch/loongarch/kernel/kprobes.c
>>> b/arch/loongarch/kernel/kprobes.c
>>> new file mode 100644
>>> index 0000000..aadfc9ae
>>> --- /dev/null
>>> +++ b/arch/loongarch/kernel/kprobes.c
>>> @@ -0,0 +1,332 @@
>>> +// SPDX-License-Identifier: GPL-2.0-only
>>> +#include <linux/kprobes.h>
>>> +#include <linux/kdebug.h>
>>> +#include <linux/preempt.h>
>>> +#include <asm/break.h>
>>> +
>>> +static const union loongarch_instruction breakpoint_insn = {
>>> + .reg0i15_format = {
>>> + .opcode = break_op,
>>> + .immediate = BRK_KPROBE_BP,
>>> + }
>>> +};
>>> +
>>> +static const union loongarch_instruction singlestep_insn = {
>>> + .reg0i15_format = {
>>> + .opcode = break_op,
>>> + .immediate = BRK_KPROBE_SSTEPBP,
>>> + }
>>> +};
>>> +
>>> +DEFINE_PER_CPU(struct kprobe *, current_kprobe);
>>> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>>> +
>>> +static bool insns_are_not_supported(union loongarch_instruction insn)
>> Use insns_not_supported for short.
>>
>>> +{
>>> + switch (insn.reg2i14_format.opcode) {
>>> + case llw_op:
>>> + case lld_op:
>>> + case scw_op:
>>> + case scd_op:
>>> + pr_notice("kprobe: ll and sc instructions are not
>>> supported\n");
>>> + return true;
>>> + }
>>> +
>>> + switch (insn.reg1i21_format.opcode) {
>>> + case bceqz_op:
>>> + pr_notice("kprobe: bceqz and bcnez instructions are
>>> not supported\n");
>>> + return true;
>>> + }
>>> +
>>> + return false;
>>> +}
>>> +NOKPROBE_SYMBOL(insns_are_not_supported);
>>> +
>>> +int arch_prepare_kprobe(struct kprobe *p)
>>> +{
>>> + union loongarch_instruction insn;
>>> +
>>> + insn = p->addr[0];
>>> + if (insns_are_not_supported(insn))
>>> + return -EINVAL;
>>> +
>>> + p->ainsn.insn = get_insn_slot();
>>> + if (!p->ainsn.insn)
>>> + return -ENOMEM;
>>> +
>>> + p->ainsn.insn[0] = *p->addr;
>>> + p->ainsn.insn[1] = singlestep_insn;
>>> +
>>> + p->opcode = *p->addr;
>>> +
>>> + return 0;
>>> +}
>>> +NOKPROBE_SYMBOL(arch_prepare_kprobe);
>>> +
>>> +/* Install breakpoint in text */
>>> +void arch_arm_kprobe(struct kprobe *p)
>>> +{
>>> + *p->addr = breakpoint_insn;
>>> + flush_insn_slot(p);
>>> +}
>>> +NOKPROBE_SYMBOL(arch_arm_kprobe);
>>> +
>>> +/* Remove breakpoint from text */
>>> +void arch_disarm_kprobe(struct kprobe *p)
>>> +{
>>> + *p->addr = p->opcode;
>>> + flush_insn_slot(p);
>>> +}
>>> +NOKPROBE_SYMBOL(arch_disarm_kprobe);
>>> +
>>> +void arch_remove_kprobe(struct kprobe *p)
>>> +{
>>> + if (p->ainsn.insn) {
>>> + free_insn_slot(p->ainsn.insn, 0);
>>> + p->ainsn.insn = NULL;
>>> + }
>>> +}
>>> +NOKPROBE_SYMBOL(arch_remove_kprobe);
>>> +
>>> +static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
>>> +{
>>> + kcb->prev_kprobe.kp = kprobe_running();
>>> + kcb->prev_kprobe.status = kcb->kprobe_status;
>>> + kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
>>> + kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
>>> +}
>>> +NOKPROBE_SYMBOL(save_previous_kprobe);
>>> +
>>> +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
>>> +{
>>> + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
>>> + kcb->kprobe_status = kcb->prev_kprobe.status;
>>> + kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
>>> + kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
>>> +}
>>> +NOKPROBE_SYMBOL(restore_previous_kprobe);
>>> +
>>> +static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
>>> + struct kprobe_ctlblk *kcb)
>>> +{
>>> + __this_cpu_write(current_kprobe, p);
>>> + kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
>>> + kcb->kprobe_saved_era = regs->csr_era;
>>> +}
>>> +NOKPROBE_SYMBOL(set_current_kprobe);
>>> +
>>> +static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs
>>> *regs)
>> Use insns_not_simulated for short.
>>> +{
>>> + if (is_branch_ins(&p->opcode)) {
>>> + simu_branch(regs, p->opcode);
>>> + return false;
>>> + } else if (is_pc_ins(&p->opcode)) {
>>> + simu_pc(regs, p->opcode);
>>> + return false;
>>> + } else {
>>> + return true;
>>> + }
>>> +}
>>> +NOKPROBE_SYMBOL(insns_are_not_simulated);
>>> +
>>> +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
>>> + struct kprobe_ctlblk *kcb, int reenter)
>>> +{
>>> + if (reenter) {
>>> + save_previous_kprobe(kcb);
>>> + set_current_kprobe(p, regs, kcb);
>>> + kcb->kprobe_status = KPROBE_REENTER;
>>> + } else {
>>> + kcb->kprobe_status = KPROBE_HIT_SS;
>>> + }
>>> +
>>> + regs->csr_prmd &= ~CSR_PRMD_PIE;
>>> +
>>> + if (p->ainsn.insn->word == breakpoint_insn.word) {
>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>>> + preempt_enable_no_resched();
>>> + return;
>>> + }
>>> +
>>> + if (insns_are_not_simulated(p, regs)) {
>>> + kcb->kprobe_status = KPROBE_HIT_SS;
>>> + regs->csr_era = (unsigned long)&p->ainsn.insn[0];
>>> + } else {
>>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
>>> + if (p->post_handler)
>>> + p->post_handler(p, regs, 0);
>>> + reset_current_kprobe();
>>> + preempt_enable_no_resched();
>>> + }
>>> +}
>>> +NOKPROBE_SYMBOL(setup_singlestep);
>>> +
>>> +static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
>>> + struct kprobe_ctlblk *kcb)
>>> +{
>>> + switch (kcb->kprobe_status) {
>>> + case KPROBE_HIT_SSDONE:
>>> + case KPROBE_HIT_ACTIVE:
>>> + kprobes_inc_nmissed_count(p);
>>> + setup_singlestep(p, regs, kcb, 1);
>>> + break;
>>> + case KPROBE_HIT_SS:
>>> + case KPROBE_REENTER:
>>> + pr_warn("Failed to recover from reentered kprobes.\n");
>>> + dump_kprobe(p);
>>> + BUG();
>>> + break;
>>> + default:
>>> + WARN_ON(1);
>>> + return false;
>>> + }
>>> +
>>> + return true;
>>> +}
>>> +NOKPROBE_SYMBOL(reenter_kprobe);
>>> +
>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs)
>>> +{
>>> + struct kprobe_ctlblk *kcb;
>>> + struct kprobe *p, *cur_kprobe;
>>> + kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
>>> +
>>> + /*
>>> + * We don't want to be preempted for the entire
>>> + * duration of kprobe processing.
>>> + */
>>> + preempt_disable();
>>> + kcb = get_kprobe_ctlblk();
>>> + cur_kprobe = kprobe_running();
>>> +
>>> + p = get_kprobe(addr);
>>> + if (p) {
>>> + if (cur_kprobe) {
>>> + if (reenter_kprobe(p, regs, kcb))
>>> + return true;
>>> + } else {
>>> + /* Probe hit */
>>> + set_current_kprobe(p, regs, kcb);
>>> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
>>> +
>>> + /*
>>> + * If we have no pre-handler or it returned
>>> 0, we
>>> + * continue with normal processing. If we
>>> have a
>>> + * pre-handler and it returned non-zero, it will
>>> + * modify the execution path and no need to
>>> single
>>> + * stepping. Let's just reset current kprobe
>>> and exit.
>>> + *
>>> + * pre_handler can hit a breakpoint and can
>>> step thru
>>> + * before return.
>>> + */
>>> + if (!p->pre_handler || !p->pre_handler(p,
>>> regs)) {
>>> + setup_singlestep(p, regs, kcb, 0);
>>> + } else {
>>> + reset_current_kprobe();
>>> + preempt_enable_no_resched();
>>> + }
>>> + }
>>> + return true;
>>> + }
>>> +
>>> + if (!is_break_ins(addr)) {
>>> + /*
>>> + * The breakpoint instruction was removed right
>>> + * after we hit it. Another cpu has removed
>>> + * either a probepoint or a debugger breakpoint
>>> + * at this address. In either case, no further
>>> + * handling of this interrupt is appropriate.
>>> + * Return back to original instruction, and continue.
>>> + */
>>> + preempt_enable_no_resched();
>>> + return true;
>>> + }
>>> +
>>> + preempt_enable_no_resched();
>>> + return false;
>>> +}
>>> +NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
>>> +
>>> +bool kprobe_singlestep_handler(struct pt_regs *regs)
>>> +{
>>> + struct kprobe *cur = kprobe_running();
>>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>>> +
>>> + if (!cur)
>>> + return false;
>>> +
>>> + /* Restore back the original saved kprobes variables and
>>> continue */
>>> + if (kcb->kprobe_status == KPROBE_REENTER) {
>>> + restore_previous_kprobe(kcb);
>>> + goto out;
>>> + }
>>> +
>>> + /* Call post handler */
>>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
>>> + if (cur->post_handler)
>>> + cur->post_handler(cur, regs, 0);
>> I'm not very familiar, but I think you should set KPROBE_HIT_SSDONE
>> only when post_handler is not NULL.
>>
>>> +
>>> + regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>>> +
>>> + reset_current_kprobe();
>>> +out:
>>> + preempt_enable_no_resched();
>>> + return true;
>>> +}
>>> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
>>> +
>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
>>> +{
>>> + struct kprobe *cur = kprobe_running();
>>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>>> +
>>> + switch (kcb->kprobe_status) {
>>> + case KPROBE_HIT_SS:
>>> + case KPROBE_REENTER:
>>> + /*
>>> + * We are here because the instruction being single
>>> + * stepped caused a page fault. We reset the current
>>> + * kprobe and the ip points back to the probe address
>>> + * and allow the page fault handler to continue as a
>>> + * normal page fault.
>>> + */
>>> + regs->csr_era = (unsigned long) cur->addr;
>>> + BUG_ON(!instruction_pointer(regs));
>>> +
>>> + if (kcb->kprobe_status == KPROBE_REENTER) {
>>> + restore_previous_kprobe(kcb);
>>> + } else {
>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>>> + reset_current_kprobe();
>>> + preempt_enable_no_resched();
>>> + }
>> I think the KPROBE_REENTER case also needs to enable preemption.
>>
>>> +
>>> + break;
>>> + case KPROBE_HIT_ACTIVE:
>>> + case KPROBE_HIT_SSDONE:
>>> + /*
>>> + * In case the user-specified fault handler returned
>>> + * zero, try to fix up.
>>> + */
>>> + if (fixup_exception(regs))
>>> + return true;
>> Does here need to enable preemption too?
>>
>>> + }
>>> + return false;
>>> +}
>>> +NOKPROBE_SYMBOL(kprobe_fault_handler);
>>> +
>>> +/*
>>> + * Provide a blacklist of symbols identifying ranges which cannot be
>>> kprobed.
>>> + * This blacklist is exposed to userspace via debugfs
>>> (kprobes/blacklist).
>>> + */
>>> +int __init arch_populate_kprobe_blacklist(void)
>>> +{
>>> + return kprobe_add_area_blacklist((unsigned
>>> long)__irqentry_text_start,
>>> + (unsigned
>>> long)__irqentry_text_end);
>>> +}
>>> +
>>> +int __init arch_init_kprobes(void)
>>> +{
>>> + return 0;
>>> +}
>>> diff --git a/arch/loongarch/kernel/traps.c
>>> b/arch/loongarch/kernel/traps.c
>>> index a19bb32..4d9f775 100644
>>> --- a/arch/loongarch/kernel/traps.c
>>> +++ b/arch/loongarch/kernel/traps.c
>>> @@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs
>>> *regs)
>>> */
>>> switch (bcode) {
>>> case BRK_KPROBE_BP:
>>> - if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
>>> - current->thread.trap_nr, SIGTRAP) ==
>>> NOTIFY_STOP)
>>> + if (kprobe_breakpoint_handler(regs))
>>> goto out;
>>> else
>>> break;
>>> case BRK_KPROBE_SSTEPBP:
>>> - if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep",
>>> regs, bcode,
>>> - current->thread.trap_nr, SIGTRAP) ==
>>> NOTIFY_STOP)
>>> + if (kprobe_singlestep_handler(regs))
>>> goto out;
>>> else
>>> break;
>>> @@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
>>> break;
>>> }
>>>
>>> + if (bcode == BRK_KPROBE_BP) {
>>> + if (__get_inst(&opcode, (u32 *)era, user))
>>> + goto out_sigsegv;
>>> +
>>> + bcode = (opcode & 0x7fff);
>>> + }
>>> +
>> Why? we already got bcode at the beginning.
>
> Hi, Huacai,
>
>
> I think it is because kprobe_breakpoint_handler() may deal with the
> case where another cpu may have removed its breakpoint. In that case,
> normally we can exit this break exception and go on. But if
> the original instruction is_break_ins(), we will trigger a trap again. So
> it is better to avoid double traps and let it go ahead when
> kprobe_breakpoint_handler() returns.
Perhaps it is better described not as avoiding the trap twice, but rather
that if we don't re-fetch the bcode when the original instruction is a
`break`, execution will go to the default branch regardless of what the
bcode of the original instruction is (since the bcode is still
BRK_KPROBE_BP at this point). IMHO, we re-fetch the bcode so that the
original `break` instruction's correct bcode is used, ensuring that it
goes to the appropriate case branch.

Thanks,
Youling.

> Different from many other archs, e.g. riscv, we switch on bcode to decide
> which handlers should go on. Thus, we need to get bcode again.
>
> Thanks,
>
> Jinyang
>
>
>>> switch (bcode) {
>>> case BRK_BUG:
>>> bug_handler(regs);
>>> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
>>> index 1ccd536..449087b 100644
>>> --- a/arch/loongarch/mm/fault.c
>>> +++ b/arch/loongarch/mm/fault.c
>>> @@ -135,6 +135,9 @@ static void __kprobes __do_page_fault(struct
>>> pt_regs *regs,
>>> struct vm_area_struct *vma = NULL;
>>> vm_fault_t fault;
>>>
>>> + if (kprobe_page_fault(regs, current->thread.trap_nr))
>>> + return;
>>> +
>>> /*
>>> * We fault-in kernel-space virtual memory on-demand. The
>>> * 'reference' page table is init_mm.pgd.
>>> --
>>> 2.1.0
>>>
>>>
>

2022-12-09 04:34:45

by Jinyang He

[permalink] [raw]
Subject: Re: [PATCH v9 2/4] LoongArch: Add kprobe support

On 2022-12-09 10:22, Huacai Chen wrote:

> cas
>
> On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]> wrote:
>> Kprobes allows you to trap at almost any kernel address and
>> execute a callback function, this commit adds kprobe support
>> for LoongArch.
>>
>> Signed-off-by: Tiezhu Yang <[email protected]>
>> ---
>> arch/loongarch/Kconfig | 1 +
>> arch/loongarch/include/asm/inst.h | 15 ++
>> arch/loongarch/include/asm/kprobes.h | 59 +++++++
>> arch/loongarch/kernel/Makefile | 2 +
>> arch/loongarch/kernel/kprobes.c | 332 +++++++++++++++++++++++++++++++++++
>> arch/loongarch/kernel/traps.c | 13 +-
>> arch/loongarch/mm/fault.c | 3 +
>> 7 files changed, 421 insertions(+), 4 deletions(-)
>> create mode 100644 arch/loongarch/include/asm/kprobes.h
>> create mode 100644 arch/loongarch/kernel/kprobes.c
>>
>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>> index 16bf1b6..f6fc156 100644
>> --- a/arch/loongarch/Kconfig
>> +++ b/arch/loongarch/Kconfig
>> @@ -102,6 +102,7 @@ config LOONGARCH
>> select HAVE_IOREMAP_PROT
>> select HAVE_IRQ_EXIT_ON_IRQ_STACK
>> select HAVE_IRQ_TIME_ACCOUNTING
>> + select HAVE_KPROBES
>> select HAVE_MOD_ARCH_SPECIFIC
>> select HAVE_NMI
>> select HAVE_PCI
>> diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
>> index 19cf692..e3f2378 100644
>> --- a/arch/loongarch/include/asm/inst.h
>> +++ b/arch/loongarch/include/asm/inst.h
>> @@ -24,6 +24,10 @@
>>
>> #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
>>
>> +enum reg0i15_op {
>> + break_op = 0x54,
>> +};
>> +
>> enum reg0i26_op {
>> b_op = 0x14,
>> bl_op = 0x15,
>> @@ -180,6 +184,11 @@ enum reg3sa2_op {
>> alsld_op = 0x16,
>> };
>>
>> +struct reg0i15_format {
>> + unsigned int immediate : 15;
>> + unsigned int opcode : 17;
>> +};
>> +
>> struct reg0i26_format {
>> unsigned int immediate_h : 10;
>> unsigned int immediate_l : 16;
>> @@ -265,6 +274,7 @@ struct reg3sa2_format {
>>
>> union loongarch_instruction {
>> unsigned int word;
>> + struct reg0i15_format reg0i15_format;
>> struct reg0i26_format reg0i26_format;
>> struct reg1i20_format reg1i20_format;
>> struct reg1i21_format reg1i21_format;
>> @@ -335,6 +345,11 @@ static inline bool is_branch_ins(union loongarch_instruction *ip)
>> ip->reg1i21_format.opcode <= bgeu_op;
>> }
>>
>> +static inline bool is_break_ins(union loongarch_instruction *ip)
>> +{
>> + return ip->reg0i15_format.opcode == break_op;
>> +}
>> +
>> static inline bool is_ra_save_ins(union loongarch_instruction *ip)
>> {
>> /* st.d $ra, $sp, offset */
>> diff --git a/arch/loongarch/include/asm/kprobes.h b/arch/loongarch/include/asm/kprobes.h
>> new file mode 100644
>> index 0000000..d3903f3
>> --- /dev/null
>> +++ b/arch/loongarch/include/asm/kprobes.h
>> @@ -0,0 +1,59 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +#ifndef __ASM_LOONGARCH_KPROBES_H
>> +#define __ASM_LOONGARCH_KPROBES_H
>> +
>> +#include <asm-generic/kprobes.h>
>> +#include <asm/cacheflush.h>
>> +
>> +#ifdef CONFIG_KPROBES
>> +
>> +#include <asm/inst.h>
>> +
>> +#define __ARCH_WANT_KPROBES_INSN_SLOT
>> +#define MAX_INSN_SIZE 2
>> +
>> +#define flush_insn_slot(p) \
>> +do { \
>> + if (p->addr) \
>> + flush_icache_range((unsigned long)p->addr, \
>> + (unsigned long)p->addr + \
>> + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
>> +} while (0)
>> +
>> +#define kretprobe_blacklist_size 0
>> +
>> +typedef union loongarch_instruction kprobe_opcode_t;
>> +
>> +/* Architecture specific copy of original instruction */
>> +struct arch_specific_insn {
>> + /* copy of the original instruction */
>> + kprobe_opcode_t *insn;
>> +};
>> +
>> +struct prev_kprobe {
>> + struct kprobe *kp;
>> + unsigned long status;
>> + unsigned long saved_irq;
>> + unsigned long saved_era;
>> +};
>> +
>> +/* per-cpu kprobe control block */
>> +struct kprobe_ctlblk {
>> + unsigned long kprobe_status;
>> + unsigned long kprobe_saved_irq;
>> + unsigned long kprobe_saved_era;
>> + struct prev_kprobe prev_kprobe;
>> +};
>> +
>> +void arch_remove_kprobe(struct kprobe *p);
>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
>> +bool kprobe_breakpoint_handler(struct pt_regs *regs);
>> +bool kprobe_singlestep_handler(struct pt_regs *regs);
>> +
>> +#else /* !CONFIG_KPROBES */
>> +
>> +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) { return 0; }
>> +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) { return 0; }
>> +
>> +#endif /* CONFIG_KPROBES */
>> +#endif /* __ASM_LOONGARCH_KPROBES_H */
>> diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
>> index fcaa024..6fe4a4e 100644
>> --- a/arch/loongarch/kernel/Makefile
>> +++ b/arch/loongarch/kernel/Makefile
>> @@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>>
>> obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
>>
>> +obj-$(CONFIG_KPROBES) += kprobes.o
>> +
>> CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
>> diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
>> new file mode 100644
>> index 0000000..aadfc9ae
>> --- /dev/null
>> +++ b/arch/loongarch/kernel/kprobes.c
>> @@ -0,0 +1,332 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +#include <linux/kprobes.h>
>> +#include <linux/kdebug.h>
>> +#include <linux/preempt.h>
>> +#include <asm/break.h>
>> +
>> +static const union loongarch_instruction breakpoint_insn = {
>> + .reg0i15_format = {
>> + .opcode = break_op,
>> + .immediate = BRK_KPROBE_BP,
>> + }
>> +};
>> +
>> +static const union loongarch_instruction singlestep_insn = {
>> + .reg0i15_format = {
>> + .opcode = break_op,
>> + .immediate = BRK_KPROBE_SSTEPBP,
>> + }
>> +};
>> +
>> +DEFINE_PER_CPU(struct kprobe *, current_kprobe);
>> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>> +
>> +static bool insns_are_not_supported(union loongarch_instruction insn)
> Use insns_not_supported for short.
>
>> +{
>> + switch (insn.reg2i14_format.opcode) {
>> + case llw_op:
>> + case lld_op:
>> + case scw_op:
>> + case scd_op:
>> + pr_notice("kprobe: ll and sc instructions are not supported\n");
>> + return true;
>> + }
>> +
>> + switch (insn.reg1i21_format.opcode) {
>> + case bceqz_op:
>> + pr_notice("kprobe: bceqz and bcnez instructions are not supported\n");
>> + return true;
>> + }
>> +
>> + return false;
>> +}
>> +NOKPROBE_SYMBOL(insns_are_not_supported);
>> +
>> +int arch_prepare_kprobe(struct kprobe *p)
>> +{
>> + union loongarch_instruction insn;
>> +
>> + insn = p->addr[0];
>> + if (insns_are_not_supported(insn))
>> + return -EINVAL;
>> +
>> + p->ainsn.insn = get_insn_slot();
>> + if (!p->ainsn.insn)
>> + return -ENOMEM;
>> +
>> + p->ainsn.insn[0] = *p->addr;
>> + p->ainsn.insn[1] = singlestep_insn;
>> +
>> + p->opcode = *p->addr;
>> +
>> + return 0;
>> +}
>> +NOKPROBE_SYMBOL(arch_prepare_kprobe);
>> +
>> +/* Install breakpoint in text */
>> +void arch_arm_kprobe(struct kprobe *p)
>> +{
>> + *p->addr = breakpoint_insn;
>> + flush_insn_slot(p);
>> +}
>> +NOKPROBE_SYMBOL(arch_arm_kprobe);
>> +
>> +/* Remove breakpoint from text */
>> +void arch_disarm_kprobe(struct kprobe *p)
>> +{
>> + *p->addr = p->opcode;
>> + flush_insn_slot(p);
>> +}
>> +NOKPROBE_SYMBOL(arch_disarm_kprobe);
>> +
>> +void arch_remove_kprobe(struct kprobe *p)
>> +{
>> + if (p->ainsn.insn) {
>> + free_insn_slot(p->ainsn.insn, 0);
>> + p->ainsn.insn = NULL;
>> + }
>> +}
>> +NOKPROBE_SYMBOL(arch_remove_kprobe);
>> +
>> +static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
>> +{
>> + kcb->prev_kprobe.kp = kprobe_running();
>> + kcb->prev_kprobe.status = kcb->kprobe_status;
>> + kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
>> + kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
>> +}
>> +NOKPROBE_SYMBOL(save_previous_kprobe);
>> +
>> +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
>> +{
>> + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
>> + kcb->kprobe_status = kcb->prev_kprobe.status;
>> + kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
>> + kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
>> +}
>> +NOKPROBE_SYMBOL(restore_previous_kprobe);
>> +
>> +static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
>> + struct kprobe_ctlblk *kcb)
>> +{
>> + __this_cpu_write(current_kprobe, p);
>> + kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
>> + kcb->kprobe_saved_era = regs->csr_era;
>> +}
>> +NOKPROBE_SYMBOL(set_current_kprobe);
>> +
>> +static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs *regs)
> Use insns_not_simulated for short.
>> +{
>> + if (is_branch_ins(&p->opcode)) {
>> + simu_branch(regs, p->opcode);
>> + return false;
>> + } else if (is_pc_ins(&p->opcode)) {
>> + simu_pc(regs, p->opcode);
>> + return false;
>> + } else {
>> + return true;
>> + }
>> +}
>> +NOKPROBE_SYMBOL(insns_are_not_simulated);
>> +
>> +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
>> + struct kprobe_ctlblk *kcb, int reenter)
>> +{
>> + if (reenter) {
>> + save_previous_kprobe(kcb);
>> + set_current_kprobe(p, regs, kcb);
>> + kcb->kprobe_status = KPROBE_REENTER;
>> + } else {
>> + kcb->kprobe_status = KPROBE_HIT_SS;
>> + }
>> +
>> + regs->csr_prmd &= ~CSR_PRMD_PIE;
>> +
>> + if (p->ainsn.insn->word == breakpoint_insn.word) {
>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>> + preempt_enable_no_resched();
>> + return;
>> + }
>> +
>> + if (insns_are_not_simulated(p, regs)) {
>> + kcb->kprobe_status = KPROBE_HIT_SS;
>> + regs->csr_era = (unsigned long)&p->ainsn.insn[0];
>> + } else {
>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
>> + if (p->post_handler)
>> + p->post_handler(p, regs, 0);
>> + reset_current_kprobe();
>> + preempt_enable_no_resched();
>> + }
>> +}
>> +NOKPROBE_SYMBOL(setup_singlestep);
>> +
>> +static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
>> + struct kprobe_ctlblk *kcb)
>> +{
>> + switch (kcb->kprobe_status) {
>> + case KPROBE_HIT_SSDONE:
>> + case KPROBE_HIT_ACTIVE:
>> + kprobes_inc_nmissed_count(p);
>> + setup_singlestep(p, regs, kcb, 1);
>> + break;
>> + case KPROBE_HIT_SS:
>> + case KPROBE_REENTER:
>> + pr_warn("Failed to recover from reentered kprobes.\n");
>> + dump_kprobe(p);
>> + BUG();
>> + break;
>> + default:
>> + WARN_ON(1);
>> + return false;
>> + }
>> +
>> + return true;
>> +}
>> +NOKPROBE_SYMBOL(reenter_kprobe);
>> +
>> +bool kprobe_breakpoint_handler(struct pt_regs *regs)
>> +{
>> + struct kprobe_ctlblk *kcb;
>> + struct kprobe *p, *cur_kprobe;
>> + kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
>> +
>> + /*
>> + * We don't want to be preempted for the entire
>> + * duration of kprobe processing.
>> + */
>> + preempt_disable();
>> + kcb = get_kprobe_ctlblk();
>> + cur_kprobe = kprobe_running();
>> +
>> + p = get_kprobe(addr);
>> + if (p) {
>> + if (cur_kprobe) {
>> + if (reenter_kprobe(p, regs, kcb))
>> + return true;
>> + } else {
>> + /* Probe hit */
>> + set_current_kprobe(p, regs, kcb);
>> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
>> +
>> + /*
>> + * If we have no pre-handler or it returned 0, we
>> + * continue with normal processing. If we have a
>> + * pre-handler and it returned non-zero, it will
>> + * modify the execution path and no need to single
>> + * stepping. Let's just reset current kprobe and exit.
>> + *
>> + * pre_handler can hit a breakpoint and can step thru
>> + * before return.
>> + */
>> + if (!p->pre_handler || !p->pre_handler(p, regs)) {
>> + setup_singlestep(p, regs, kcb, 0);
>> + } else {
>> + reset_current_kprobe();
>> + preempt_enable_no_resched();
>> + }
>> + }
>> + return true;
>> + }
>> +
>> + if (!is_break_ins(addr)) {
>> + /*
>> + * The breakpoint instruction was removed right
>> + * after we hit it. Another cpu has removed
>> + * either a probepoint or a debugger breakpoint
>> + * at this address. In either case, no further
>> + * handling of this interrupt is appropriate.
>> + * Return back to original instruction, and continue.
>> + */
>> + preempt_enable_no_resched();
>> + return true;
>> + }
>> +
>> + preempt_enable_no_resched();
>> + return false;
>> +}
>> +NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
>> +
>> +bool kprobe_singlestep_handler(struct pt_regs *regs)
>> +{
>> + struct kprobe *cur = kprobe_running();
>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>> +
>> + if (!cur)
>> + return false;
>> +
>> + /* Restore back the original saved kprobes variables and continue */
>> + if (kcb->kprobe_status == KPROBE_REENTER) {
>> + restore_previous_kprobe(kcb);
>> + goto out;
>> + }
>> +
>> + /* Call post handler */
>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
>> + if (cur->post_handler)
>> + cur->post_handler(cur, regs, 0);
> I'm not very familiar, but I think you should set KPROBE_HIT_SSDONE
> only when post_handler is not NULL.
>
>> +
>> + regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>> +
>> + reset_current_kprobe();
>> +out:
>> + preempt_enable_no_resched();
>> + return true;
>> +}
>> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
>> +
>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
>> +{
>> + struct kprobe *cur = kprobe_running();
>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>> +
>> + switch (kcb->kprobe_status) {
>> + case KPROBE_HIT_SS:
>> + case KPROBE_REENTER:
>> + /*
>> + * We are here because the instruction being single
>> + * stepped caused a page fault. We reset the current
>> + * kprobe and the ip points back to the probe address
>> + * and allow the page fault handler to continue as a
>> + * normal page fault.
>> + */
>> + regs->csr_era = (unsigned long) cur->addr;
>> + BUG_ON(!instruction_pointer(regs));
>> +
>> + if (kcb->kprobe_status == KPROBE_REENTER) {
>> + restore_previous_kprobe(kcb);
>> + } else {
>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>> + reset_current_kprobe();
>> + preempt_enable_no_resched();
>> + }
> I think the KPROBE_REENTER case also needs to enable preemption.
>
>> +
>> + break;
>> + case KPROBE_HIT_ACTIVE:
>> + case KPROBE_HIT_SSDONE:
>> + /*
>> + * In case the user-specified fault handler returned
>> + * zero, try to fix up.
>> + */
>> + if (fixup_exception(regs))
>> + return true;
> Does here need to enable preemption too?
>
>> + }
>> + return false;
>> +}
>> +NOKPROBE_SYMBOL(kprobe_fault_handler);
>> +
>> +/*
>> + * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
>> + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
>> + */
>> +int __init arch_populate_kprobe_blacklist(void)
>> +{
>> + return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
>> + (unsigned long)__irqentry_text_end);
>> +}
>> +
>> +int __init arch_init_kprobes(void)
>> +{
>> + return 0;
>> +}
>> diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
>> index a19bb32..4d9f775 100644
>> --- a/arch/loongarch/kernel/traps.c
>> +++ b/arch/loongarch/kernel/traps.c
>> @@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
>> */
>> switch (bcode) {
>> case BRK_KPROBE_BP:
>> - if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
>> - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
>> + if (kprobe_breakpoint_handler(regs))
>> goto out;
>> else
>> break;
>> case BRK_KPROBE_SSTEPBP:
>> - if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
>> - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
>> + if (kprobe_singlestep_handler(regs))
>> goto out;
>> else
>> break;
>> @@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
>> break;
>> }
>>
>> + if (bcode == BRK_KPROBE_BP) {
>> + if (__get_inst(&opcode, (u32 *)era, user))
>> + goto out_sigsegv;
>> +
>> + bcode = (opcode & 0x7fff);
>> + }
>> +
> Why? we already got bcode at the beginning.

Hi, Huacai,


I think it is because kprobe_breakpoint_handler() may deal with the
case where another cpu may have removed its breakpoint. In that case,
normally we can exit this break exception and go on. But if
the original instruction is_break_ins(), we will trigger a trap again. So
it is better to avoid double traps and let it go ahead when
kprobe_breakpoint_handler() returns.
Different from many other archs, e.g. riscv, we switch on bcode to decide
which handlers should go on. Thus, we need to get bcode again.

Thanks,

Jinyang


>> switch (bcode) {
>> case BRK_BUG:
>> bug_handler(regs);
>> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
>> index 1ccd536..449087b 100644
>> --- a/arch/loongarch/mm/fault.c
>> +++ b/arch/loongarch/mm/fault.c
>> @@ -135,6 +135,9 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
>> struct vm_area_struct *vma = NULL;
>> vm_fault_t fault;
>>
>> + if (kprobe_page_fault(regs, current->thread.trap_nr))
>> + return;
>> +
>> /*
>> * We fault-in kernel-space virtual memory on-demand. The
>> * 'reference' page table is init_mm.pgd.
>> --
>> 2.1.0
>>
>>

2022-12-09 06:15:19

by Jinyang He

[permalink] [raw]
Subject: Re: [PATCH v9 2/4] LoongArch: Add kprobe support

On 2022-12-09 11:44, Youling Tang wrote:

> Hi, Jinyang
>
> On 12/09/2022 11:31 AM, Jinyang He wrote:
>> On 2022-12-09 10:22, Huacai Chen wrote:
>>
>>>    cas
>>>
>>> On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]>
>>> wrote:
>>>> Kprobes allows you to trap at almost any kernel address and
>>>> execute a callback function, this commit adds kprobe support
>>>> for LoongArch.
>>>>
>>>> Signed-off-by: Tiezhu Yang <[email protected]>
>>>> ---
>>>>   arch/loongarch/Kconfig               |   1 +
>>>>   arch/loongarch/include/asm/inst.h    |  15 ++
>>>>   arch/loongarch/include/asm/kprobes.h |  59 +++++++
>>>>   arch/loongarch/kernel/Makefile       |   2 +
>>>>   arch/loongarch/kernel/kprobes.c      | 332
>>>> +++++++++++++++++++++++++++++++++++
>>>>   arch/loongarch/kernel/traps.c        |  13 +-
>>>>   arch/loongarch/mm/fault.c            |   3 +
>>>>   7 files changed, 421 insertions(+), 4 deletions(-)
>>>>   create mode 100644 arch/loongarch/include/asm/kprobes.h
>>>>   create mode 100644 arch/loongarch/kernel/kprobes.c
>>>>
>>>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>>>> index 16bf1b6..f6fc156 100644
>>>> --- a/arch/loongarch/Kconfig
>>>> +++ b/arch/loongarch/Kconfig
>>>> @@ -102,6 +102,7 @@ config LOONGARCH
>>>>          select HAVE_IOREMAP_PROT
>>>>          select HAVE_IRQ_EXIT_ON_IRQ_STACK
>>>>          select HAVE_IRQ_TIME_ACCOUNTING
>>>> +       select HAVE_KPROBES
>>>>          select HAVE_MOD_ARCH_SPECIFIC
>>>>          select HAVE_NMI
>>>>          select HAVE_PCI
>>>> diff --git a/arch/loongarch/include/asm/inst.h
>>>> b/arch/loongarch/include/asm/inst.h
>>>> index 19cf692..e3f2378 100644
>>>> --- a/arch/loongarch/include/asm/inst.h
>>>> +++ b/arch/loongarch/include/asm/inst.h
>>>> @@ -24,6 +24,10 @@
>>>>
>>>>   #define ADDR_IMM(addr, INSN)   ((addr & ADDR_IMMMASK_##INSN) >>
>>>> ADDR_IMMSHIFT_##INSN)
>>>>
>>>> +enum reg0i15_op {
>>>> +       break_op        = 0x54,
>>>> +};
>>>> +
>>>>   enum reg0i26_op {
>>>>          b_op            = 0x14,
>>>>          bl_op           = 0x15,
>>>> @@ -180,6 +184,11 @@ enum reg3sa2_op {
>>>>          alsld_op        = 0x16,
>>>>   };
>>>>
>>>> +struct reg0i15_format {
>>>> +       unsigned int immediate : 15;
>>>> +       unsigned int opcode : 17;
>>>> +};
>>>> +
>>>>   struct reg0i26_format {
>>>>          unsigned int immediate_h : 10;
>>>>          unsigned int immediate_l : 16;
>>>> @@ -265,6 +274,7 @@ struct reg3sa2_format {
>>>>
>>>>   union loongarch_instruction {
>>>>          unsigned int word;
>>>> +       struct reg0i15_format   reg0i15_format;
>>>>          struct reg0i26_format   reg0i26_format;
>>>>          struct reg1i20_format   reg1i20_format;
>>>>          struct reg1i21_format   reg1i21_format;
>>>> @@ -335,6 +345,11 @@ static inline bool is_branch_ins(union
>>>> loongarch_instruction *ip)
>>>>                  ip->reg1i21_format.opcode <= bgeu_op;
>>>>   }
>>>>
>>>> +static inline bool is_break_ins(union loongarch_instruction *ip)
>>>> +{
>>>> +       return ip->reg0i15_format.opcode == break_op;
>>>> +}
>>>> +
>>>>   static inline bool is_ra_save_ins(union loongarch_instruction *ip)
>>>>   {
>>>>          /* st.d $ra, $sp, offset */
>>>> diff --git a/arch/loongarch/include/asm/kprobes.h
>>>> b/arch/loongarch/include/asm/kprobes.h
>>>> new file mode 100644
>>>> index 0000000..d3903f3
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/include/asm/kprobes.h
>>>> @@ -0,0 +1,59 @@
>>>> +/* SPDX-License-Identifier: GPL-2.0-only */
>>>> +#ifndef __ASM_LOONGARCH_KPROBES_H
>>>> +#define __ASM_LOONGARCH_KPROBES_H
>>>> +
>>>> +#include <asm-generic/kprobes.h>
>>>> +#include <asm/cacheflush.h>
>>>> +
>>>> +#ifdef CONFIG_KPROBES
>>>> +
>>>> +#include <asm/inst.h>
>>>> +
>>>> +#define __ARCH_WANT_KPROBES_INSN_SLOT
>>>> +#define MAX_INSN_SIZE                  2
>>>> +
>>>> +#define
>>>> flush_insn_slot(p)                                             \
>>>> +do
>>>> {                                                                   \
>>>> +       if
>>>> (p->addr)                                                    \
>>>> +               flush_icache_range((unsigned
>>>> long)p->addr,              \
>>>> +                          (unsigned long)p->addr
>>>> +                     \
>>>> +                          (MAX_INSN_SIZE *
>>>> sizeof(kprobe_opcode_t)));  \
>>>> +} while (0)
>>>> +
>>>> +#define kretprobe_blacklist_size       0
>>>> +
>>>> +typedef union loongarch_instruction kprobe_opcode_t;
>>>> +
>>>> +/* Architecture specific copy of original instruction */
>>>> +struct arch_specific_insn {
>>>> +       /* copy of the original instruction */
>>>> +       kprobe_opcode_t *insn;
>>>> +};
>>>> +
>>>> +struct prev_kprobe {
>>>> +       struct kprobe *kp;
>>>> +       unsigned long status;
>>>> +       unsigned long saved_irq;
>>>> +       unsigned long saved_era;
>>>> +};
>>>> +
>>>> +/* per-cpu kprobe control block */
>>>> +struct kprobe_ctlblk {
>>>> +       unsigned long kprobe_status;
>>>> +       unsigned long kprobe_saved_irq;
>>>> +       unsigned long kprobe_saved_era;
>>>> +       struct prev_kprobe prev_kprobe;
>>>> +};
>>>> +
>>>> +void arch_remove_kprobe(struct kprobe *p);
>>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
>>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs);
>>>> +bool kprobe_singlestep_handler(struct pt_regs *regs);
>>>> +
>>>> +#else /* !CONFIG_KPROBES */
>>>> +
>>>> +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) {
>>>> return 0; }
>>>> +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) {
>>>> return 0; }
>>>> +
>>>> +#endif /* CONFIG_KPROBES */
>>>> +#endif /* __ASM_LOONGARCH_KPROBES_H */
>>>> diff --git a/arch/loongarch/kernel/Makefile
>>>> b/arch/loongarch/kernel/Makefile
>>>> index fcaa024..6fe4a4e 100644
>>>> --- a/arch/loongarch/kernel/Makefile
>>>> +++ b/arch/loongarch/kernel/Makefile
>>>> @@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>>>>
>>>>   obj-$(CONFIG_PERF_EVENTS)      += perf_event.o perf_regs.o
>>>>
>>>> +obj-$(CONFIG_KPROBES)          += kprobes.o
>>>> +
>>>>   CPPFLAGS_vmlinux.lds           := $(KBUILD_CFLAGS)
>>>> diff --git a/arch/loongarch/kernel/kprobes.c
>>>> b/arch/loongarch/kernel/kprobes.c
>>>> new file mode 100644
>>>> index 0000000..aadfc9ae
>>>> --- /dev/null
>>>> +++ b/arch/loongarch/kernel/kprobes.c
>>>> @@ -0,0 +1,332 @@
>>>> +// SPDX-License-Identifier: GPL-2.0-only
>>>> +#include <linux/kprobes.h>
>>>> +#include <linux/kdebug.h>
>>>> +#include <linux/preempt.h>
>>>> +#include <asm/break.h>
>>>> +
>>>> +static const union loongarch_instruction breakpoint_insn = {
>>>> +       .reg0i15_format = {
>>>> +               .opcode = break_op,
>>>> +               .immediate = BRK_KPROBE_BP,
>>>> +       }
>>>> +};
>>>> +
>>>> +static const union loongarch_instruction singlestep_insn = {
>>>> +       .reg0i15_format = {
>>>> +               .opcode = break_op,
>>>> +               .immediate = BRK_KPROBE_SSTEPBP,
>>>> +       }
>>>> +};
>>>> +
>>>> +DEFINE_PER_CPU(struct kprobe *, current_kprobe);
>>>> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>>>> +
>>>> +static bool insns_are_not_supported(union loongarch_instruction insn)
>>> Use  insns_not_supported for short.
>>>
>>>> +{
>>>> +       switch (insn.reg2i14_format.opcode) {
>>>> +       case llw_op:
>>>> +       case lld_op:
>>>> +       case scw_op:
>>>> +       case scd_op:
>>>> +               pr_notice("kprobe: ll and sc instructions are not
>>>> supported\n");
>>>> +               return true;
>>>> +       }
>>>> +
>>>> +       switch (insn.reg1i21_format.opcode) {
>>>> +       case bceqz_op:
>>>> +               pr_notice("kprobe: bceqz and bcnez instructions are
>>>> not supported\n");
>>>> +               return true;
>>>> +       }
>>>> +
>>>> +       return false;
>>>> +}
>>>> +NOKPROBE_SYMBOL(insns_are_not_supported);
>>>> +
>>>> +int arch_prepare_kprobe(struct kprobe *p)
>>>> +{
>>>> +       union loongarch_instruction insn;
>>>> +
>>>> +       insn = p->addr[0];
>>>> +       if (insns_are_not_supported(insn))
>>>> +               return -EINVAL;
>>>> +
>>>> +       p->ainsn.insn = get_insn_slot();
>>>> +       if (!p->ainsn.insn)
>>>> +               return -ENOMEM;
>>>> +
>>>> +       p->ainsn.insn[0] = *p->addr;
>>>> +       p->ainsn.insn[1] = singlestep_insn;
>>>> +
>>>> +       p->opcode = *p->addr;
>>>> +
>>>> +       return 0;
>>>> +}
>>>> +NOKPROBE_SYMBOL(arch_prepare_kprobe);
>>>> +
>>>> +/* Install breakpoint in text */
>>>> +void arch_arm_kprobe(struct kprobe *p)
>>>> +{
>>>> +       *p->addr = breakpoint_insn;
>>>> +       flush_insn_slot(p);
>>>> +}
>>>> +NOKPROBE_SYMBOL(arch_arm_kprobe);
>>>> +
>>>> +/* Remove breakpoint from text */
>>>> +void arch_disarm_kprobe(struct kprobe *p)
>>>> +{
>>>> +       *p->addr = p->opcode;
>>>> +       flush_insn_slot(p);
>>>> +}
>>>> +NOKPROBE_SYMBOL(arch_disarm_kprobe);
>>>> +
>>>> +void arch_remove_kprobe(struct kprobe *p)
>>>> +{
>>>> +       if (p->ainsn.insn) {
>>>> +               free_insn_slot(p->ainsn.insn, 0);
>>>> +               p->ainsn.insn = NULL;
>>>> +       }
>>>> +}
>>>> +NOKPROBE_SYMBOL(arch_remove_kprobe);
>>>> +
>>>> +static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
>>>> +{
>>>> +       kcb->prev_kprobe.kp = kprobe_running();
>>>> +       kcb->prev_kprobe.status = kcb->kprobe_status;
>>>> +       kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
>>>> +       kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
>>>> +}
>>>> +NOKPROBE_SYMBOL(save_previous_kprobe);
>>>> +
>>>> +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
>>>> +{
>>>> +       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
>>>> +       kcb->kprobe_status = kcb->prev_kprobe.status;
>>>> +       kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
>>>> +       kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
>>>> +}
>>>> +NOKPROBE_SYMBOL(restore_previous_kprobe);
>>>> +
>>>> +static void set_current_kprobe(struct kprobe *p, struct pt_regs
>>>> *regs,
>>>> +                              struct kprobe_ctlblk *kcb)
>>>> +{
>>>> +       __this_cpu_write(current_kprobe, p);
>>>> +       kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
>>>> +       kcb->kprobe_saved_era = regs->csr_era;
>>>> +}
>>>> +NOKPROBE_SYMBOL(set_current_kprobe);
>>>> +
>>>> +static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs
>>>> *regs)
>>> Use  insns_not_simulated for short.
>>>> +{
>>>> +       if (is_branch_ins(&p->opcode)) {
>>>> +               simu_branch(regs, p->opcode);
>>>> +               return false;
>>>> +       } else if (is_pc_ins(&p->opcode)) {
>>>> +               simu_pc(regs, p->opcode);
>>>> +               return false;
>>>> +       } else {
>>>> +               return true;
>>>> +       }
>>>> +}
>>>> +NOKPROBE_SYMBOL(insns_are_not_simulated);
>>>> +
>>>> +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
>>>> +                            struct kprobe_ctlblk *kcb, int reenter)
>>>> +{
>>>> +       if (reenter) {
>>>> +               save_previous_kprobe(kcb);
>>>> +               set_current_kprobe(p, regs, kcb);
>>>> +               kcb->kprobe_status = KPROBE_REENTER;
>>>> +       } else {
>>>> +               kcb->kprobe_status = KPROBE_HIT_SS;
>>>> +       }
>>>> +
>>>> +       regs->csr_prmd &= ~CSR_PRMD_PIE;
>>>> +
>>>> +       if (p->ainsn.insn->word == breakpoint_insn.word) {
>>>> +               regs->csr_prmd |= kcb->kprobe_saved_irq;
>>>> +               preempt_enable_no_resched();
>>>> +               return;
>>>> +       }
>>>> +
>>>> +       if (insns_are_not_simulated(p, regs)) {
>>>> +               kcb->kprobe_status = KPROBE_HIT_SS;
>>>> +               regs->csr_era = (unsigned long)&p->ainsn.insn[0];
>>>> +       } else {
>>>> +               kcb->kprobe_status = KPROBE_HIT_SSDONE;
>>>> +               if (p->post_handler)
>>>> +                       p->post_handler(p, regs, 0);
>>>> +               reset_current_kprobe();
>>>> +               preempt_enable_no_resched();
>>>> +       }
>>>> +}
>>>> +NOKPROBE_SYMBOL(setup_singlestep);
>>>> +
>>>> +static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
>>>> +                         struct kprobe_ctlblk *kcb)
>>>> +{
>>>> +       switch (kcb->kprobe_status) {
>>>> +       case KPROBE_HIT_SSDONE:
>>>> +       case KPROBE_HIT_ACTIVE:
>>>> +               kprobes_inc_nmissed_count(p);
>>>> +               setup_singlestep(p, regs, kcb, 1);
>>>> +               break;
>>>> +       case KPROBE_HIT_SS:
>>>> +       case KPROBE_REENTER:
>>>> +               pr_warn("Failed to recover from reentered
>>>> kprobes.\n");
>>>> +               dump_kprobe(p);
>>>> +               BUG();
>>>> +               break;
>>>> +       default:
>>>> +               WARN_ON(1);
>>>> +               return false;
>>>> +       }
>>>> +
>>>> +       return true;
>>>> +}
>>>> +NOKPROBE_SYMBOL(reenter_kprobe);
>>>> +
>>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs)
>>>> +{
>>>> +       struct kprobe_ctlblk *kcb;
>>>> +       struct kprobe *p, *cur_kprobe;
>>>> +       kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
>>>> +
>>>> +       /*
>>>> +        * We don't want to be preempted for the entire
>>>> +        * duration of kprobe processing.
>>>> +        */
>>>> +       preempt_disable();
>>>> +       kcb = get_kprobe_ctlblk();
>>>> +       cur_kprobe = kprobe_running();
>>>> +
>>>> +       p = get_kprobe(addr);
>>>> +       if (p) {
>>>> +               if (cur_kprobe) {
>>>> +                       if (reenter_kprobe(p, regs, kcb))
>>>> +                               return true;
>>>> +               } else {
>>>> +                       /* Probe hit */
>>>> +                       set_current_kprobe(p, regs, kcb);
>>>> +                       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
>>>> +
>>>> +                       /*
>>>> +                        * If we have no pre-handler or it returned
>>>> 0, we
>>>> +                        * continue with normal processing. If we
>>>> have a
>>>> +                        * pre-handler and it returned non-zero, it
>>>> will
>>>> +                        * modify the execution path and no need to
>>>> single
>>>> +                        * stepping. Let's just reset current kprobe
>>>> and exit.
>>>> +                        *
>>>> +                        * pre_handler can hit a breakpoint and can
>>>> step thru
>>>> +                        * before return.
>>>> +                        */
>>>> +                       if (!p->pre_handler || !p->pre_handler(p,
>>>> regs)) {
>>>> +                               setup_singlestep(p, regs, kcb, 0);
>>>> +                       } else {
>>>> +                               reset_current_kprobe();
>>>> +                               preempt_enable_no_resched();
>>>> +                       }
>>>> +               }
>>>> +               return true;
>>>> +       }
>>>> +
>>>> +       if (!is_break_ins(addr)) {
>>>> +               /*
>>>> +                * The breakpoint instruction was removed right
>>>> +                * after we hit it.  Another cpu has removed
>>>> +                * either a probepoint or a debugger breakpoint
>>>> +                * at this address.  In either case, no further
>>>> +                * handling of this interrupt is appropriate.
>>>> +                * Return back to original instruction, and continue.
>>>> +                */
>>>> +               preempt_enable_no_resched();
>>>> +               return true;
>>>> +       }
>>>> +
>>>> +       preempt_enable_no_resched();
>>>> +       return false;
>>>> +}
>>>> +NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
>>>> +
>>>> +bool kprobe_singlestep_handler(struct pt_regs *regs)
>>>> +{
>>>> +       struct kprobe *cur = kprobe_running();
>>>> +       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>>>> +
>>>> +       if (!cur)
>>>> +               return false;
>>>> +
>>>> +       /* Restore back the original saved kprobes variables and
>>>> continue */
>>>> +       if (kcb->kprobe_status == KPROBE_REENTER) {
>>>> +               restore_previous_kprobe(kcb);
>>>> +               goto out;
>>>> +       }
>>>> +
>>>> +       /* Call post handler */
>>>> +       kcb->kprobe_status = KPROBE_HIT_SSDONE;
>>>> +       if (cur->post_handler)
>>>> +               cur->post_handler(cur, regs, 0);
>>> I'm not very familiar, but I think you should set KPROBE_HIT_SSDONE
>>> only when post_handler is not NULL.
>>>
>>>> +
>>>> +       regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
>>>> +       regs->csr_prmd |= kcb->kprobe_saved_irq;
>>>> +
>>>> +       reset_current_kprobe();
>>>> +out:
>>>> +       preempt_enable_no_resched();
>>>> +       return true;
>>>> +}
>>>> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
>>>> +
>>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
>>>> +{
>>>> +       struct kprobe *cur = kprobe_running();
>>>> +       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>>>> +
>>>> +       switch (kcb->kprobe_status) {
>>>> +       case KPROBE_HIT_SS:
>>>> +       case KPROBE_REENTER:
>>>> +               /*
>>>> +                * We are here because the instruction being single
>>>> +                * stepped caused a page fault. We reset the current
>>>> +                * kprobe and the ip points back to the probe address
>>>> +                * and allow the page fault handler to continue as a
>>>> +                * normal page fault.
>>>> +                */
>>>> +               regs->csr_era = (unsigned long) cur->addr;
>>>> +               BUG_ON(!instruction_pointer(regs));
>>>> +
>>>> +               if (kcb->kprobe_status == KPROBE_REENTER) {
>>>> +                       restore_previous_kprobe(kcb);
>>>> +               } else {
>>>> +                       regs->csr_prmd |= kcb->kprobe_saved_irq;
>>>> +                       reset_current_kprobe();
>>>> +                       preempt_enable_no_resched();
>>>> +               }
>>> I think the KPROBE_REENTER case also needs to enable preemption.
>>>
>>>> +
>>>> +               break;
>>>> +       case KPROBE_HIT_ACTIVE:
>>>> +       case KPROBE_HIT_SSDONE:
>>>> +               /*
>>>> +                * In case the user-specified fault handler returned
>>>> +                * zero, try to fix up.
>>>> +                */
>>>> +               if (fixup_exception(regs))
>>>> +                       return true;
>>> Does here need to enable preemption too?
>>>
>>>> +       }
>>>> +       return false;
>>>> +}
>>>> +NOKPROBE_SYMBOL(kprobe_fault_handler);
>>>> +
>>>> +/*
>>>> + * Provide a blacklist of symbols identifying ranges which cannot be
>>>> kprobed.
>>>> + * This blacklist is exposed to userspace via debugfs
>>>> (kprobes/blacklist).
>>>> + */
>>>> +int __init arch_populate_kprobe_blacklist(void)
>>>> +{
>>>> +       return kprobe_add_area_blacklist((unsigned
>>>> long)__irqentry_text_start,
>>>> +                                        (unsigned
>>>> long)__irqentry_text_end);
>>>> +}
>>>> +
>>>> +int __init arch_init_kprobes(void)
>>>> +{
>>>> +       return 0;
>>>> +}
>>>> diff --git a/arch/loongarch/kernel/traps.c
>>>> b/arch/loongarch/kernel/traps.c
>>>> index a19bb32..4d9f775 100644
>>>> --- a/arch/loongarch/kernel/traps.c
>>>> +++ b/arch/loongarch/kernel/traps.c
>>>> @@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs
>>>> *regs)
>>>>           */
>>>>          switch (bcode) {
>>>>          case BRK_KPROBE_BP:
>>>> -               if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
>>>> -                              current->thread.trap_nr, SIGTRAP) ==
>>>> NOTIFY_STOP)
>>>> +               if (kprobe_breakpoint_handler(regs))
>>>>                          goto out;
>>>>                  else
>>>>                          break;
>>>>          case BRK_KPROBE_SSTEPBP:
>>>> -               if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep",
>>>> regs, bcode,
>>>> -                              current->thread.trap_nr, SIGTRAP) ==
>>>> NOTIFY_STOP)
>>>> +               if (kprobe_singlestep_handler(regs))
>>>>                          goto out;
>>>>                  else
>>>>                          break;
>>>> @@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs
>>>> *regs)
>>>>                          break;
>>>>          }
>>>>
>>>> +       if (bcode == BRK_KPROBE_BP) {
>>>> +               if (__get_inst(&opcode, (u32 *)era, user))
>>>> +                       goto out_sigsegv;
>>>> +
>>>> +               bcode = (opcode & 0x7fff);
>>>> +       }
>>>> +
>>> Why? we already got bcode at the beginning.
>>
>> Hi, Huacai,
>>
>>
>> I think it is because kprobe_breakpoint_handler() may deal with the
>> case where another cpu may remove its breakpoint. In that case, normally
>> we can exit this break exception and go on. But if
>> the original instruction is_break_ins(), we will trigger a trap again. So
>> it is better to avoid double traps and let it go ahead when
>> kprobe_breakpoint_handler() returns.
> Perhaps it is not described here as avoiding the trap twice,

That is due to the implementation of the last branch, is_break_ins(), in
kprobe_breakpoint_handler(). We can indirectly return true for all
instructions to indicate we can stop kprobe now.
And then it returns to the normal code flow. If the instruction at ERA is
a "break xxx", it would trap again. I call it "trap twice" because here
we are in the first trap.
To avoid the second trap, we add the check named is_break_ins() and
return false to indicate we should go to another break handler. Of
course, with the instruction being restored, we should re-get the bcode.


> but rather
> that if we don't re-fetch the bcode, the original `break` instruction
> will go to the default branch regardless of what the bcode of
> the original instruction is (since the bcode is BRK_KPROBE_BP at this
> point). IMHO, we re-get the bcode so that the original `break`
> instruction's correct bcode is used, ensuring that it goes to the
> appropriate case branch.
>
> Thanks,
> Youling.
>
>> Different from many other archs, e.g. riscv, we switch on bcode to decide
>> which handler should go on. Thus, we need to get the bcode again.
>>
>> Thanks,
>>
>> Jinyang
>>
>>
>>>>          switch (bcode) {
>>>>          case BRK_BUG:
>>>>                  bug_handler(regs);
>>>> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
>>>> index 1ccd536..449087b 100644
>>>> --- a/arch/loongarch/mm/fault.c
>>>> +++ b/arch/loongarch/mm/fault.c
>>>> @@ -135,6 +135,9 @@ static void __kprobes __do_page_fault(struct
>>>> pt_regs *regs,
>>>>          struct vm_area_struct *vma = NULL;
>>>>          vm_fault_t fault;
>>>>
>>>> +       if (kprobe_page_fault(regs, current->thread.trap_nr))
>>>> +               return;
>>>> +
>>>>          /*
>>>>           * We fault-in kernel-space virtual memory on-demand. The
>>>>           * 'reference' page table is init_mm.pgd.
>>>> --
>>>> 2.1.0
>>>>
>>>>
>>

2022-12-09 06:30:56

by Youling Tang

[permalink] [raw]
Subject: Re: [PATCH v9 2/4] LoongArch: Add kprobe support



On 12/09/2022 01:53 PM, Jinyang He wrote:
> On 2022-12-09 11:44, Youling Tang wrote:
>
>> Hi, Jinyang
>>
>> On 12/09/2022 11:31 AM, Jinyang He wrote:
>>> On 2022-12-09 10:22, Huacai Chen wrote:
>>>
>>>> cas
>>>>
>>>> On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]>
>>>> wrote:
>>>>> Kprobes allows you to trap at almost any kernel address and
>>>>> execute a callback function, this commit adds kprobe support
>>>>> for LoongArch.
>>>>>
>>>>> Signed-off-by: Tiezhu Yang <[email protected]>
>>>>> ---
>>>>> arch/loongarch/Kconfig | 1 +
>>>>> arch/loongarch/include/asm/inst.h | 15 ++
>>>>> arch/loongarch/include/asm/kprobes.h | 59 +++++++
>>>>> arch/loongarch/kernel/Makefile | 2 +
>>>>> arch/loongarch/kernel/kprobes.c | 332
>>>>> +++++++++++++++++++++++++++++++++++
>>>>> arch/loongarch/kernel/traps.c | 13 +-
>>>>> arch/loongarch/mm/fault.c | 3 +
>>>>> 7 files changed, 421 insertions(+), 4 deletions(-)
>>>>> create mode 100644 arch/loongarch/include/asm/kprobes.h
>>>>> create mode 100644 arch/loongarch/kernel/kprobes.c
>>>>>
>>>>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>>>>> index 16bf1b6..f6fc156 100644
>>>>> --- a/arch/loongarch/Kconfig
>>>>> +++ b/arch/loongarch/Kconfig
>>>>> @@ -102,6 +102,7 @@ config LOONGARCH
>>>>> select HAVE_IOREMAP_PROT
>>>>> select HAVE_IRQ_EXIT_ON_IRQ_STACK
>>>>> select HAVE_IRQ_TIME_ACCOUNTING
>>>>> + select HAVE_KPROBES
>>>>> select HAVE_MOD_ARCH_SPECIFIC
>>>>> select HAVE_NMI
>>>>> select HAVE_PCI
>>>>> diff --git a/arch/loongarch/include/asm/inst.h
>>>>> b/arch/loongarch/include/asm/inst.h
>>>>> index 19cf692..e3f2378 100644
>>>>> --- a/arch/loongarch/include/asm/inst.h
>>>>> +++ b/arch/loongarch/include/asm/inst.h
>>>>> @@ -24,6 +24,10 @@
>>>>>
>>>>> #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >>
>>>>> ADDR_IMMSHIFT_##INSN)
>>>>>
>>>>> +enum reg0i15_op {
>>>>> + break_op = 0x54,
>>>>> +};
>>>>> +
>>>>> enum reg0i26_op {
>>>>> b_op = 0x14,
>>>>> bl_op = 0x15,
>>>>> @@ -180,6 +184,11 @@ enum reg3sa2_op {
>>>>> alsld_op = 0x16,
>>>>> };
>>>>>
>>>>> +struct reg0i15_format {
>>>>> + unsigned int immediate : 15;
>>>>> + unsigned int opcode : 17;
>>>>> +};
>>>>> +
>>>>> struct reg0i26_format {
>>>>> unsigned int immediate_h : 10;
>>>>> unsigned int immediate_l : 16;
>>>>> @@ -265,6 +274,7 @@ struct reg3sa2_format {
>>>>>
>>>>> union loongarch_instruction {
>>>>> unsigned int word;
>>>>> + struct reg0i15_format reg0i15_format;
>>>>> struct reg0i26_format reg0i26_format;
>>>>> struct reg1i20_format reg1i20_format;
>>>>> struct reg1i21_format reg1i21_format;
>>>>> @@ -335,6 +345,11 @@ static inline bool is_branch_ins(union
>>>>> loongarch_instruction *ip)
>>>>> ip->reg1i21_format.opcode <= bgeu_op;
>>>>> }
>>>>>
>>>>> +static inline bool is_break_ins(union loongarch_instruction *ip)
>>>>> +{
>>>>> + return ip->reg0i15_format.opcode == break_op;
>>>>> +}
>>>>> +
>>>>> static inline bool is_ra_save_ins(union loongarch_instruction *ip)
>>>>> {
>>>>> /* st.d $ra, $sp, offset */
>>>>> diff --git a/arch/loongarch/include/asm/kprobes.h
>>>>> b/arch/loongarch/include/asm/kprobes.h
>>>>> new file mode 100644
>>>>> index 0000000..d3903f3
>>>>> --- /dev/null
>>>>> +++ b/arch/loongarch/include/asm/kprobes.h
>>>>> @@ -0,0 +1,59 @@
>>>>> +/* SPDX-License-Identifier: GPL-2.0-only */
>>>>> +#ifndef __ASM_LOONGARCH_KPROBES_H
>>>>> +#define __ASM_LOONGARCH_KPROBES_H
>>>>> +
>>>>> +#include <asm-generic/kprobes.h>
>>>>> +#include <asm/cacheflush.h>
>>>>> +
>>>>> +#ifdef CONFIG_KPROBES
>>>>> +
>>>>> +#include <asm/inst.h>
>>>>> +
>>>>> +#define __ARCH_WANT_KPROBES_INSN_SLOT
>>>>> +#define MAX_INSN_SIZE 2
>>>>> +
>>>>> +#define
>>>>> flush_insn_slot(p) \
>>>>> +do
>>>>> { \
>>>>> + if
>>>>> (p->addr) \
>>>>> + flush_icache_range((unsigned
>>>>> long)p->addr, \
>>>>> + (unsigned long)p->addr
>>>>> + \
>>>>> + (MAX_INSN_SIZE *
>>>>> sizeof(kprobe_opcode_t))); \
>>>>> +} while (0)
>>>>> +
>>>>> +#define kretprobe_blacklist_size 0
>>>>> +
>>>>> +typedef union loongarch_instruction kprobe_opcode_t;
>>>>> +
>>>>> +/* Architecture specific copy of original instruction */
>>>>> +struct arch_specific_insn {
>>>>> + /* copy of the original instruction */
>>>>> + kprobe_opcode_t *insn;
>>>>> +};
>>>>> +
>>>>> +struct prev_kprobe {
>>>>> + struct kprobe *kp;
>>>>> + unsigned long status;
>>>>> + unsigned long saved_irq;
>>>>> + unsigned long saved_era;
>>>>> +};
>>>>> +
>>>>> +/* per-cpu kprobe control block */
>>>>> +struct kprobe_ctlblk {
>>>>> + unsigned long kprobe_status;
>>>>> + unsigned long kprobe_saved_irq;
>>>>> + unsigned long kprobe_saved_era;
>>>>> + struct prev_kprobe prev_kprobe;
>>>>> +};
>>>>> +
>>>>> +void arch_remove_kprobe(struct kprobe *p);
>>>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
>>>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs);
>>>>> +bool kprobe_singlestep_handler(struct pt_regs *regs);
>>>>> +
>>>>> +#else /* !CONFIG_KPROBES */
>>>>> +
>>>>> +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) {
>>>>> return 0; }
>>>>> +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) {
>>>>> return 0; }
>>>>> +
>>>>> +#endif /* CONFIG_KPROBES */
>>>>> +#endif /* __ASM_LOONGARCH_KPROBES_H */
>>>>> diff --git a/arch/loongarch/kernel/Makefile
>>>>> b/arch/loongarch/kernel/Makefile
>>>>> index fcaa024..6fe4a4e 100644
>>>>> --- a/arch/loongarch/kernel/Makefile
>>>>> +++ b/arch/loongarch/kernel/Makefile
>>>>> @@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
>>>>>
>>>>> obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
>>>>>
>>>>> +obj-$(CONFIG_KPROBES) += kprobes.o
>>>>> +
>>>>> CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
>>>>> diff --git a/arch/loongarch/kernel/kprobes.c
>>>>> b/arch/loongarch/kernel/kprobes.c
>>>>> new file mode 100644
>>>>> index 0000000..aadfc9ae
>>>>> --- /dev/null
>>>>> +++ b/arch/loongarch/kernel/kprobes.c
>>>>> @@ -0,0 +1,332 @@
>>>>> +// SPDX-License-Identifier: GPL-2.0-only
>>>>> +#include <linux/kprobes.h>
>>>>> +#include <linux/kdebug.h>
>>>>> +#include <linux/preempt.h>
>>>>> +#include <asm/break.h>
>>>>> +
>>>>> +static const union loongarch_instruction breakpoint_insn = {
>>>>> + .reg0i15_format = {
>>>>> + .opcode = break_op,
>>>>> + .immediate = BRK_KPROBE_BP,
>>>>> + }
>>>>> +};
>>>>> +
>>>>> +static const union loongarch_instruction singlestep_insn = {
>>>>> + .reg0i15_format = {
>>>>> + .opcode = break_op,
>>>>> + .immediate = BRK_KPROBE_SSTEPBP,
>>>>> + }
>>>>> +};
>>>>> +
>>>>> +DEFINE_PER_CPU(struct kprobe *, current_kprobe);
>>>>> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>>>>> +
>>>>> +static bool insns_are_not_supported(union loongarch_instruction insn)
>>>> Use insns_not_supported for short.
>>>>
>>>>> +{
>>>>> + switch (insn.reg2i14_format.opcode) {
>>>>> + case llw_op:
>>>>> + case lld_op:
>>>>> + case scw_op:
>>>>> + case scd_op:
>>>>> + pr_notice("kprobe: ll and sc instructions are not
>>>>> supported\n");
>>>>> + return true;
>>>>> + }
>>>>> +
>>>>> + switch (insn.reg1i21_format.opcode) {
>>>>> + case bceqz_op:
>>>>> + pr_notice("kprobe: bceqz and bcnez instructions are
>>>>> not supported\n");
>>>>> + return true;
>>>>> + }
>>>>> +
>>>>> + return false;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(insns_are_not_supported);
>>>>> +
>>>>> +int arch_prepare_kprobe(struct kprobe *p)
>>>>> +{
>>>>> + union loongarch_instruction insn;
>>>>> +
>>>>> + insn = p->addr[0];
>>>>> + if (insns_are_not_supported(insn))
>>>>> + return -EINVAL;
>>>>> +
>>>>> + p->ainsn.insn = get_insn_slot();
>>>>> + if (!p->ainsn.insn)
>>>>> + return -ENOMEM;
>>>>> +
>>>>> + p->ainsn.insn[0] = *p->addr;
>>>>> + p->ainsn.insn[1] = singlestep_insn;
>>>>> +
>>>>> + p->opcode = *p->addr;
>>>>> +
>>>>> + return 0;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(arch_prepare_kprobe);
>>>>> +
>>>>> +/* Install breakpoint in text */
>>>>> +void arch_arm_kprobe(struct kprobe *p)
>>>>> +{
>>>>> + *p->addr = breakpoint_insn;
>>>>> + flush_insn_slot(p);
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(arch_arm_kprobe);
>>>>> +
>>>>> +/* Remove breakpoint from text */
>>>>> +void arch_disarm_kprobe(struct kprobe *p)
>>>>> +{
>>>>> + *p->addr = p->opcode;
>>>>> + flush_insn_slot(p);
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(arch_disarm_kprobe);
>>>>> +
>>>>> +void arch_remove_kprobe(struct kprobe *p)
>>>>> +{
>>>>> + if (p->ainsn.insn) {
>>>>> + free_insn_slot(p->ainsn.insn, 0);
>>>>> + p->ainsn.insn = NULL;
>>>>> + }
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(arch_remove_kprobe);
>>>>> +
>>>>> +static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
>>>>> +{
>>>>> + kcb->prev_kprobe.kp = kprobe_running();
>>>>> + kcb->prev_kprobe.status = kcb->kprobe_status;
>>>>> + kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
>>>>> + kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(save_previous_kprobe);
>>>>> +
>>>>> +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
>>>>> +{
>>>>> + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
>>>>> + kcb->kprobe_status = kcb->prev_kprobe.status;
>>>>> + kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
>>>>> + kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(restore_previous_kprobe);
>>>>> +
>>>>> +static void set_current_kprobe(struct kprobe *p, struct pt_regs
>>>>> *regs,
>>>>> + struct kprobe_ctlblk *kcb)
>>>>> +{
>>>>> + __this_cpu_write(current_kprobe, p);
>>>>> + kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
>>>>> + kcb->kprobe_saved_era = regs->csr_era;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(set_current_kprobe);
>>>>> +
>>>>> +static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs
>>>>> *regs)
>>>> Use insns_not_simulated for short.
>>>>> +{
>>>>> + if (is_branch_ins(&p->opcode)) {
>>>>> + simu_branch(regs, p->opcode);
>>>>> + return false;
>>>>> + } else if (is_pc_ins(&p->opcode)) {
>>>>> + simu_pc(regs, p->opcode);
>>>>> + return false;
>>>>> + } else {
>>>>> + return true;
>>>>> + }
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(insns_are_not_simulated);
>>>>> +
>>>>> +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
>>>>> + struct kprobe_ctlblk *kcb, int reenter)
>>>>> +{
>>>>> + if (reenter) {
>>>>> + save_previous_kprobe(kcb);
>>>>> + set_current_kprobe(p, regs, kcb);
>>>>> + kcb->kprobe_status = KPROBE_REENTER;
>>>>> + } else {
>>>>> + kcb->kprobe_status = KPROBE_HIT_SS;
>>>>> + }
>>>>> +
>>>>> + regs->csr_prmd &= ~CSR_PRMD_PIE;
>>>>> +
>>>>> + if (p->ainsn.insn->word == breakpoint_insn.word) {
>>>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>>>>> + preempt_enable_no_resched();
>>>>> + return;
>>>>> + }
>>>>> +
>>>>> + if (insns_are_not_simulated(p, regs)) {
>>>>> + kcb->kprobe_status = KPROBE_HIT_SS;
>>>>> + regs->csr_era = (unsigned long)&p->ainsn.insn[0];
>>>>> + } else {
>>>>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
>>>>> + if (p->post_handler)
>>>>> + p->post_handler(p, regs, 0);
>>>>> + reset_current_kprobe();
>>>>> + preempt_enable_no_resched();
>>>>> + }
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(setup_singlestep);
>>>>> +
>>>>> +static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
>>>>> + struct kprobe_ctlblk *kcb)
>>>>> +{
>>>>> + switch (kcb->kprobe_status) {
>>>>> + case KPROBE_HIT_SSDONE:
>>>>> + case KPROBE_HIT_ACTIVE:
>>>>> + kprobes_inc_nmissed_count(p);
>>>>> + setup_singlestep(p, regs, kcb, 1);
>>>>> + break;
>>>>> + case KPROBE_HIT_SS:
>>>>> + case KPROBE_REENTER:
>>>>> + pr_warn("Failed to recover from reentered
>>>>> kprobes.\n");
>>>>> + dump_kprobe(p);
>>>>> + BUG();
>>>>> + break;
>>>>> + default:
>>>>> + WARN_ON(1);
>>>>> + return false;
>>>>> + }
>>>>> +
>>>>> + return true;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(reenter_kprobe);
>>>>> +
>>>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs)
>>>>> +{
>>>>> + struct kprobe_ctlblk *kcb;
>>>>> + struct kprobe *p, *cur_kprobe;
>>>>> + kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
>>>>> +
>>>>> + /*
>>>>> + * We don't want to be preempted for the entire
>>>>> + * duration of kprobe processing.
>>>>> + */
>>>>> + preempt_disable();
>>>>> + kcb = get_kprobe_ctlblk();
>>>>> + cur_kprobe = kprobe_running();
>>>>> +
>>>>> + p = get_kprobe(addr);
>>>>> + if (p) {
>>>>> + if (cur_kprobe) {
>>>>> + if (reenter_kprobe(p, regs, kcb))
>>>>> + return true;
>>>>> + } else {
>>>>> + /* Probe hit */
>>>>> + set_current_kprobe(p, regs, kcb);
>>>>> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
>>>>> +
>>>>> + /*
>>>>> + * If we have no pre-handler or it returned
>>>>> 0, we
>>>>> + * continue with normal processing. If we
>>>>> have a
>>>>> + * pre-handler and it returned non-zero, it
>>>>> will
>>>>> + * modify the execution path and no need to
>>>>> single
>>>>> + * stepping. Let's just reset current kprobe
>>>>> and exit.
>>>>> + *
>>>>> + * pre_handler can hit a breakpoint and can
>>>>> step thru
>>>>> + * before return.
>>>>> + */
>>>>> + if (!p->pre_handler || !p->pre_handler(p,
>>>>> regs)) {
>>>>> + setup_singlestep(p, regs, kcb, 0);
>>>>> + } else {
>>>>> + reset_current_kprobe();
>>>>> + preempt_enable_no_resched();
>>>>> + }
>>>>> + }
>>>>> + return true;
>>>>> + }
>>>>> +
>>>>> + if (!is_break_ins(addr)) {
>>>>> + /*
>>>>> + * The breakpoint instruction was removed right
>>>>> + * after we hit it. Another cpu has removed
>>>>> + * either a probepoint or a debugger breakpoint
>>>>> + * at this address. In either case, no further
>>>>> + * handling of this interrupt is appropriate.
>>>>> + * Return back to original instruction, and continue.
>>>>> + */
>>>>> + preempt_enable_no_resched();
>>>>> + return true;
>>>>> + }
>>>>> +
>>>>> + preempt_enable_no_resched();
>>>>> + return false;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
>>>>> +
>>>>> +bool kprobe_singlestep_handler(struct pt_regs *regs)
>>>>> +{
>>>>> + struct kprobe *cur = kprobe_running();
>>>>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>>>>> +
>>>>> + if (!cur)
>>>>> + return false;
>>>>> +
>>>>> + /* Restore back the original saved kprobes variables and
>>>>> continue */
>>>>> + if (kcb->kprobe_status == KPROBE_REENTER) {
>>>>> + restore_previous_kprobe(kcb);
>>>>> + goto out;
>>>>> + }
>>>>> +
>>>>> + /* Call post handler */
>>>>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
>>>>> + if (cur->post_handler)
>>>>> + cur->post_handler(cur, regs, 0);
>>>> I'm not very familiar, but I think you should set KPROBE_HIT_SSDONE
>>>> only when post_handler is not NULL.
>>>>
>>>>> +
>>>>> + regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
>>>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>>>>> +
>>>>> + reset_current_kprobe();
>>>>> +out:
>>>>> + preempt_enable_no_resched();
>>>>> + return true;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
>>>>> +
>>>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
>>>>> +{
>>>>> + struct kprobe *cur = kprobe_running();
>>>>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
>>>>> +
>>>>> + switch (kcb->kprobe_status) {
>>>>> + case KPROBE_HIT_SS:
>>>>> + case KPROBE_REENTER:
>>>>> + /*
>>>>> + * We are here because the instruction being single
>>>>> + * stepped caused a page fault. We reset the current
>>>>> + * kprobe and the ip points back to the probe address
>>>>> + * and allow the page fault handler to continue as a
>>>>> + * normal page fault.
>>>>> + */
>>>>> + regs->csr_era = (unsigned long) cur->addr;
>>>>> + BUG_ON(!instruction_pointer(regs));
>>>>> +
>>>>> + if (kcb->kprobe_status == KPROBE_REENTER) {
>>>>> + restore_previous_kprobe(kcb);
>>>>> + } else {
>>>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
>>>>> + reset_current_kprobe();
>>>>> + preempt_enable_no_resched();
>>>>> + }
>>>> I think the KPROBE_REENTER case also needs to enable preemption.
>>>>
>>>>> +
>>>>> + break;
>>>>> + case KPROBE_HIT_ACTIVE:
>>>>> + case KPROBE_HIT_SSDONE:
>>>>> + /*
>>>>> + * In case the user-specified fault handler returned
>>>>> + * zero, try to fix up.
>>>>> + */
>>>>> + if (fixup_exception(regs))
>>>>> + return true;
>>>> Does here need to enable preemption too?
>>>>
>>>>> + }
>>>>> + return false;
>>>>> +}
>>>>> +NOKPROBE_SYMBOL(kprobe_fault_handler);
>>>>> +
>>>>> +/*
>>>>> + * Provide a blacklist of symbols identifying ranges which cannot be
>>>>> kprobed.
>>>>> + * This blacklist is exposed to userspace via debugfs
>>>>> (kprobes/blacklist).
>>>>> + */
>>>>> +int __init arch_populate_kprobe_blacklist(void)
>>>>> +{
>>>>> + return kprobe_add_area_blacklist((unsigned
>>>>> long)__irqentry_text_start,
>>>>> + (unsigned
>>>>> long)__irqentry_text_end);
>>>>> +}
>>>>> +
>>>>> +int __init arch_init_kprobes(void)
>>>>> +{
>>>>> + return 0;
>>>>> +}
>>>>> diff --git a/arch/loongarch/kernel/traps.c
>>>>> b/arch/loongarch/kernel/traps.c
>>>>> index a19bb32..4d9f775 100644
>>>>> --- a/arch/loongarch/kernel/traps.c
>>>>> +++ b/arch/loongarch/kernel/traps.c
>>>>> @@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs
>>>>> *regs)
>>>>> */
>>>>> switch (bcode) {
>>>>> case BRK_KPROBE_BP:
>>>>> - if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
>>>>> - current->thread.trap_nr, SIGTRAP) ==
>>>>> NOTIFY_STOP)
>>>>> + if (kprobe_breakpoint_handler(regs))
>>>>> goto out;
>>>>> else
>>>>> break;
>>>>> case BRK_KPROBE_SSTEPBP:
>>>>> - if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep",
>>>>> regs, bcode,
>>>>> - current->thread.trap_nr, SIGTRAP) ==
>>>>> NOTIFY_STOP)
>>>>> + if (kprobe_singlestep_handler(regs))
>>>>> goto out;
>>>>> else
>>>>> break;
>>>>> @@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs
>>>>> *regs)
>>>>> break;
>>>>> }
>>>>>
>>>>> + if (bcode == BRK_KPROBE_BP) {
>>>>> + if (__get_inst(&opcode, (u32 *)era, user))
>>>>> + goto out_sigsegv;
>>>>> +
>>>>> + bcode = (opcode & 0x7fff);
>>>>> + }
>>>>> +
>>>> Why? we already got bcode at the beginning.
>>>
>>> Hi, Huacai,
>>>
>>>
>>> I think it is because kprobe_breakpoint_handler() may deal with the
>>> case where another cpu may have removed its breakpoint. In that case,
>>> normally we can exit this break exception and go on. But if
>>> the original instruction is_break_ins(), we will trigger the trap
>>> again. So it is better to avoid double traps and let it go ahead when
>>> kprobe_breakpoint_handler() returns.
>> Perhaps it is not described here as avoiding the trap twice,
>
> That is due to the implementation of the last branch, is_break_ins(), in
> kprobe_breakpoint_handler(). We can indirectly return true for all
> instructions to indicate we can stop kprobe now.
> And then it returns to the normal code flow. If the instruction at ERA
> is a "break xxx", it would trap again. I call it "trap twice" because
> here we are in the first trap.
Mmm, it can be understood that way.

Thanks.
> To avoid the second trap, we add the judgment named is_break_ins() and
> return false to indicate we should go to another break handler. Of
> course, with the instruction being restored, we should re-get the bcode.
>
>
>> but rather
>> that if we don't re-fetch the bcode when the original instruction
>> `break` will go to the default branch regardless of what the bcode of
>> the original instruction is (since the bcode is BRK_KPROBE_BP at this
>> point). IMHO, so we re-get the bcode to get the original instruction
>> `break` is the correct bcode to ensure that it goes to the appropriate
>> case branch.
>>
>> Thanks,
>> Youling.
>>
>>> Different from many other archs, e.g. riscv, we switch on bcode to
>>> decide which handlers should go on. Thus, we need to get bcode again.
>>>
>>> Thanks,
>>>
>>> Jinyang
>>>
>>>
>>>>> switch (bcode) {
>>>>> case BRK_BUG:
>>>>> bug_handler(regs);
>>>>> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
>>>>> index 1ccd536..449087b 100644
>>>>> --- a/arch/loongarch/mm/fault.c
>>>>> +++ b/arch/loongarch/mm/fault.c
>>>>> @@ -135,6 +135,9 @@ static void __kprobes __do_page_fault(struct
>>>>> pt_regs *regs,
>>>>> struct vm_area_struct *vma = NULL;
>>>>> vm_fault_t fault;
>>>>>
>>>>> + if (kprobe_page_fault(regs, current->thread.trap_nr))
>>>>> + return;
>>>>> +
>>>>> /*
>>>>> * We fault-in kernel-space virtual memory on-demand. The
>>>>> * 'reference' page table is init_mm.pgd.
>>>>> --
>>>>> 2.1.0
>>>>>
>>>>>
>>>
>

2022-12-09 07:29:03

by Tiezhu Yang

[permalink] [raw]
Subject: Re: [PATCH v9 2/4] LoongArch: Add kprobe support



On 12/09/2022 10:22 AM, Huacai Chen wrote:
> cas
>
> On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]> wrote:
>>
>> Kprobes allows you to trap at almost any kernel address and
>> execute a callback function, this commit adds kprobe support
>> for LoongArch.

...

>> + case KPROBE_HIT_ACTIVE:
>> + case KPROBE_HIT_SSDONE:
>> + /*
>> + * In case the user-specified fault handler returned
>> + * zero, try to fix up.
>> + */
>> + if (fixup_exception(regs))
>> + return true;
> Does here need to enable preemption too?
>

No need, because in these two cases, after calling fixup_exception() or
do_page_fault(), the original process will continue, and preemption will
be enabled in the original process.

Thanks,
Tiezhu

2022-12-09 08:29:52

by Huacai Chen

[permalink] [raw]
Subject: Re: [PATCH v9 2/4] LoongArch: Add kprobe support

On Fri, Dec 9, 2022 at 2:10 PM Youling Tang <[email protected]> wrote:
>
>
>
> On 12/09/2022 01:53 PM, Jinyang He wrote:
> > On 2022-12-09 11:44, Youling Tang wrote:
> >
> >> Hi, Jinyang
> >>
> >> On 12/09/2022 11:31 AM, Jinyang He wrote:
> >>> On 2022-12-09 10:22, Huacai Chen wrote:
> >>>
> >>>> cas
> >>>>
> >>>> On Thu, Dec 8, 2022 at 9:41 PM Tiezhu Yang <[email protected]>
> >>>> wrote:
> >>>>> Kprobes allows you to trap at almost any kernel address and
> >>>>> execute a callback function, this commit adds kprobe support
> >>>>> for LoongArch.
> >>>>>
> >>>>> Signed-off-by: Tiezhu Yang <[email protected]>
> >>>>> ---
> >>>>> arch/loongarch/Kconfig | 1 +
> >>>>> arch/loongarch/include/asm/inst.h | 15 ++
> >>>>> arch/loongarch/include/asm/kprobes.h | 59 +++++++
> >>>>> arch/loongarch/kernel/Makefile | 2 +
> >>>>> arch/loongarch/kernel/kprobes.c | 332
> >>>>> +++++++++++++++++++++++++++++++++++
> >>>>> arch/loongarch/kernel/traps.c | 13 +-
> >>>>> arch/loongarch/mm/fault.c | 3 +
> >>>>> 7 files changed, 421 insertions(+), 4 deletions(-)
> >>>>> create mode 100644 arch/loongarch/include/asm/kprobes.h
> >>>>> create mode 100644 arch/loongarch/kernel/kprobes.c
> >>>>>
> >>>>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> >>>>> index 16bf1b6..f6fc156 100644
> >>>>> --- a/arch/loongarch/Kconfig
> >>>>> +++ b/arch/loongarch/Kconfig
> >>>>> @@ -102,6 +102,7 @@ config LOONGARCH
> >>>>> select HAVE_IOREMAP_PROT
> >>>>> select HAVE_IRQ_EXIT_ON_IRQ_STACK
> >>>>> select HAVE_IRQ_TIME_ACCOUNTING
> >>>>> + select HAVE_KPROBES
> >>>>> select HAVE_MOD_ARCH_SPECIFIC
> >>>>> select HAVE_NMI
> >>>>> select HAVE_PCI
> >>>>> diff --git a/arch/loongarch/include/asm/inst.h
> >>>>> b/arch/loongarch/include/asm/inst.h
> >>>>> index 19cf692..e3f2378 100644
> >>>>> --- a/arch/loongarch/include/asm/inst.h
> >>>>> +++ b/arch/loongarch/include/asm/inst.h
> >>>>> @@ -24,6 +24,10 @@
> >>>>>
> >>>>> #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >>
> >>>>> ADDR_IMMSHIFT_##INSN)
> >>>>>
> >>>>> +enum reg0i15_op {
> >>>>> + break_op = 0x54,
> >>>>> +};
> >>>>> +
> >>>>> enum reg0i26_op {
> >>>>> b_op = 0x14,
> >>>>> bl_op = 0x15,
> >>>>> @@ -180,6 +184,11 @@ enum reg3sa2_op {
> >>>>> alsld_op = 0x16,
> >>>>> };
> >>>>>
> >>>>> +struct reg0i15_format {
> >>>>> + unsigned int immediate : 15;
> >>>>> + unsigned int opcode : 17;
> >>>>> +};
> >>>>> +
> >>>>> struct reg0i26_format {
> >>>>> unsigned int immediate_h : 10;
> >>>>> unsigned int immediate_l : 16;
> >>>>> @@ -265,6 +274,7 @@ struct reg3sa2_format {
> >>>>>
> >>>>> union loongarch_instruction {
> >>>>> unsigned int word;
> >>>>> + struct reg0i15_format reg0i15_format;
> >>>>> struct reg0i26_format reg0i26_format;
> >>>>> struct reg1i20_format reg1i20_format;
> >>>>> struct reg1i21_format reg1i21_format;
> >>>>> @@ -335,6 +345,11 @@ static inline bool is_branch_ins(union
> >>>>> loongarch_instruction *ip)
> >>>>> ip->reg1i21_format.opcode <= bgeu_op;
> >>>>> }
> >>>>>
> >>>>> +static inline bool is_break_ins(union loongarch_instruction *ip)
> >>>>> +{
> >>>>> + return ip->reg0i15_format.opcode == break_op;
> >>>>> +}
> >>>>> +
> >>>>> static inline bool is_ra_save_ins(union loongarch_instruction *ip)
> >>>>> {
> >>>>> /* st.d $ra, $sp, offset */
> >>>>> diff --git a/arch/loongarch/include/asm/kprobes.h
> >>>>> b/arch/loongarch/include/asm/kprobes.h
> >>>>> new file mode 100644
> >>>>> index 0000000..d3903f3
> >>>>> --- /dev/null
> >>>>> +++ b/arch/loongarch/include/asm/kprobes.h
> >>>>> @@ -0,0 +1,59 @@
> >>>>> +/* SPDX-License-Identifier: GPL-2.0-only */
> >>>>> +#ifndef __ASM_LOONGARCH_KPROBES_H
> >>>>> +#define __ASM_LOONGARCH_KPROBES_H
> >>>>> +
> >>>>> +#include <asm-generic/kprobes.h>
> >>>>> +#include <asm/cacheflush.h>
> >>>>> +
> >>>>> +#ifdef CONFIG_KPROBES
> >>>>> +
> >>>>> +#include <asm/inst.h>
> >>>>> +
> >>>>> +#define __ARCH_WANT_KPROBES_INSN_SLOT
> >>>>> +#define MAX_INSN_SIZE 2
> >>>>> +
> >>>>> +#define
> >>>>> flush_insn_slot(p) \
> >>>>> +do
> >>>>> { \
> >>>>> + if
> >>>>> (p->addr) \
> >>>>> + flush_icache_range((unsigned
> >>>>> long)p->addr, \
> >>>>> + (unsigned long)p->addr
> >>>>> + \
> >>>>> + (MAX_INSN_SIZE *
> >>>>> sizeof(kprobe_opcode_t))); \
> >>>>> +} while (0)
> >>>>> +
> >>>>> +#define kretprobe_blacklist_size 0
> >>>>> +
> >>>>> +typedef union loongarch_instruction kprobe_opcode_t;
> >>>>> +
> >>>>> +/* Architecture specific copy of original instruction */
> >>>>> +struct arch_specific_insn {
> >>>>> + /* copy of the original instruction */
> >>>>> + kprobe_opcode_t *insn;
> >>>>> +};
> >>>>> +
> >>>>> +struct prev_kprobe {
> >>>>> + struct kprobe *kp;
> >>>>> + unsigned long status;
> >>>>> + unsigned long saved_irq;
> >>>>> + unsigned long saved_era;
> >>>>> +};
> >>>>> +
> >>>>> +/* per-cpu kprobe control block */
> >>>>> +struct kprobe_ctlblk {
> >>>>> + unsigned long kprobe_status;
> >>>>> + unsigned long kprobe_saved_irq;
> >>>>> + unsigned long kprobe_saved_era;
> >>>>> + struct prev_kprobe prev_kprobe;
> >>>>> +};
> >>>>> +
> >>>>> +void arch_remove_kprobe(struct kprobe *p);
> >>>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr);
> >>>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs);
> >>>>> +bool kprobe_singlestep_handler(struct pt_regs *regs);
> >>>>> +
> >>>>> +#else /* !CONFIG_KPROBES */
> >>>>> +
> >>>>> +static inline bool kprobe_breakpoint_handler(struct pt_regs *regs) {
> >>>>> return 0; }
> >>>>> +static inline bool kprobe_singlestep_handler(struct pt_regs *regs) {
> >>>>> return 0; }
> >>>>> +
> >>>>> +#endif /* CONFIG_KPROBES */
> >>>>> +#endif /* __ASM_LOONGARCH_KPROBES_H */
> >>>>> diff --git a/arch/loongarch/kernel/Makefile
> >>>>> b/arch/loongarch/kernel/Makefile
> >>>>> index fcaa024..6fe4a4e 100644
> >>>>> --- a/arch/loongarch/kernel/Makefile
> >>>>> +++ b/arch/loongarch/kernel/Makefile
> >>>>> @@ -47,4 +47,6 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
> >>>>>
> >>>>> obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
> >>>>>
> >>>>> +obj-$(CONFIG_KPROBES) += kprobes.o
> >>>>> +
> >>>>> CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
> >>>>> diff --git a/arch/loongarch/kernel/kprobes.c
> >>>>> b/arch/loongarch/kernel/kprobes.c
> >>>>> new file mode 100644
> >>>>> index 0000000..aadfc9ae
> >>>>> --- /dev/null
> >>>>> +++ b/arch/loongarch/kernel/kprobes.c
> >>>>> @@ -0,0 +1,332 @@
> >>>>> +// SPDX-License-Identifier: GPL-2.0-only
> >>>>> +#include <linux/kprobes.h>
> >>>>> +#include <linux/kdebug.h>
> >>>>> +#include <linux/preempt.h>
> >>>>> +#include <asm/break.h>
> >>>>> +
> >>>>> +static const union loongarch_instruction breakpoint_insn = {
> >>>>> + .reg0i15_format = {
> >>>>> + .opcode = break_op,
> >>>>> + .immediate = BRK_KPROBE_BP,
> >>>>> + }
> >>>>> +};
> >>>>> +
> >>>>> +static const union loongarch_instruction singlestep_insn = {
> >>>>> + .reg0i15_format = {
> >>>>> + .opcode = break_op,
> >>>>> + .immediate = BRK_KPROBE_SSTEPBP,
> >>>>> + }
> >>>>> +};
> >>>>> +
> >>>>> +DEFINE_PER_CPU(struct kprobe *, current_kprobe);
> >>>>> +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
> >>>>> +
> >>>>> +static bool insns_are_not_supported(union loongarch_instruction insn)
> >>>> Use insns_not_supported for short.
> >>>>
> >>>>> +{
> >>>>> + switch (insn.reg2i14_format.opcode) {
> >>>>> + case llw_op:
> >>>>> + case lld_op:
> >>>>> + case scw_op:
> >>>>> + case scd_op:
> >>>>> + pr_notice("kprobe: ll and sc instructions are not
> >>>>> supported\n");
> >>>>> + return true;
> >>>>> + }
> >>>>> +
> >>>>> + switch (insn.reg1i21_format.opcode) {
> >>>>> + case bceqz_op:
> >>>>> + pr_notice("kprobe: bceqz and bcnez instructions are
> >>>>> not supported\n");
> >>>>> + return true;
> >>>>> + }
> >>>>> +
> >>>>> + return false;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(insns_are_not_supported);
> >>>>> +
> >>>>> +int arch_prepare_kprobe(struct kprobe *p)
> >>>>> +{
> >>>>> + union loongarch_instruction insn;
> >>>>> +
> >>>>> + insn = p->addr[0];
> >>>>> + if (insns_are_not_supported(insn))
> >>>>> + return -EINVAL;
> >>>>> +
> >>>>> + p->ainsn.insn = get_insn_slot();
> >>>>> + if (!p->ainsn.insn)
> >>>>> + return -ENOMEM;
> >>>>> +
> >>>>> + p->ainsn.insn[0] = *p->addr;
> >>>>> + p->ainsn.insn[1] = singlestep_insn;
> >>>>> +
> >>>>> + p->opcode = *p->addr;
> >>>>> +
> >>>>> + return 0;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(arch_prepare_kprobe);
> >>>>> +
> >>>>> +/* Install breakpoint in text */
> >>>>> +void arch_arm_kprobe(struct kprobe *p)
> >>>>> +{
> >>>>> + *p->addr = breakpoint_insn;
> >>>>> + flush_insn_slot(p);
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(arch_arm_kprobe);
> >>>>> +
> >>>>> +/* Remove breakpoint from text */
> >>>>> +void arch_disarm_kprobe(struct kprobe *p)
> >>>>> +{
> >>>>> + *p->addr = p->opcode;
> >>>>> + flush_insn_slot(p);
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(arch_disarm_kprobe);
> >>>>> +
> >>>>> +void arch_remove_kprobe(struct kprobe *p)
> >>>>> +{
> >>>>> + if (p->ainsn.insn) {
> >>>>> + free_insn_slot(p->ainsn.insn, 0);
> >>>>> + p->ainsn.insn = NULL;
> >>>>> + }
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(arch_remove_kprobe);
> >>>>> +
> >>>>> +static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
> >>>>> +{
> >>>>> + kcb->prev_kprobe.kp = kprobe_running();
> >>>>> + kcb->prev_kprobe.status = kcb->kprobe_status;
> >>>>> + kcb->prev_kprobe.saved_irq = kcb->kprobe_saved_irq;
> >>>>> + kcb->prev_kprobe.saved_era = kcb->kprobe_saved_era;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(save_previous_kprobe);
> >>>>> +
> >>>>> +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
> >>>>> +{
> >>>>> + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
> >>>>> + kcb->kprobe_status = kcb->prev_kprobe.status;
> >>>>> + kcb->kprobe_saved_irq = kcb->prev_kprobe.saved_irq;
> >>>>> + kcb->kprobe_saved_era = kcb->prev_kprobe.saved_era;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(restore_previous_kprobe);
> >>>>> +
> >>>>> +static void set_current_kprobe(struct kprobe *p, struct pt_regs
> >>>>> *regs,
> >>>>> + struct kprobe_ctlblk *kcb)
> >>>>> +{
> >>>>> + __this_cpu_write(current_kprobe, p);
> >>>>> + kcb->kprobe_saved_irq = regs->csr_prmd & CSR_PRMD_PIE;
> >>>>> + kcb->kprobe_saved_era = regs->csr_era;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(set_current_kprobe);
> >>>>> +
> >>>>> +static bool insns_are_not_simulated(struct kprobe *p, struct pt_regs
> >>>>> *regs)
> >>>> Use insns_not_simulated for short.
> >>>>> +{
> >>>>> + if (is_branch_ins(&p->opcode)) {
> >>>>> + simu_branch(regs, p->opcode);
> >>>>> + return false;
> >>>>> + } else if (is_pc_ins(&p->opcode)) {
> >>>>> + simu_pc(regs, p->opcode);
> >>>>> + return false;
> >>>>> + } else {
> >>>>> + return true;
> >>>>> + }
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(insns_are_not_simulated);
> >>>>> +
> >>>>> +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
> >>>>> + struct kprobe_ctlblk *kcb, int reenter)
> >>>>> +{
> >>>>> + if (reenter) {
> >>>>> + save_previous_kprobe(kcb);
> >>>>> + set_current_kprobe(p, regs, kcb);
> >>>>> + kcb->kprobe_status = KPROBE_REENTER;
> >>>>> + } else {
> >>>>> + kcb->kprobe_status = KPROBE_HIT_SS;
> >>>>> + }
> >>>>> +
> >>>>> + regs->csr_prmd &= ~CSR_PRMD_PIE;
> >>>>> +
> >>>>> + if (p->ainsn.insn->word == breakpoint_insn.word) {
> >>>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
> >>>>> + preempt_enable_no_resched();
> >>>>> + return;
> >>>>> + }
> >>>>> +
> >>>>> + if (insns_are_not_simulated(p, regs)) {
> >>>>> + kcb->kprobe_status = KPROBE_HIT_SS;
> >>>>> + regs->csr_era = (unsigned long)&p->ainsn.insn[0];
> >>>>> + } else {
> >>>>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> >>>>> + if (p->post_handler)
> >>>>> + p->post_handler(p, regs, 0);
> >>>>> + reset_current_kprobe();
> >>>>> + preempt_enable_no_resched();
> >>>>> + }
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(setup_singlestep);
> >>>>> +
> >>>>> +static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
> >>>>> + struct kprobe_ctlblk *kcb)
> >>>>> +{
> >>>>> + switch (kcb->kprobe_status) {
> >>>>> + case KPROBE_HIT_SSDONE:
> >>>>> + case KPROBE_HIT_ACTIVE:
> >>>>> + kprobes_inc_nmissed_count(p);
> >>>>> + setup_singlestep(p, regs, kcb, 1);
> >>>>> + break;
> >>>>> + case KPROBE_HIT_SS:
> >>>>> + case KPROBE_REENTER:
> >>>>> + pr_warn("Failed to recover from reentered
> >>>>> kprobes.\n");
> >>>>> + dump_kprobe(p);
> >>>>> + BUG();
> >>>>> + break;
> >>>>> + default:
> >>>>> + WARN_ON(1);
> >>>>> + return false;
> >>>>> + }
> >>>>> +
> >>>>> + return true;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(reenter_kprobe);
> >>>>> +
> >>>>> +bool kprobe_breakpoint_handler(struct pt_regs *regs)
> >>>>> +{
> >>>>> + struct kprobe_ctlblk *kcb;
> >>>>> + struct kprobe *p, *cur_kprobe;
> >>>>> + kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
> >>>>> +
> >>>>> + /*
> >>>>> + * We don't want to be preempted for the entire
> >>>>> + * duration of kprobe processing.
> >>>>> + */
> >>>>> + preempt_disable();
> >>>>> + kcb = get_kprobe_ctlblk();
> >>>>> + cur_kprobe = kprobe_running();
> >>>>> +
> >>>>> + p = get_kprobe(addr);
> >>>>> + if (p) {
> >>>>> + if (cur_kprobe) {
> >>>>> + if (reenter_kprobe(p, regs, kcb))
> >>>>> + return true;
> >>>>> + } else {
> >>>>> + /* Probe hit */
> >>>>> + set_current_kprobe(p, regs, kcb);
> >>>>> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> >>>>> +
> >>>>> + /*
> >>>>> + * If we have no pre-handler or it returned
> >>>>> 0, we
> >>>>> + * continue with normal processing. If we
> >>>>> have a
> >>>>> + * pre-handler and it returned non-zero, it
> >>>>> will
> >>>>> + * modify the execution path and no need to
> >>>>> single
> >>>>> + * stepping. Let's just reset current kprobe
> >>>>> and exit.
> >>>>> + *
> >>>>> + * pre_handler can hit a breakpoint and can
> >>>>> step thru
> >>>>> + * before return.
> >>>>> + */
> >>>>> + if (!p->pre_handler || !p->pre_handler(p,
> >>>>> regs)) {
> >>>>> + setup_singlestep(p, regs, kcb, 0);
> >>>>> + } else {
> >>>>> + reset_current_kprobe();
> >>>>> + preempt_enable_no_resched();
> >>>>> + }
> >>>>> + }
> >>>>> + return true;
> >>>>> + }
> >>>>> +
> >>>>> + if (!is_break_ins(addr)) {
> >>>>> + /*
> >>>>> + * The breakpoint instruction was removed right
> >>>>> + * after we hit it. Another cpu has removed
> >>>>> + * either a probepoint or a debugger breakpoint
> >>>>> + * at this address. In either case, no further
> >>>>> + * handling of this interrupt is appropriate.
> >>>>> + * Return back to original instruction, and continue.
> >>>>> + */
> >>>>> + preempt_enable_no_resched();
> >>>>> + return true;
> >>>>> + }
> >>>>> +
> >>>>> + preempt_enable_no_resched();
> >>>>> + return false;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
> >>>>> +
> >>>>> +bool kprobe_singlestep_handler(struct pt_regs *regs)
> >>>>> +{
> >>>>> + struct kprobe *cur = kprobe_running();
> >>>>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> >>>>> +
> >>>>> + if (!cur)
> >>>>> + return false;
> >>>>> +
> >>>>> + /* Restore back the original saved kprobes variables and
> >>>>> continue */
> >>>>> + if (kcb->kprobe_status == KPROBE_REENTER) {
> >>>>> + restore_previous_kprobe(kcb);
> >>>>> + goto out;
> >>>>> + }
> >>>>> +
> >>>>> + /* Call post handler */
> >>>>> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> >>>>> + if (cur->post_handler)
> >>>>> + cur->post_handler(cur, regs, 0);
> >>>> I'm not very familiar, but I think you should set KPROBE_HIT_SSDONE
> >>>> only when post_handler is not NULL.
> >>>>
> >>>>> +
> >>>>> + regs->csr_era = kcb->kprobe_saved_era + LOONGARCH_INSN_SIZE;
> >>>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
> >>>>> +
> >>>>> + reset_current_kprobe();
> >>>>> +out:
> >>>>> + preempt_enable_no_resched();
> >>>>> + return true;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(kprobe_singlestep_handler);
> >>>>> +
> >>>>> +bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
> >>>>> +{
> >>>>> + struct kprobe *cur = kprobe_running();
> >>>>> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> >>>>> +
> >>>>> + switch (kcb->kprobe_status) {
> >>>>> + case KPROBE_HIT_SS:
> >>>>> + case KPROBE_REENTER:
> >>>>> + /*
> >>>>> + * We are here because the instruction being single
> >>>>> + * stepped caused a page fault. We reset the current
> >>>>> + * kprobe and the ip points back to the probe address
> >>>>> + * and allow the page fault handler to continue as a
> >>>>> + * normal page fault.
> >>>>> + */
> >>>>> + regs->csr_era = (unsigned long) cur->addr;
> >>>>> + BUG_ON(!instruction_pointer(regs));
> >>>>> +
> >>>>> + if (kcb->kprobe_status == KPROBE_REENTER) {
> >>>>> + restore_previous_kprobe(kcb);
> >>>>> + } else {
> >>>>> + regs->csr_prmd |= kcb->kprobe_saved_irq;
> >>>>> + reset_current_kprobe();
> >>>>> + preempt_enable_no_resched();
> >>>>> + }
> >>>> I think the KPROBE_REENTER case also needs to enable preemption.
> >>>>
> >>>>> +
> >>>>> + break;
> >>>>> + case KPROBE_HIT_ACTIVE:
> >>>>> + case KPROBE_HIT_SSDONE:
> >>>>> + /*
> >>>>> + * In case the user-specified fault handler returned
> >>>>> + * zero, try to fix up.
> >>>>> + */
> >>>>> + if (fixup_exception(regs))
> >>>>> + return true;
> >>>> Does here need to enable preemption too?
> >>>>
> >>>>> + }
> >>>>> + return false;
> >>>>> +}
> >>>>> +NOKPROBE_SYMBOL(kprobe_fault_handler);
> >>>>> +
> >>>>> +/*
> >>>>> + * Provide a blacklist of symbols identifying ranges which cannot be
> >>>>> kprobed.
> >>>>> + * This blacklist is exposed to userspace via debugfs
> >>>>> (kprobes/blacklist).
> >>>>> + */
> >>>>> +int __init arch_populate_kprobe_blacklist(void)
> >>>>> +{
> >>>>> + return kprobe_add_area_blacklist((unsigned
> >>>>> long)__irqentry_text_start,
> >>>>> + (unsigned
> >>>>> long)__irqentry_text_end);
> >>>>> +}
> >>>>> +
> >>>>> +int __init arch_init_kprobes(void)
> >>>>> +{
> >>>>> + return 0;
> >>>>> +}
> >>>>> diff --git a/arch/loongarch/kernel/traps.c
> >>>>> b/arch/loongarch/kernel/traps.c
> >>>>> index a19bb32..4d9f775 100644
> >>>>> --- a/arch/loongarch/kernel/traps.c
> >>>>> +++ b/arch/loongarch/kernel/traps.c
> >>>>> @@ -448,14 +448,12 @@ asmlinkage void noinstr do_bp(struct pt_regs
> >>>>> *regs)
> >>>>> */
> >>>>> switch (bcode) {
> >>>>> case BRK_KPROBE_BP:
> >>>>> - if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
> >>>>> - current->thread.trap_nr, SIGTRAP) ==
> >>>>> NOTIFY_STOP)
> >>>>> + if (kprobe_breakpoint_handler(regs))
> >>>>> goto out;
> >>>>> else
> >>>>> break;
> >>>>> case BRK_KPROBE_SSTEPBP:
> >>>>> - if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep",
> >>>>> regs, bcode,
> >>>>> - current->thread.trap_nr, SIGTRAP) ==
> >>>>> NOTIFY_STOP)
> >>>>> + if (kprobe_singlestep_handler(regs))
> >>>>> goto out;
> >>>>> else
> >>>>> break;
> >>>>> @@ -479,6 +477,13 @@ asmlinkage void noinstr do_bp(struct pt_regs
> >>>>> *regs)
> >>>>> break;
> >>>>> }
> >>>>>
> >>>>> + if (bcode == BRK_KPROBE_BP) {
> >>>>> + if (__get_inst(&opcode, (u32 *)era, user))
> >>>>> + goto out_sigsegv;
> >>>>> +
> >>>>> + bcode = (opcode & 0x7fff);
> >>>>> + }
> >>>>> +
> >>>> Why? we already got bcode at the beginning.
> >>>
> >>> Hi, Huacai,
> >>>
> >>>
> >>> I think it is because kprobe_breakpoint_handler() may deal with the
> >>> case where another cpu may have removed its breakpoint. In that case,
> >>> normally we can exit this break exception and go on. But if
> >>> the original instruction is_break_ins(), we will trigger the trap
> >>> again. So it is better to avoid double traps and let it go ahead when
> >>> kprobe_breakpoint_handler() returns.
> >> Perhaps it is not described here as avoiding the trap twice,
> >
> > That is due to the implementation of the last branch, is_break_ins(), in
> > kprobe_breakpoint_handler(). We can indirectly return true for all
> > instructions to indicate we can stop kprobe now.
> > And then it returns to the normal code flow. If the instruction at ERA
> > is a "break xxx", it would trap again. I call it "trap twice" because
> > here we are in the first trap.
> Mmm, it can be understood that way.
I think we should construct a test program to prove it is actually
necessary to get bcode again.

Huacai
>
> Thanks.
> > To avoid the second trap, we add the judgment named is_break_ins() and
> > return false to indicate we should go to another break handler. Of
> > course, with the instruction being restored, we should re-get the bcode.
> >
> >
> >> but rather
> >> that if we don't re-fetch the bcode when the original instruction
> >> `break` will go to the default branch regardless of what the bcode of
> >> the original instruction is (since the bcode is BRK_KPROBE_BP at this
> >> point). IMHO, so we re-get the bcode to get the original instruction
> >> `break` is the correct bcode to ensure that it goes to the appropriate
> >> case branch.
> >>
> >> Thanks,
> >> Youling.
> >>
> >>> Different from many other archs, e.g. riscv, we switch on bcode to
> >>> decide which handlers should go on. Thus, we need to get bcode again.
> >>>
> >>> Thanks,
> >>>
> >>> Jinyang
> >>>
> >>>
> >>>>> switch (bcode) {
> >>>>> case BRK_BUG:
> >>>>> bug_handler(regs);
> >>>>> diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
> >>>>> index 1ccd536..449087b 100644
> >>>>> --- a/arch/loongarch/mm/fault.c
> >>>>> +++ b/arch/loongarch/mm/fault.c
> >>>>> @@ -135,6 +135,9 @@ static void __kprobes __do_page_fault(struct
> >>>>> pt_regs *regs,
> >>>>> struct vm_area_struct *vma = NULL;
> >>>>> vm_fault_t fault;
> >>>>>
> >>>>> + if (kprobe_page_fault(regs, current->thread.trap_nr))
> >>>>> + return;
> >>>>> +
> >>>>> /*
> >>>>> * We fault-in kernel-space virtual memory on-demand. The
> >>>>> * 'reference' page table is init_mm.pgd.
> >>>>> --
> >>>>> 2.1.0
> >>>>>
> >>>>>
> >>>
> >
>