Hi,
Here is the 7th version of fprobe. This version fixes unregister_fprobe()
so that it ensures the exit_handler is not called after unregister_fprobe()
returns, and fixes some comments and documentation.
The previous version is here[1]:
[1] https://lore.kernel.org/all/164338031590.2429999.6203979005944292576.stgit@devnote2/T/#u
This series introduces the fprobe, a function entry/exit probe
with multiple probe point support. It also introduces the rethook
for hooking function returns in the same way the kretprobe does. This
abstraction will help us to generalize the fgraph tracer,
because fprobe can just switch from the rethook to it,
depending on the kernel configuration.
The patch [1/10] is from Jiri's series[2].
[2] https://lore.kernel.org/all/[email protected]/T/#u
And the patch [9/10] adds the FPROBE_FL_KPROBE_SHARED flag for the case
where the user wants to share the same code (or the same resource) between
the fprobe and the kprobes.
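To give a feel for the resulting API, here is a minimal usage sketch (the
callback names and the call site are placeholders; see the documentation
patch in this series and the kernel-doc for the exact semantics):

  static void my_entry_cb(struct fprobe *fp, unsigned long entry_ip,
                          struct pt_regs *regs)
  {
          /* called at each probed function entry */
  }

  static void my_exit_cb(struct fprobe *fp, unsigned long entry_ip,
                         struct pt_regs *regs)
  {
          /* called when a probed function returns */
  }

  static struct fprobe fp = {
          .entry_handler = my_entry_cb,
          .exit_handler  = my_exit_cb,
          /* set this flag only if the callbacks are also shared with kprobes */
          .flags         = FPROBE_FL_KPROBE_SHARED,
  };

  /* e.g. in module init: probe every "func*" except "func2" */
  ret = register_fprobe(&fp, "func*", "func2");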
Thank you,
---
Jiri Olsa (1):
ftrace: Add ftrace_set_filter_ips function
Masami Hiramatsu (9):
fprobe: Add ftrace based probe APIs
rethook: Add a generic return hook
rethook: x86: Add rethook x86 implementation
ARM: rethook: Add rethook arm implementation
arm64: rethook: Add arm64 rethook implementation
fprobe: Add exit_handler support
fprobe: Add sample program for fprobe
fprobe: Introduce FPROBE_FL_KPROBE_SHARED flag for fprobe
docs: fprobe: Add fprobe description to ftrace-use.rst
Documentation/trace/fprobe.rst | 171 +++++++++++++
Documentation/trace/index.rst | 1
arch/arm/Kconfig | 1
arch/arm/include/asm/stacktrace.h | 4
arch/arm/kernel/stacktrace.c | 6
arch/arm/probes/Makefile | 1
arch/arm/probes/rethook.c | 71 +++++
arch/arm64/Kconfig | 1
arch/arm64/include/asm/stacktrace.h | 2
arch/arm64/kernel/probes/Makefile | 1
arch/arm64/kernel/probes/rethook.c | 25 ++
arch/arm64/kernel/probes/rethook_trampoline.S | 87 ++++++
arch/arm64/kernel/stacktrace.c | 7 -
arch/x86/Kconfig | 1
arch/x86/include/asm/unwind.h | 8 +
arch/x86/kernel/Makefile | 1
arch/x86/kernel/kprobes/common.h | 1
arch/x86/kernel/rethook.c | 115 ++++++++
include/linux/fprobe.h | 97 +++++++
include/linux/ftrace.h | 3
include/linux/kprobes.h | 3
include/linux/rethook.h | 100 +++++++
include/linux/sched.h | 3
kernel/exit.c | 2
kernel/fork.c | 3
kernel/trace/Kconfig | 26 ++
kernel/trace/Makefile | 2
kernel/trace/fprobe.c | 341 +++++++++++++++++++++++++
kernel/trace/ftrace.c | 58 ++++
kernel/trace/rethook.c | 313 +++++++++++++++++++++++
samples/Kconfig | 7 +
samples/Makefile | 1
samples/fprobe/Makefile | 3
samples/fprobe/fprobe_example.c | 120 +++++++++
34 files changed, 1572 insertions(+), 14 deletions(-)
create mode 100644 Documentation/trace/fprobe.rst
create mode 100644 arch/arm/probes/rethook.c
create mode 100644 arch/arm64/kernel/probes/rethook.c
create mode 100644 arch/arm64/kernel/probes/rethook_trampoline.S
create mode 100644 arch/x86/kernel/rethook.c
create mode 100644 include/linux/fprobe.h
create mode 100644 include/linux/rethook.h
create mode 100644 kernel/trace/fprobe.c
create mode 100644 kernel/trace/rethook.c
create mode 100644 samples/fprobe/Makefile
create mode 100644 samples/fprobe/fprobe_example.c
--
Masami Hiramatsu (Linaro) <[email protected]>
From: Jiri Olsa <[email protected]>
Adding the ftrace_set_filter_ips function to be able to set a filter on
multiple ip addresses at once.
With the kprobe multi attach interface we have cases where we need to
initialize the ftrace_ops object with thousands of functions, so having
a single function diving into ftrace_hash_move_and_update_ops under
ftrace_lock is faster.
The function ips are passed as an unsigned long array together with a count.
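For illustration, a caller that has already resolved its ftrace locations
could use it roughly like this (a sketch; 'my_ops' and the addresses are
assumed to be set up elsewhere):

  unsigned long ips[] = { addr1, addr2, addr3 };  /* hypothetical addresses */
  int ret;

  ret = ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 0);
  if (ret)
          return ret;     /* on error, the filter of my_ops is left unchanged */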
Signed-off-by: Jiri Olsa <[email protected]>
---
Changes in v6: [Masami]
- Fix a typo and add a comment.
---
include/linux/ftrace.h | 3 ++
kernel/trace/ftrace.c | 58 +++++++++++++++++++++++++++++++++++++++++-------
2 files changed, 52 insertions(+), 9 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9999e29187de..60847cbce0da 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -512,6 +512,8 @@ struct dyn_ftrace {
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -802,6 +804,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b01e1fa62193..a28b1bdb234a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4958,7 +4958,7 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
}
static int
-ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
{
struct ftrace_func_entry *entry;
@@ -4976,9 +4976,30 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
return add_hash_entry(hash, ip);
}
+static int
+ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
+ unsigned int cnt, int remove)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < cnt; i++) {
+ err = __ftrace_match_addr(hash, ips[i], remove);
+ if (err) {
+ /*
+ * This expects the @hash is a temporary hash and if this
+ * fails the caller must free the @hash.
+ */
+ return err;
+ }
+ }
+ return 0;
+}
+
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
- unsigned long ip, int remove, int reset, int enable)
+ unsigned long *ips, unsigned int cnt,
+ int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
struct ftrace_hash *hash;
@@ -5008,8 +5029,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
ret = -EINVAL;
goto out_regex_unlock;
}
- if (ip) {
- ret = ftrace_match_addr(hash, ip, remove);
+ if (ips) {
+ ret = ftrace_match_addr(hash, ips, cnt, remove);
if (ret < 0)
goto out_regex_unlock;
}
@@ -5026,10 +5047,10 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
}
static int
-ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
- int reset, int enable)
+ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
+ int remove, int reset, int enable)
{
- return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
+ return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
@@ -5628,10 +5649,29 @@ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset)
{
ftrace_ops_init(ops);
- return ftrace_set_addr(ops, ip, remove, reset, 1);
+ return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
+/**
+ * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
+ * @ops - the ops to set the filter with
+ * @ips - the array of addresses to add to or remove from the filter.
+ * @cnt - the number of addresses in @ips
+ * @remove - non zero to remove ips from the filter
+ * @reset - non zero to reset all filters before applying this filter.
+ *
+ * Filters denote which functions should be enabled when tracing is enabled.
+ * If the @ips array or any ip specified within is NULL, it fails to update the filter.
+ */
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset)
+{
+ ftrace_ops_init(ops);
+ return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
+
/**
* ftrace_ops_set_global_filter - setup ops to use global filters
* @ops - the ops which will use the global filters
@@ -5653,7 +5693,7 @@ static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
int reset, int enable)
{
- return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
+ return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
}
/**
Add a return hook framework which hooks the function return. Most of the
logic came from the kretprobe, but this is independent of kretprobe.
Note that this is expected to be used with other function entry hooking
features, like ftrace, fprobe, and kprobes. Eventually this will replace
the kretprobe (e.g. kprobe + rethook = kretprobe), but at this moment,
this is just an additional hook.
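As a rough sketch of how an entry-hooking feature is expected to drive this
API (modeled on the fprobe usage later in this series; rh, node, i, my_data,
nr_nodes and regs are placeholders from the surrounding code):

  /* The return handler has the rethook_handler_t type. */
  static void my_return_handler(struct rethook_node *node, void *data,
                                struct pt_regs *regs)
  {
          /* called when a hooked function returns */
  }

  /* setup: allocate a rethook and fill its node pool */
  rh = rethook_alloc(my_data, my_return_handler);
  if (!rh)
          return -ENOMEM;
  for (i = 0; i < nr_nodes; i++) {
          /* a real user embeds struct rethook_node in its own node type */
          node = kzalloc(sizeof(*node), GFP_KERNEL);
          if (!node)
                  break;
          rethook_add_node(rh, node);
  }

  /* in the function-entry hook (preemption disabled, e.g. an ftrace handler) */
  node = rethook_try_get(rh);
  if (node)
          rethook_hook(node, regs);

  /* teardown: the nodes and rh itself are freed back via RCU */
  rethook_free(rh);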
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v6:
- Fix typos.
- Use dereference_symbol_descriptor() to check the trampoline address.
- Shrink down the preempt-disabled section for recycling nodes.
- Reject stack searching if the task is not current and is running in
rethook_find_ret_addr().
Changes in v4:
- Fix rethook_trampoline_handler() loops as same as
what currently kretprobe does. This will fix some
stacktrace issue in the rethook handler.
---
include/linux/rethook.h | 100 +++++++++++++++
include/linux/sched.h | 3
kernel/exit.c | 2
kernel/fork.c | 3
kernel/trace/Kconfig | 11 ++
kernel/trace/Makefile | 1
kernel/trace/rethook.c | 313 +++++++++++++++++++++++++++++++++++++++++++++++
7 files changed, 433 insertions(+)
create mode 100644 include/linux/rethook.h
create mode 100644 kernel/trace/rethook.c
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
new file mode 100644
index 000000000000..6ff745d8f784
--- /dev/null
+++ b/include/linux/rethook.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Return hooking with list-based shadow stack.
+ */
+#ifndef _LINUX_RETHOOK_H
+#define _LINUX_RETHOOK_H
+
+#include <linux/compiler.h>
+#include <linux/freelist.h>
+#include <linux/kallsyms.h>
+#include <linux/llist.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+
+struct rethook_node;
+
+typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *);
+
+/**
+ * struct rethook - The rethook management data structure.
+ * @data: The user-defined data storage.
+ * @handler: The user-defined return hook handler.
+ * @pool: The pool of struct rethook_node.
+ * @ref: The reference counter.
+ * @rcu: The rcu_head for deferred freeing.
+ *
+ * Don't embed to another data structure, because this is a self-destructive
+ * data structure when all rethook_node are freed.
+ */
+struct rethook {
+ void *data;
+ rethook_handler_t handler;
+ struct freelist_head pool;
+ refcount_t ref;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct rethook_node - The rethook shadow-stack entry node.
+ * @freelist: The freelist, linked to struct rethook::pool.
+ * @rcu: The rcu_head for deferred freeing.
+ * @llist: The llist, linked to a struct task_struct::rethooks.
+ * @rethook: The pointer to the struct rethook.
+ * @ret_addr: The storage for the real return address.
+ * @frame: The storage for the frame pointer.
+ *
+ * You can embed this to your extended data structure to store any data
+ * on each entry of the shadow stack.
+ */
+struct rethook_node {
+ union {
+ struct freelist_node freelist;
+ struct rcu_head rcu;
+ };
+ struct llist_node llist;
+ struct rethook *rethook;
+ unsigned long ret_addr;
+ unsigned long frame;
+};
+
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+void rethook_free(struct rethook *rh);
+void rethook_add_node(struct rethook *rh, struct rethook_node *node);
+struct rethook_node *rethook_try_get(struct rethook *rh);
+void rethook_recycle(struct rethook_node *node);
+void rethook_hook(struct rethook_node *node, struct pt_regs *regs);
+unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
+ struct llist_node **cur);
+
+/* Arch dependent code must implement arch_* and trampoline code */
+void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs);
+void arch_rethook_trampoline(void);
+
+/**
+ * is_rethook_trampoline() - Check whether the address is rethook trampoline
+ * @addr: The address to be checked
+ *
+ * Return true if the @addr is the rethook trampoline address.
+ */
+static inline bool is_rethook_trampoline(unsigned long addr)
+{
+ return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline);
+}
+
+/* If the architecture needs to fixup the return address, implement it. */
+void arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr);
+
+/* Generic trampoline handler, arch code must prepare asm stub */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ unsigned long frame);
+
+#ifdef CONFIG_RETHOOK
+void rethook_flush_task(struct task_struct *tk);
+#else
+#define rethook_flush_task(tsk) do { } while (0)
+#endif
+
+#endif
+
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 78c351e35fec..2bfabf5355b7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1473,6 +1473,9 @@ struct task_struct {
#ifdef CONFIG_KRETPROBES
struct llist_head kretprobe_instances;
#endif
+#ifdef CONFIG_RETHOOK
+ struct llist_head rethooks;
+#endif
#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
/*
diff --git a/kernel/exit.c b/kernel/exit.c
index f702a6a63686..a39a321c1f37 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -64,6 +64,7 @@
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
+#include <linux/rethook.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -169,6 +170,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
kprobe_flush_task(tsk);
+ rethook_flush_task(tsk);
perf_event_delayed_put(tsk);
trace_sched_process_free(tsk);
put_task_struct(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 3244cc56b697..ffae38be64c4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2282,6 +2282,9 @@ static __latent_entropy struct task_struct *copy_process(
#ifdef CONFIG_KRETPROBES
p->kretprobe_instances.first = NULL;
#endif
+#ifdef CONFIG_RETHOOK
+ p->rethooks.first = NULL;
+#endif
/*
* Ensure that the cgroup subsystem policies allow the new process to be
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 043c8f6c4075..9e66fd29d94e 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -10,6 +10,17 @@ config USER_STACKTRACE_SUPPORT
config NOP_TRACER
bool
+config HAVE_RETHOOK
+ bool
+
+config RETHOOK
+ bool
+ depends on HAVE_RETHOOK
+ help
+ Enable generic return hooking feature. This is an internal
+ API, which will be used by other function-entry hooking
+ features like fprobe and kprobes.
+
config HAVE_FUNCTION_TRACER
bool
help
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 79255f9de9a4..c6f11a139eac 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o
obj-$(CONFIG_FPROBE) += fprobe.o
+obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
new file mode 100644
index 000000000000..e851d60feb7d
--- /dev/null
+++ b/kernel/trace/rethook.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "rethook: " fmt
+
+#include <linux/bug.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/rethook.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+/* Return hook list (shadow stack by list) */
+
+/*
+ * This function is called from delayed_put_task_struct() when a task is
+ * dead and cleaned up to recycle any rethook instances associated with
+ * this task. These leftover instances represent probed functions that
+ * have been called but will never return.
+ */
+void rethook_flush_task(struct task_struct *tk)
+{
+ struct rethook_node *rhn;
+ struct llist_node *node;
+
+ node = __llist_del_all(&tk->rethooks);
+ while (node) {
+ rhn = container_of(node, struct rethook_node, llist);
+ node = node->next;
+ preempt_disable();
+ rethook_recycle(rhn);
+ preempt_enable();
+ }
+}
+
+static void rethook_free_rcu(struct rcu_head *head)
+{
+ struct rethook *rh = container_of(head, struct rethook, rcu);
+ struct rethook_node *rhn;
+ struct freelist_node *node;
+ int count = 1;
+
+ node = rh->pool.head;
+ while (node) {
+ rhn = container_of(node, struct rethook_node, freelist);
+ node = node->next;
+ kfree(rhn);
+ count++;
+ }
+
+ /* The rh->ref is the number of pooled node + 1 */
+ if (refcount_sub_and_test(count, &rh->ref))
+ kfree(rh);
+}
+
+/**
+ * rethook_free() - Free struct rethook.
+ * @rh: the struct rethook to be freed.
+ *
+ * Free the rethook. Before calling this function, the user must ensure that
+ * @rh::data is cleaned up if needed (otherwise, the handler can still access
+ * it after calling this function.) This function schedules @rh to be freed
+ * after all rethook_node instances are freed (i.e. not immediately), and the
+ * caller must not touch @rh after calling this.
+ */
+void rethook_free(struct rethook *rh)
+{
+ rcu_assign_pointer(rh->handler, NULL);
+
+ call_rcu(&rh->rcu, rethook_free_rcu);
+}
+
+/**
+ * rethook_alloc() - Allocate struct rethook.
+ * @data: the data to pass to @handler when hooking the return.
+ * @handler: the return hook callback function.
+ *
+ * Allocate and initialize a new rethook with @data and @handler.
+ * Return NULL if memory allocation fails or @handler is NULL.
+ * Note that @handler == NULL means this rethook is going to be freed.
+ */
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
+{
+ struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
+
+ if (!rh || !handler)
+ return NULL;
+
+ rh->data = data;
+ rh->handler = handler;
+ rh->pool.head = NULL;
+ refcount_set(&rh->ref, 1);
+
+ return rh;
+}
+
+/**
+ * rethook_add_node() - Add a new node to the rethook.
+ * @rh: the struct rethook.
+ * @node: the struct rethook_node to be added.
+ *
+ * Add @node to @rh. User must allocate @node (as a part of user's
+ * data structure.) The @node fields are initialized in this function.
+ */
+void rethook_add_node(struct rethook *rh, struct rethook_node *node)
+{
+ node->rethook = rh;
+ freelist_add(&node->freelist, &rh->pool);
+ refcount_inc(&rh->ref);
+}
+
+static void free_rethook_node_rcu(struct rcu_head *head)
+{
+ struct rethook_node *node = container_of(head, struct rethook_node, rcu);
+
+ if (refcount_dec_and_test(&node->rethook->ref))
+ kfree(node->rethook);
+ kfree(node);
+}
+
+/**
+ * rethook_recycle() - return the node to rethook.
+ * @node: The struct rethook_node to be returned.
+ *
+ * Return back the @node to @node::rethook. If the @node::rethook is already
+ * marked as freed, this will free the @node.
+ */
+void rethook_recycle(struct rethook_node *node)
+{
+ lockdep_assert_preemption_disabled();
+
+ if (likely(READ_ONCE(node->rethook->handler)))
+ freelist_add(&node->freelist, &node->rethook->pool);
+ else
+ call_rcu(&node->rcu, free_rethook_node_rcu);
+}
+NOKPROBE_SYMBOL(rethook_recycle);
+
+/**
+ * rethook_try_get() - get an unused rethook node.
+ * @rh: The struct rethook which pools the nodes.
+ *
+ * Get an unused rethook node from @rh. If the node pool is empty, this
+ * will return NULL. Caller must disable preemption.
+ */
+struct rethook_node *rethook_try_get(struct rethook *rh)
+{
+ rethook_handler_t handler = READ_ONCE(rh->handler);
+ struct freelist_node *fn;
+
+ lockdep_assert_preemption_disabled();
+
+ /* Check whether @rh is going to be freed. */
+ if (unlikely(!handler))
+ return NULL;
+
+ fn = freelist_try_get(&rh->pool);
+ if (!fn)
+ return NULL;
+
+ return container_of(fn, struct rethook_node, freelist);
+}
+NOKPROBE_SYMBOL(rethook_try_get);
+
+/**
+ * rethook_hook() - Hook the current function return.
+ * @node: The struct rethook node to hook the function return.
+ * @regs: The struct pt_regs for the function entry.
+ *
+ * Hook the current running function's return. This must be called at the
+ * function entry (or at least @regs must be the registers of the function
+ * entry.)
+ */
+void rethook_hook(struct rethook_node *node, struct pt_regs *regs)
+{
+ arch_rethook_prepare(node, regs);
+ __llist_add(&node->llist, ¤t->rethooks);
+}
+NOKPROBE_SYMBOL(rethook_hook);
+
+/* This assumes the 'tsk' is the current task or is not running. */
+static unsigned long __rethook_find_ret_addr(struct task_struct *tsk,
+ struct llist_node **cur)
+{
+ struct rethook_node *rh = NULL;
+ struct llist_node *node = *cur;
+
+ if (!node)
+ node = tsk->rethooks.first;
+ else
+ node = node->next;
+
+ while (node) {
+ rh = container_of(node, struct rethook_node, llist);
+ if (rh->ret_addr != (unsigned long)arch_rethook_trampoline) {
+ *cur = node;
+ return rh->ret_addr;
+ }
+ node = node->next;
+ }
+ return 0;
+}
+NOKPROBE_SYMBOL(__rethook_find_ret_addr);
+
+/**
+ * rethook_find_ret_addr -- Find correct return address modified by rethook
+ * @tsk: Target task
+ * @frame: A frame pointer
+ * @cur: a storage of the loop cursor llist_node pointer for next call
+ *
+ * Find the correct return address modified by a rethook on @tsk in unsigned
+ * long type.
+ * The @tsk must be 'current' or a task which is not running. @frame is a hint
+ * to get the correct return address, which is compared with the
+ * rethook::frame field. The @cur is a loop cursor for searching the
+ * rethook return addresses on the @tsk. The '*@cur' should be NULL at the
+ * first call, but '@cur' itself must NOT be NULL.
+ *
+ * Returns found address value or zero if not found.
+ */
+unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
+ struct llist_node **cur)
+{
+ struct rethook_node *rhn = NULL;
+ unsigned long ret;
+
+ if (WARN_ON_ONCE(!cur))
+ return 0;
+
+ if (WARN_ON_ONCE(tsk != current && task_is_running(tsk)))
+ return 0;
+
+ do {
+ ret = __rethook_find_ret_addr(tsk, cur);
+ if (!ret)
+ break;
+ rhn = container_of(*cur, struct rethook_node, llist);
+ } while (rhn->frame != frame);
+
+ return ret;
+}
+NOKPROBE_SYMBOL(rethook_find_ret_addr);
+
+void __weak arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr)
+{
+ /*
+ * Do nothing by default. If the architecture uses a frame
+ * pointer to record the real return address on the stack,
+ * it should implement this function to fix up the return
+ * address so that stacktrace works from the rethook handler.
+ */
+}
+
+/* This function will be called from each arch-defined trampoline. */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ unsigned long frame)
+{
+ struct llist_node *first, *node = NULL;
+ unsigned long correct_ret_addr;
+ rethook_handler_t handler;
+ struct rethook_node *rhn;
+
+ correct_ret_addr = __rethook_find_ret_addr(current, &node);
+ if (!correct_ret_addr) {
+ pr_err("rethook: Return address not found! Maybe there is a bug in the kernel\n");
+ BUG_ON(1);
+ }
+
+ instruction_pointer_set(regs, correct_ret_addr);
+
+ /*
+ * These loops must be protected from rethook_free_rcu() because those
+ * are accessing 'rhn->rethook'.
+ */
+ preempt_disable();
+
+ /*
+ * Run the handler on the shadow stack. Do not unlink the list here because
+ * stackdump inside the handlers needs to decode it.
+ */
+ first = current->rethooks.first;
+ while (first) {
+ rhn = container_of(first, struct rethook_node, llist);
+ if (WARN_ON_ONCE(rhn->frame != frame))
+ break;
+ handler = READ_ONCE(rhn->rethook->handler);
+ if (handler)
+ handler(rhn, rhn->rethook->data, regs);
+
+ if (first == node)
+ break;
+ first = first->next;
+ }
+
+ /* Fixup registers for returning to correct address. */
+ arch_rethook_fixup_return(regs, correct_ret_addr);
+
+ /* Unlink used shadow stack */
+ first = current->rethooks.first;
+ current->rethooks.first = node->next;
+ node->next = NULL;
+
+ while (first) {
+ rhn = container_of(first, struct rethook_node, llist);
+ first = first->next;
+ rethook_recycle(rhn);
+ }
+ preempt_enable();
+
+ return correct_ret_addr;
+}
+NOKPROBE_SYMBOL(rethook_trampoline_handler);
Add rethook arm implementation. Most of the code has been copied from
kretprobes on arm.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v5:
- Fix build error when !CONFIG_KRETPROBES
---
arch/arm/Kconfig | 1 +
arch/arm/include/asm/stacktrace.h | 4 +-
arch/arm/kernel/stacktrace.c | 6 +++
arch/arm/probes/Makefile | 1 +
arch/arm/probes/rethook.c | 71 +++++++++++++++++++++++++++++++++++++
5 files changed, 81 insertions(+), 2 deletions(-)
create mode 100644 arch/arm/probes/rethook.c
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c2724d986fa0..2fe24bbca618 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -106,6 +106,7 @@ config ARM
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
+ select HAVE_RETHOOK
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 8f54f9ad8a9b..babed1707ca8 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -14,7 +14,7 @@ struct stackframe {
unsigned long sp;
unsigned long lr;
unsigned long pc;
-#ifdef CONFIG_KRETPROBES
+#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK)
struct llist_node *kr_cur;
struct task_struct *tsk;
#endif
@@ -27,7 +27,7 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
frame->sp = regs->ARM_sp;
frame->lr = regs->ARM_lr;
frame->pc = regs->ARM_pc;
-#ifdef CONFIG_KRETPROBES
+#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK)
frame->kr_cur = NULL;
frame->tsk = current;
#endif
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 75e905508f27..f509c6be4f57 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/kprobes.h>
+#include <linux/rethook.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
@@ -66,6 +67,11 @@ int notrace unwind_frame(struct stackframe *frame)
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
#endif
+#ifdef CONFIG_RETHOOK
+ if (is_rethook_trampoline(frame->pc))
+ frame->pc = rethook_find_ret_addr(frame->tsk, frame->fp,
+ &frame->kr_cur);
+#endif
#ifdef CONFIG_KRETPROBES
if (is_kretprobe_trampoline(frame->pc))
frame->pc = kretprobe_find_ret_addr(frame->tsk,
diff --git a/arch/arm/probes/Makefile b/arch/arm/probes/Makefile
index 8b0ea5ace100..10c083a22223 100644
--- a/arch/arm/probes/Makefile
+++ b/arch/arm/probes/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_KPROBES) += decode-thumb.o
else
obj-$(CONFIG_KPROBES) += decode-arm.o
endif
+obj-$(CONFIG_RETHOOK) += rethook.o
diff --git a/arch/arm/probes/rethook.c b/arch/arm/probes/rethook.c
new file mode 100644
index 000000000000..adc16cdf358a
--- /dev/null
+++ b/arch/arm/probes/rethook.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arm implementation of rethook. Mostly copied from arch/arm/probes/kprobes/core.c
+ */
+
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+
+/* Called from arch_rethook_trampoline */
+static __used unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs)
+{
+ return rethook_trampoline_handler(regs, regs->ARM_fp);
+}
+NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
+
+/*
+ * When a rethook'ed function returns, it returns to arch_rethook_trampoline
+ * which calls rethook callback. We construct a struct pt_regs to
+ * give a view of registers r0-r11, sp, lr, and pc to the user
+ * return-handler. This is not a complete pt_regs structure, but that
+ * should be enough for stacktrace from the return handler with or
+ * without pt_regs.
+ */
+void __naked arch_rethook_trampoline(void)
+{
+ __asm__ __volatile__ (
+#ifdef CONFIG_FRAME_POINTER
+ "ldr lr, =arch_rethook_trampoline \n\t"
+ /* this makes a framepointer on pt_regs. */
+#ifdef CONFIG_CC_IS_CLANG
+ "stmdb sp, {sp, lr, pc} \n\t"
+ "sub sp, sp, #12 \n\t"
+ /* In clang case, pt_regs->ip = lr. */
+ "stmdb sp!, {r0 - r11, lr} \n\t"
+ /* fp points regs->r11 (fp) */
+ "add fp, sp, #44 \n\t"
+#else /* !CONFIG_CC_IS_CLANG */
+ /* In gcc case, pt_regs->ip = fp. */
+ "stmdb sp, {fp, sp, lr, pc} \n\t"
+ "sub sp, sp, #16 \n\t"
+ "stmdb sp!, {r0 - r11} \n\t"
+ /* fp points regs->r15 (pc) */
+ "add fp, sp, #60 \n\t"
+#endif /* CONFIG_CC_IS_CLANG */
+#else /* !CONFIG_FRAME_POINTER */
+ "sub sp, sp, #16 \n\t"
+ "stmdb sp!, {r0 - r11} \n\t"
+#endif /* CONFIG_FRAME_POINTER */
+ "mov r0, sp \n\t"
+ "bl arch_rethook_trampoline_callback \n\t"
+ "mov lr, r0 \n\t"
+ "ldmia sp!, {r0 - r11} \n\t"
+ "add sp, sp, #16 \n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+ "bx lr \n\t"
+#else
+ "mov pc, lr \n\t"
+#endif
+ : : : "memory");
+}
+NOKPROBE_SYMBOL(arch_rethook_trampoline);
+
+void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs)
+{
+ rh->ret_addr = regs->ARM_lr;
+ rh->frame = regs->ARM_fp;
+
+ /* Replace the return addr with trampoline addr. */
+ regs->ARM_lr = (unsigned long)arch_rethook_trampoline;
+}
+NOKPROBE_SYMBOL(arch_rethook_prepare);
Add rethook arm64 implementation. Most of the code has been copied from
kretprobes on arm64.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v5:
- Add description.
- Fix build error if !CONFIG_KRETPROBES
---
arch/arm64/Kconfig | 1
arch/arm64/include/asm/stacktrace.h | 2 -
arch/arm64/kernel/probes/Makefile | 1
arch/arm64/kernel/probes/rethook.c | 25 +++++++
arch/arm64/kernel/probes/rethook_trampoline.S | 87 +++++++++++++++++++++++++
arch/arm64/kernel/stacktrace.c | 7 ++
6 files changed, 121 insertions(+), 2 deletions(-)
create mode 100644 arch/arm64/kernel/probes/rethook.c
create mode 100644 arch/arm64/kernel/probes/rethook_trampoline.S
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c4207cf9bb17..c706ed25ea50 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -201,6 +201,7 @@ config ARM64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select HAVE_RETHOOK
select HAVE_GENERIC_VDSO
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 6564a01cc085..1cc472eb4852 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -54,7 +54,7 @@ struct stackframe {
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
unsigned long prev_fp;
enum stack_type prev_type;
-#ifdef CONFIG_KRETPROBES
+#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK)
struct llist_node *kr_cur;
#endif
};
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 8e4be92e25b1..24e689f44c32 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
simulate-insn.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \
simulate-insn.o
+obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o
diff --git a/arch/arm64/kernel/probes/rethook.c b/arch/arm64/kernel/probes/rethook.c
new file mode 100644
index 000000000000..38c33c81438b
--- /dev/null
+++ b/arch/arm64/kernel/probes/rethook.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic return hook for arm64.
+ * Most of the code is copied from arch/arm64/kernel/probes/kprobes.c
+ */
+
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+
+/* This is called from arch_rethook_trampoline() */
+unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs)
+{
+ return rethook_trampoline_handler(regs, regs->regs[29]);
+}
+NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
+
+void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs)
+{
+ rhn->ret_addr = regs->regs[30];
+ rhn->frame = regs->regs[29];
+
+ /* replace return addr (x30) with trampoline */
+ regs->regs[30] = (u64)arch_rethook_trampoline;
+}
+NOKPROBE_SYMBOL(arch_rethook_prepare);
diff --git a/arch/arm64/kernel/probes/rethook_trampoline.S b/arch/arm64/kernel/probes/rethook_trampoline.S
new file mode 100644
index 000000000000..610f520ee72b
--- /dev/null
+++ b/arch/arm64/kernel/probes/rethook_trampoline.S
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * trampoline entry and return code for rethook.
+ * Copied from arch/arm64/kernel/probes/kprobes_trampoline.S
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .macro save_all_base_regs
+ stp x0, x1, [sp, #S_X0]
+ stp x2, x3, [sp, #S_X2]
+ stp x4, x5, [sp, #S_X4]
+ stp x6, x7, [sp, #S_X6]
+ stp x8, x9, [sp, #S_X8]
+ stp x10, x11, [sp, #S_X10]
+ stp x12, x13, [sp, #S_X12]
+ stp x14, x15, [sp, #S_X14]
+ stp x16, x17, [sp, #S_X16]
+ stp x18, x19, [sp, #S_X18]
+ stp x20, x21, [sp, #S_X20]
+ stp x22, x23, [sp, #S_X22]
+ stp x24, x25, [sp, #S_X24]
+ stp x26, x27, [sp, #S_X26]
+ stp x28, x29, [sp, #S_X28]
+ add x0, sp, #PT_REGS_SIZE
+ stp lr, x0, [sp, #S_LR]
+ /*
+ * Construct a useful saved PSTATE
+ */
+ mrs x0, nzcv
+ mrs x1, daif
+ orr x0, x0, x1
+ mrs x1, CurrentEL
+ orr x0, x0, x1
+ mrs x1, SPSel
+ orr x0, x0, x1
+ stp xzr, x0, [sp, #S_PC]
+ .endm
+
+ .macro restore_all_base_regs
+ ldr x0, [sp, #S_PSTATE]
+ and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT)
+ msr nzcv, x0
+ ldp x0, x1, [sp, #S_X0]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+ ldp x8, x9, [sp, #S_X8]
+ ldp x10, x11, [sp, #S_X10]
+ ldp x12, x13, [sp, #S_X12]
+ ldp x14, x15, [sp, #S_X14]
+ ldp x16, x17, [sp, #S_X16]
+ ldp x18, x19, [sp, #S_X18]
+ ldp x20, x21, [sp, #S_X20]
+ ldp x22, x23, [sp, #S_X22]
+ ldp x24, x25, [sp, #S_X24]
+ ldp x26, x27, [sp, #S_X26]
+ ldp x28, x29, [sp, #S_X28]
+ .endm
+
+SYM_CODE_START(arch_rethook_trampoline)
+ sub sp, sp, #PT_REGS_SIZE
+
+ save_all_base_regs
+
+ /* Setup a frame pointer. */
+ add x29, sp, #S_FP
+
+ mov x0, sp
+ bl arch_rethook_trampoline_callback
+ /*
+ * Replace trampoline address in lr with actual orig_ret_addr return
+ * address.
+ */
+ mov lr, x0
+
+ /* The frame pointer (x29) is restored with other registers. */
+ restore_all_base_regs
+
+ add sp, sp, #PT_REGS_SIZE
+ ret
+
+SYM_CODE_END(arch_rethook_trampoline)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 94f83cd44e50..8e2c211b1187 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
+#include <linux/rethook.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -38,7 +39,7 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
{
frame->fp = fp;
frame->pc = pc;
-#ifdef CONFIG_KRETPROBES
+#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK)
frame->kr_cur = NULL;
#endif
@@ -136,6 +137,10 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
if (is_kretprobe_trampoline(frame->pc))
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
#endif
+#ifdef CONFIG_RETHOOK
+ if (is_rethook_trampoline(frame->pc))
+ frame->pc = rethook_find_ret_addr(tsk, frame->fp, &frame->kr_cur);
+#endif
return 0;
}
Add documentation of fprobe for users who need this interface.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v7:
- Clarify that unregister_fprobe() guarantees the callbacks will no
longer be called after it returns.
- Fix some wording.
Changes in v6:
- Update document according to the latest spec.
---
Documentation/trace/fprobe.rst | 171 ++++++++++++++++++++++++++++++++++++++++
Documentation/trace/index.rst | 1
2 files changed, 172 insertions(+)
create mode 100644 Documentation/trace/fprobe.rst
diff --git a/Documentation/trace/fprobe.rst b/Documentation/trace/fprobe.rst
new file mode 100644
index 000000000000..4275e95e16bc
--- /dev/null
+++ b/Documentation/trace/fprobe.rst
@@ -0,0 +1,171 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Fprobe - Function entry/exit probe
+==================================
+
+.. Author: Masami Hiramatsu <[email protected]>
+
+Introduction
+============
+
+Instead of using ftrace's full feature set, if you only want to attach callbacks
+on function entry and exit, similar to kprobes and kretprobes, you can
+use fprobe. Compared with kprobes and kretprobes, fprobe gives faster
+instrumentation for multiple functions with a single handler. This document
+describes how to use fprobe.
+
+The usage of fprobe
+===================
+
+The fprobe is a wrapper of ftrace (+ kretprobe-like return callback) to
+attach callbacks to the entry and exit of multiple functions. The user needs
+to set up the `struct fprobe` and pass it to `register_fprobe()`.
+
+Typically, `fprobe` data structure is initialized with the `entry_handler`
+and/or `exit_handler` as below.
+
+.. code-block:: c
+
+ struct fprobe fp = {
+ .entry_handler = my_entry_callback,
+ .exit_handler = my_exit_callback,
+ };
+
+To enable the fprobe, call one of register_fprobe(), register_fprobe_ips(), and
+register_fprobe_syms(). These register the fprobe with different types of
+parameters.
+
+The register_fprobe() enables a fprobe by function-name filters.
+E.g. this enables @fp on the "func*()" functions except "func2()"::
+
+ register_fprobe(&fp, "func*", "func2");
+
+The register_fprobe_ips() enables a fprobe by ftrace-location addresses.
+E.g.
+
+.. code-block:: c
+
+ unsigned long ips[] = { 0x.... };
+
+ register_fprobe_ips(&fp, ips, ARRAY_SIZE(ips));
+
+And the register_fprobe_syms() enables a fprobe by symbol names.
+E.g.
+
+.. code-block:: c
+
+ const char *syms[] = {"func1", "func2", "func3"};
+
+ register_fprobe_syms(&fp, syms, ARRAY_SIZE(syms));
+
+To disable (remove from functions) this fprobe, call::
+
+ unregister_fprobe(&fp);
+
+You can temporarily (soft) disable the fprobe by::
+
+ disable_fprobe(&fp);
+
+and resume by::
+
+ enable_fprobe(&fp);
+
+The above is defined by including the header::
+
+ #include <linux/fprobe.h>
+
+Same as ftrace, the registered callback will start being called some time
+after the register_fprobe() is called and before it returns. See
+:file:`Documentation/trace/ftrace.rst`.
+
+Also, unregister_fprobe() guarantees that both the entry and exit
+handlers are no longer called after unregister_fprobe()
+returns, the same as unregister_ftrace_function().
+
+The fprobe entry/exit handler
+=============================
+
+The prototype of the entry/exit callback function is as follows:
+
+.. code-block:: c
+
+ void callback_func(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+
+Note that both entry and exit callbacks have the same prototype. The @entry_ip is
+saved at function entry and passed to the exit handler.
+
+@fp
+ This is the address of `fprobe` data structure related to this handler.
+ You can embed the `fprobe` into your data structure and get it with the
+ container_of() macro from @fp. The @fp must not be NULL.
+
+@entry_ip
+ This is the entry address of the traced function (in both the entry and exit handlers).
+
+@regs
+ This is the `pt_regs` data structure at the entry and exit. Note that
+ the instruction pointer of @regs may be different from the @entry_ip
+ in the entry_handler. If you need the traced instruction pointer, you need
+ to use @entry_ip. On the other hand, in the exit_handler, the instruction
+ pointer of @regs is set to the correct return address.
+
+Share the callbacks with kprobes
+================================
+
+Since the recursion safety of the fprobe (and ftrace) is a bit different
+from the kprobes, this may cause an issue if user wants to run the same
+code from the fprobe and the kprobes.
+
+The kprobes has a per-cpu 'current_kprobe' variable which protects the
+kprobe handler from recursion in any case. On the other hand, the fprobe
+uses only ftrace_test_recursion_trylock(), which will allow interrupt
+context to call another (or the same) fprobe while the fprobe user handler is
+running.
+
+This is not a problem if the common callback shared among the
+kprobes and the fprobe has its own recursion detection, or it can handle
+the recursion in the different contexts (normal/interrupt/NMI.)
+But if it relies on the 'current_kprobe' recursion lock, it has to check
+kprobe_running() and use kprobe_busy_*() APIs.
+
+Fprobe has FPROBE_FL_KPROBE_SHARED flag to do this. If your common callback
+code will be shared with kprobes, please set FPROBE_FL_KPROBE_SHARED
+*before* registering the fprobe, like:
+
+.. code-block:: c
+
+ fprobe.flags = FPROBE_FL_KPROBE_SHARED;
+
+ register_fprobe(&fprobe, "func*", NULL);
+
+This will protect your common callback from the nested call.
+
+The missed counter
+==================
+
+The `fprobe` data structure has `fprobe::nmissed` counter field as same as
+kprobes.
+This counter counts up when:
+
+ - fprobe fails to take ftrace_recursion lock. This usually means that a function
+ which is traced by other ftrace users is called from the entry_handler.
+
+ - fprobe fails to setup the function exit because of the shortage of rethook
+ (the shadow stack for hooking the function return.)
+
+Since the `fprobe::nmissed` field is counted up in both cases, the former case
+will skip both the entry and exit callbacks and the latter case will skip the
+exit callback, but in both cases the counter is increased by just 1.
+
+Note that if you set FTRACE_OPS_FL_RECURSION and/or FTRACE_OPS_FL_RCU in
+`fprobe::ops::flags` (ftrace_ops::flags) when registering the fprobe, this
+counter may not work correctly, because those will skip fprobe's callback.
+
+
+Functions and structures
+========================
+
+.. kernel-doc:: include/linux/fprobe.h
+.. kernel-doc:: kernel/trace/fprobe.c
+
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index 3769b9b7aed8..b9f3757f8269 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -9,6 +9,7 @@ Linux Tracing Technologies
tracepoint-analysis
ftrace
ftrace-uses
+ fprobe
kprobes
kprobetrace
uprobetracer
Add exit_handler to fprobe. fprobe + rethook allows us to hook the kernel
function return. The rethook will be enabled only if the
fprobe::exit_handler is set.
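For illustration, an exit handler (a sketch, modeled on the sample module
added later in this series) sees both the saved entry address and the real
return address:

  static void my_exit_cb(struct fprobe *fp, unsigned long entry_ip,
                         struct pt_regs *regs)
  {
          /* entry_ip is the probed function's entry, saved at function entry;
           * instruction_pointer(regs) is the address being returned to. */
          pr_info("return from %pS to %pS\n",
                  (void *)entry_ip, (void *)instruction_pointer(regs));
  }

Setting fprobe::exit_handler to such a callback is what makes the
registration path allocate and use the rethook.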
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v7:
- Fix unregister_fprobe() to ensure the rethook handlers are
finished when it returns.
- Update Kconfig help.
Changes in v6:
- Update according to the fprobe update.
Changes in v5:
- Add dependency for HAVE_RETHOOK.
Changes in v4:
- Check fprobe is disabled in the exit handler.
Changes in v3:
- Make sure to clear rethook->data before free.
- Handler checks the data is not NULL.
- Free the rethook only if the rethook is in use.
---
include/linux/fprobe.h | 6 ++
kernel/trace/Kconfig | 9 ++-
kernel/trace/fprobe.c | 130 ++++++++++++++++++++++++++++++++++++++++++++----
3 files changed, 130 insertions(+), 15 deletions(-)
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index b920dc1b2969..acfdcc37acf6 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -5,19 +5,25 @@
#include <linux/compiler.h>
#include <linux/ftrace.h>
+#include <linux/rethook.h>
/**
* struct fprobe - ftrace based probe.
* @ops: The ftrace_ops.
* @nmissed: The counter for missing events.
* @flags: The status flag.
+ * @rethook: The rethook data structure. (internal data)
* @entry_handler: The callback function for function entry.
+ * @exit_handler: The callback function for function exit.
*/
struct fprobe {
struct ftrace_ops ops;
unsigned long nmissed;
unsigned int flags;
+ struct rethook *rethook;
+
void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+ void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
};
#define FPROBE_FL_DISABLED 1
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9e66fd29d94e..1a0f42561bf7 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -245,11 +245,14 @@ config FPROBE
bool "Kernel Function Probe (fprobe)"
depends on FUNCTION_TRACER
depends on DYNAMIC_FTRACE_WITH_REGS
+ depends on HAVE_RETHOOK
+ select RETHOOK
default n
help
- This option enables kernel function probe (fprobe) based on ftrace,
- which is similar to kprobes, but probes only for kernel function
- entries and it can probe multiple functions by one fprobe.
+ This option enables kernel function probe (fprobe) based on ftrace.
+ The fprobe is similar to kprobes, but probes only for kernel function
+ entries and exits. This also can probe multiple functions by one
+ fprobe.
If unsure, say N.
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index b5d4f8baaf43..d733c0d9cb09 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -8,12 +8,22 @@
#include <linux/fprobe.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
+#include <linux/rethook.h>
#include <linux/slab.h>
#include <linux/sort.h>
+#include "trace.h"
+
+struct fprobe_rethook_node {
+ struct rethook_node node;
+ unsigned long entry_ip;
+};
+
static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
+ struct fprobe_rethook_node *fpr;
+ struct rethook_node *rh;
struct fprobe *fp;
int bit;
@@ -30,10 +40,37 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
if (fp->entry_handler)
fp->entry_handler(fp, ip, ftrace_get_regs(fregs));
+ if (fp->exit_handler) {
+ rh = rethook_try_get(fp->rethook);
+ if (!rh) {
+ fp->nmissed++;
+ goto out;
+ }
+ fpr = container_of(rh, struct fprobe_rethook_node, node);
+ fpr->entry_ip = ip;
+ rethook_hook(rh, ftrace_get_regs(fregs));
+ }
+
+out:
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(fprobe_handler);
+static void fprobe_exit_handler(struct rethook_node *rh, void *data,
+ struct pt_regs *regs)
+{
+ struct fprobe *fp = (struct fprobe *)data;
+ struct fprobe_rethook_node *fpr;
+
+ if (!fp || fprobe_disabled(fp))
+ return;
+
+ fpr = container_of(rh, struct fprobe_rethook_node, node);
+
+ fp->exit_handler(fp, fpr->entry_ip, regs);
+}
+NOKPROBE_SYMBOL(fprobe_exit_handler);
+
/* Convert ftrace location address from symbols */
static unsigned long *get_ftrace_locations(const char **syms, int num)
{
@@ -76,6 +113,48 @@ static void fprobe_init(struct fprobe *fp)
fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS;
}
+static int fprobe_init_rethook(struct fprobe *fp, int num)
+{
+ int i, size;
+
+ if (num < 0)
+ return -EINVAL;
+
+ if (!fp->exit_handler) {
+ fp->rethook = NULL;
+ return 0;
+ }
+
+ /* Initialize rethook if needed */
+ size = num * num_possible_cpus() * 2;
+ if (size < 0)
+ return -E2BIG;
+
+ fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
+ for (i = 0; i < size; i++) {
+ struct rethook_node *node;
+
+ node = kzalloc(sizeof(struct fprobe_rethook_node), GFP_KERNEL);
+ if (!node) {
+ rethook_free(fp->rethook);
+ fp->rethook = NULL;
+ return -ENOMEM;
+ }
+ rethook_add_node(fp->rethook, node);
+ }
+ return 0;
+}
+
+static void fprobe_fail_cleanup(struct fprobe *fp)
+{
+ if (fp->rethook) {
+ /* Don't need to cleanup rethook->handler because this is not used. */
+ rethook_free(fp->rethook);
+ fp->rethook = NULL;
+ }
+ ftrace_free_filter(&fp->ops);
+}
+
/**
* register_fprobe() - Register fprobe to ftrace by pattern.
* @fp: A fprobe data structure to be registered.
@@ -89,6 +168,7 @@ static void fprobe_init(struct fprobe *fp)
*/
int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
+ struct ftrace_hash *hash;
unsigned char *str;
int ret, len;
@@ -113,10 +193,21 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter
goto out;
}
- ret = register_ftrace_function(&fp->ops);
+ /* TODO:
+ * correctly calculate the total number of filtered symbols
+ * from both filter and notfilter.
+ */
+ hash = fp->ops.local_hash.filter_hash;
+ if (WARN_ON_ONCE(!hash))
+ goto out;
+
+ ret = fprobe_init_rethook(fp, (int)hash->count);
+ if (!ret)
+ ret = register_ftrace_function(&fp->ops);
+
out:
if (ret)
- ftrace_free_filter(&fp->ops);
+ fprobe_fail_cleanup(fp);
return ret;
}
EXPORT_SYMBOL_GPL(register_fprobe);
@@ -144,12 +235,15 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
fprobe_init(fp);
ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = fprobe_init_rethook(fp, num);
if (!ret)
ret = register_ftrace_function(&fp->ops);
if (ret)
- ftrace_free_filter(&fp->ops);
-
+ fprobe_fail_cleanup(fp);
return ret;
}
EXPORT_SYMBOL_GPL(register_fprobe_ips);
@@ -180,14 +274,16 @@ int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
return PTR_ERR(addrs);
ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0);
+ kfree(addrs);
if (ret)
- goto out;
- ret = register_ftrace_function(&fp->ops);
- if (ret)
- ftrace_free_filter(&fp->ops);
+ return ret;
-out:
- kfree(addrs);
+ ret = fprobe_init_rethook(fp, num);
+ if (!ret)
+ ret = register_ftrace_function(&fp->ops);
+
+ if (ret)
+ fprobe_fail_cleanup(fp);
return ret;
}
@@ -208,10 +304,20 @@ int unregister_fprobe(struct fprobe *fp)
if (!fp || fp->ops.func != fprobe_handler)
return -EINVAL;
+ /*
+ * rethook_free() starts disabling the rethook, but the rethook handlers
+ * may be running on other processors at this point. To make sure that all
+ * currently running handlers are finished, call unregister_ftrace_function()
+ * after this.
+ */
+ if (fp->rethook)
+ rethook_free(fp->rethook);
+
ret = unregister_ftrace_function(&fp->ops);
+ if (ret < 0)
+ return ret;
- if (!ret)
- ftrace_free_filter(&fp->ops);
+ ftrace_free_filter(&fp->ops);
return ret;
}
Introduce FPROBE_FL_KPROBE_SHARED flag for sharing fprobe callback with
kprobes safely from the viewpoint of recursion.
Since the recursion safety of the fprobe (and ftrace) is a bit different
from the kprobes, this may cause an issue if the user wants to run the same
code from the fprobe and the kprobes.
The kprobes has a per-cpu 'current_kprobe' variable which protects the
kprobe handler from recursion in any case. On the other hand, the fprobe
uses only ftrace_test_recursion_trylock(), which will allow interrupt
context to call another (or the same) fprobe while the fprobe user handler is
running.
This is not a problem if the common callback shared among the
kprobes and the fprobe has its own recursion detection, or it can handle
the recursion in the different contexts (normal/interrupt/NMI.)
But if it relies on the 'current_kprobe' recursion lock, it has to check
kprobe_running() and use kprobe_busy_*() APIs.
Fprobe has FPROBE_FL_KPROBE_SHARED flag to do this. If your common callback
code will be shared with kprobes, please set FPROBE_FL_KPROBE_SHARED
*before* registering the fprobe, like:
fprobe.flags = FPROBE_FL_KPROBE_SHARED;
register_fprobe(&fprobe, "func*", NULL);
This will protect your common callback from the nested call.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
include/linux/fprobe.h | 12 ++++++++++++
include/linux/kprobes.h | 3 +++
kernel/trace/fprobe.c | 19 ++++++++++++++++++-
3 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index acfdcc37acf6..94b9386d7267 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -26,13 +26,25 @@ struct fprobe {
void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
};
+/* This fprobe is soft-disabled. */
#define FPROBE_FL_DISABLED 1
+/*
+ * This fprobe handler will be shared with kprobes.
+ * This flag must be set before registering.
+ */
+#define FPROBE_FL_KPROBE_SHARED 2
+
static inline bool fprobe_disabled(struct fprobe *fp)
{
return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
}
+static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
+}
+
#ifdef CONFIG_FPROBE
int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8c8f7a4d93af..efe4fc364f6a 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -433,6 +433,9 @@ static inline struct kprobe *kprobe_running(void)
{
return NULL;
}
+#define kprobe_busy_begin() do {} while (0)
+#define kprobe_busy_end() do {} while (0)
+
static inline int register_kprobe(struct kprobe *p)
{
return -EOPNOTSUPP;
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index d733c0d9cb09..4881b98f35ab 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -56,6 +56,20 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
}
NOKPROBE_SYMBOL(fprobe_handler);
+static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ struct fprobe *fp = container_of(ops, struct fprobe, ops);
+
+ if (unlikely(kprobe_running())) {
+ fp->nmissed++;
+ return;
+ }
+ kprobe_busy_begin();
+ fprobe_handler(ip, parent_ip, ops, fregs);
+ kprobe_busy_end();
+}
+
static void fprobe_exit_handler(struct rethook_node *rh, void *data,
struct pt_regs *regs)
{
@@ -109,7 +123,10 @@ static unsigned long *get_ftrace_locations(const char **syms, int num)
static void fprobe_init(struct fprobe *fp)
{
fp->nmissed = 0;
- fp->ops.func = fprobe_handler;
+ if (fprobe_shared_with_kprobes(fp))
+ fp->ops.func = fprobe_kprobe_handler;
+ else
+ fp->ops.func = fprobe_handler;
fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS;
}
Add a sample program for the fprobe. The sample_fprobe puts a fprobe on
kernel_clone() by default. This dumps the stack and some address info at
the function entry and exit.
The sample_fprobe.ko takes 2 parameters.
- symbol: you can specify comma-separated symbols or a wildcard symbol
pattern (in which case you can not use a comma)
- stackdump: a bool value to enable or disable the stack dump in the fprobe
handler.
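For example, a hypothetical invocation (the symbol names below are only
examples) would be:

  # insmod fprobe_example.ko symbol='vfs_read,vfs_write' stackdump=0

or, with a wildcard pattern:

  # insmod fprobe_example.ko symbol='vfs_*'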
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v6:
- Dump stack on the handler as explained in the comment.
- Add "stackdump" option to enable/disable stackdump.
- Support wildcard filter.
Changes in v2:
- Fix infinite loop for multiple symbols.
- Fix memory leaks for copied string and entry array.
- Update for new fprobe APIs.
- Fix style issues.
---
samples/Kconfig | 7 ++
samples/Makefile | 1
samples/fprobe/Makefile | 3 +
samples/fprobe/fprobe_example.c | 120 +++++++++++++++++++++++++++++++++++++++
4 files changed, 131 insertions(+)
create mode 100644 samples/fprobe/Makefile
create mode 100644 samples/fprobe/fprobe_example.c
diff --git a/samples/Kconfig b/samples/Kconfig
index 43d2e9aa557f..e010c2c1256c 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -73,6 +73,13 @@ config SAMPLE_HW_BREAKPOINT
help
This builds kernel hardware breakpoint example modules.
+config SAMPLE_FPROBE
+ tristate "Build fprobe examples -- loadable modules only"
+ depends on FPROBE && m
+ help
+ This builds a fprobe example module. This module has an option 'symbol'.
+ You can specify a probed symbol or symbols separated with ','.
+
config SAMPLE_KFIFO
tristate "Build kfifo examples -- loadable modules only"
depends on m
diff --git a/samples/Makefile b/samples/Makefile
index 4bcd6b93bffa..4f73fe7aa473 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/
subdir-$(CONFIG_SAMPLE_WATCHDOG) += watchdog
subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak/
+obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/
diff --git a/samples/fprobe/Makefile b/samples/fprobe/Makefile
new file mode 100644
index 000000000000..ecccbfa6e99b
--- /dev/null
+++ b/samples/fprobe/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_SAMPLE_FPROBE) += fprobe_example.o
diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c
new file mode 100644
index 000000000000..24d3cf109140
--- /dev/null
+++ b/samples/fprobe/fprobe_example.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Here's a sample kernel module showing the use of fprobe to dump a
+ * stack trace and selected registers when kernel_clone() is called.
+ *
+ * For more information on the theory of operation of fprobes, see
+ * Documentation/trace/fprobe.rst
+ *
+ * You will see the trace data in /var/log/messages and on the console
+ * whenever kernel_clone() is invoked to create a new process.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fprobe.h>
+#include <linux/sched/debug.h>
+#include <linux/slab.h>
+
+#define BACKTRACE_DEPTH 16
+#define MAX_SYMBOL_LEN 4096
+struct fprobe sample_probe;
+
+static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
+module_param_string(symbol, symbol, sizeof(symbol), 0644);
+static char nosymbol[MAX_SYMBOL_LEN] = "";
+module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644);
+static bool stackdump = true;
+module_param(stackdump, bool, 0644);
+
+static void show_backtrace(void)
+{
+ unsigned long stacks[BACKTRACE_DEPTH];
+ unsigned int len;
+
+ len = stack_trace_save(stacks, BACKTRACE_DEPTH, 2);
+ stack_trace_print(stacks, len, 24);
+}
+
+static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+ pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+ if (stackdump)
+ show_backtrace();
+}
+
+static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
+{
+ unsigned long rip = instruction_pointer(regs);
+
+ pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+ (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+ if (stackdump)
+ show_backtrace();
+}
+
+static int __init fprobe_init(void)
+{
+ char *p, *symbuf = NULL;
+ const char **syms;
+ int ret, count, i;
+
+ sample_probe.entry_handler = sample_entry_handler;
+ sample_probe.exit_handler = sample_exit_handler;
+
+ if (strchr(symbol, '*')) {
+ /* filter based fprobe */
+ ret = register_fprobe(&sample_probe, symbol,
+ nosymbol[0] == '\0' ? NULL : nosymbol);
+ goto out;
+ } else if (!strchr(symbol, ',')) {
+ symbuf = symbol;
+ ret = register_fprobe_syms(&sample_probe, (const char **)&symbuf, 1);
+ goto out;
+ }
+
+ /* Comma separated symbols */
+ symbuf = kstrdup(symbol, GFP_KERNEL);
+ if (!symbuf)
+ return -ENOMEM;
+ p = symbuf;
+ count = 1;
+ while ((p = strchr(++p, ',')) != NULL)
+ count++;
+
+ pr_info("%d symbols found\n", count);
+
+ syms = kcalloc(count, sizeof(char *), GFP_KERNEL);
+ if (!syms) {
+ kfree(symbuf);
+ return -ENOMEM;
+ }
+
+ p = symbuf;
+ for (i = 0; i < count; i++)
+ syms[i] = strsep(&p, ",");
+
+ ret = register_fprobe_syms(&sample_probe, syms, count);
+ kfree(syms);
+ kfree(symbuf);
+out:
+ if (ret < 0)
+ pr_err("register_fprobe failed, returned %d\n", ret);
+ else
+ pr_info("Planted fprobe at %s\n", symbol);
+
+ return ret;
+}
+
+static void __exit fprobe_exit(void)
+{
+ unregister_fprobe(&sample_probe);
+
+ pr_info("fprobe at %s unregistered\n", symbol);
+}
+
+module_init(fprobe_init)
+module_exit(fprobe_exit)
+MODULE_LICENSE("GPL");
The fprobe is a wrapper API for the ftrace function tracer.
Unlike kprobes, this probe only supports the function entry, but it
can probe multiple functions with one fprobe. The usage is similar: the
user sets a callback to fprobe::entry_handler and calls
register_fprobe*() with the probed functions.
There are 3 registration interfaces:
- register_fprobe() takes filtering patterns of the function names.
- register_fprobe_ips() takes an array of ftrace-location addresses.
- register_fprobe_syms() takes an array of function names.
The registered fprobes can be unregistered with unregister_fprobe().
e.g.
struct fprobe fp = { .entry_handler = user_handler };
const char *targets[] = { "func1", "func2", "func3"};
...
ret = register_fprobe_syms(&fp, targets, ARRAY_SIZE(targets));
...
unregister_fprobe(&fp);
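As a complementary sketch (the handler name and patterns are only
illustrative), the pattern-based interface can also exclude symbols via the
notfilter argument:

	static void my_entry_handler(struct fprobe *fp, unsigned long ip,
				     struct pt_regs *regs)
	{
		/* called at the entry of every matched function */
	}

	struct fprobe fp = { .entry_handler = my_entry_handler };
	...
	/* probe all vfs_* functions except vfs_write* */
	ret = register_fprobe(&fp, "vfs_*", "vfs_write*");
	...
	unregister_fprobe(&fp);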
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v7:
- Fix kerneldoc for the APIs.
Changes in v6:
- Remove syms, addrs, and nentry fields from struct fprobe.
- Introduce 3 variants of registration functions.
- Call ftrace_free_filter() at unregistration.
Changes in v4:
- Fix a memory leak when symbol lookup failed.
- Use ftrace location address instead of symbol address.
- Convert the given symbol address to ftrace location automatically.
- Rename fprobe::ftrace to fprobe::ops.
- Update the Kconfig description.
---
include/linux/fprobe.h | 79 +++++++++++++++++
kernel/trace/Kconfig | 12 +++
kernel/trace/Makefile | 1
kernel/trace/fprobe.c | 218 ++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 310 insertions(+)
create mode 100644 include/linux/fprobe.h
create mode 100644 kernel/trace/fprobe.c
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
new file mode 100644
index 000000000000..b920dc1b2969
--- /dev/null
+++ b/include/linux/fprobe.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Simple ftrace probe wrapper */
+#ifndef _LINUX_FPROBE_H
+#define _LINUX_FPROBE_H
+
+#include <linux/compiler.h>
+#include <linux/ftrace.h>
+
+/**
+ * struct fprobe - ftrace based probe.
+ * @ops: The ftrace_ops.
+ * @nmissed: The counter for missing events.
+ * @flags: The status flag.
+ * @entry_handler: The callback function for function entry.
+ */
+struct fprobe {
+ struct ftrace_ops ops;
+ unsigned long nmissed;
+ unsigned int flags;
+ void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+};
+
+#define FPROBE_FL_DISABLED 1
+
+static inline bool fprobe_disabled(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
+}
+
+#ifdef CONFIG_FPROBE
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
+int unregister_fprobe(struct fprobe *fp);
+#else
+static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int unregister_fprobe(struct fprobe *fp)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * disable_fprobe() - Disable fprobe
+ * @fp: The fprobe to be disabled.
+ *
+ * This will soft-disable @fp. Note that this doesn't remove the ftrace
+ * hooks from the function entry.
+ */
+static inline void disable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags |= FPROBE_FL_DISABLED;
+}
+
+/**
+ * enable_fprobe() - Enable fprobe
+ * @fp: The fprobe to be enabled.
+ *
+ * This will soft-enable @fp.
+ */
+static inline void enable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags &= ~FPROBE_FL_DISABLED;
+}
+
+#endif
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 752ed89a293b..043c8f6c4075 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -230,6 +230,18 @@ config DYNAMIC_FTRACE_WITH_ARGS
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
+config FPROBE
+ bool "Kernel Function Probe (fprobe)"
+ depends on FUNCTION_TRACER
+ depends on DYNAMIC_FTRACE_WITH_REGS
+ default n
+ help
+ This option enables the kernel function probe (fprobe) based on ftrace,
+ which is similar to kprobes, but probes only kernel function entries
+ and can probe multiple functions with one fprobe.
+
+ If unsure, say N.
+
config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index bedc5caceec7..79255f9de9a4 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o
+obj-$(CONFIG_FPROBE) += fprobe.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
new file mode 100644
index 000000000000..b5d4f8baaf43
--- /dev/null
+++ b/kernel/trace/fprobe.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fprobe - Simple ftrace probe wrapper for function entry.
+ */
+#define pr_fmt(fmt) "fprobe: " fmt
+
+#include <linux/err.h>
+#include <linux/fprobe.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ struct fprobe *fp;
+ int bit;
+
+ fp = container_of(ops, struct fprobe, ops);
+ if (fprobe_disabled(fp))
+ return;
+
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0) {
+ fp->nmissed++;
+ return;
+ }
+
+ if (fp->entry_handler)
+ fp->entry_handler(fp, ip, ftrace_get_regs(fregs));
+
+ ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(fprobe_handler);
+
+/* Convert ftrace location address from symbols */
+static unsigned long *get_ftrace_locations(const char **syms, int num)
+{
+ unsigned long *addrs, addr, size;
+ int i;
+
+ /* Convert symbols to symbol address */
+ addrs = kcalloc(num, sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < num; i++) {
+ addrs[i] = kallsyms_lookup_name(syms[i]);
+ if (!addrs[i]) /* Maybe wrong symbol */
+ goto error;
+ }
+
+ /* Convert symbol address to ftrace location. */
+ for (i = 0; i < num; i++) {
+ if (!kallsyms_lookup_size_offset(addrs[i], &size, NULL))
+ size = MCOUNT_INSN_SIZE;
+ addr = ftrace_location_range(addrs[i], addrs[i] + size - 1);
+ if (!addr) /* No dynamic ftrace there. */
+ goto error;
+ addrs[i] = addr;
+ }
+
+ return addrs;
+
+error:
+ kfree(addrs);
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void fprobe_init(struct fprobe *fp)
+{
+ fp->nmissed = 0;
+ fp->ops.func = fprobe_handler;
+ fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS;
+}
+
+/**
+ * register_fprobe() - Register fprobe to ftrace by pattern.
+ * @fp: A fprobe data structure to be registered.
+ * @filter: A wildcard pattern of probed symbols.
+ * @notfilter: A wildcard pattern of NOT probed symbols.
+ *
+ * Register @fp to ftrace for enabling the probe on the symbols matching @filter.
+ * If @notfilter is not NULL, the symbols matching @notfilter are not probed.
+ *
+ * Return 0 if @fp is registered successfully, -errno if not.
+ */
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ unsigned char *str;
+ int ret, len;
+
+ if (!fp || !filter)
+ return -EINVAL;
+
+ fprobe_init(fp);
+
+ len = strlen(filter);
+ str = kstrdup(filter, GFP_KERNEL);
+ ret = ftrace_set_filter(&fp->ops, str, len, 0);
+ kfree(str);
+ if (ret)
+ return ret;
+
+ if (notfilter) {
+ len = strlen(notfilter);
+ str = kstrdup(notfilter, GFP_KERNEL);
+ ret = ftrace_set_notrace(&fp->ops, str, len, 0);
+ kfree(str);
+ if (ret)
+ goto out;
+ }
+
+ ret = register_ftrace_function(&fp->ops);
+out:
+ if (ret)
+ ftrace_free_filter(&fp->ops);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_fprobe);
+
+/**
+ * register_fprobe_ips() - Register fprobe to ftrace by address.
+ * @fp: A fprobe data structure to be registered.
+ * @addrs: An array of target ftrace location addresses.
+ * @num: The number of entries of @addrs.
+ *
+ * Register @fp to ftrace for enabling the probe on the addresses given by @addrs.
+ * The @addrs must be ftrace location addresses, which may be the symbol
+ * address + an arch-dependent offset.
+ * If you are unsure what this means, please use the other registration functions.
+ *
+ * Return 0 if @fp is registered successfully, -errno if not.
+ */
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ int ret;
+
+ if (!fp || !addrs || num <= 0)
+ return -EINVAL;
+
+ fprobe_init(fp);
+
+ ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0);
+ if (!ret)
+ ret = register_ftrace_function(&fp->ops);
+
+ if (ret)
+ ftrace_free_filter(&fp->ops);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_fprobe_ips);
+
+/**
+ * register_fprobe_syms() - Register fprobe to ftrace by symbols.
+ * @fp: A fprobe data structure to be registered.
+ * @syms: An array of target symbols.
+ * @num: The number of entries of @syms.
+ *
+ * Register @fp to the symbols given by the @syms array. This will be useful if
+ * you are sure the symbols exist in the kernel.
+ *
+ * Return 0 if @fp is registered successfully, -errno if not.
+ */
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ unsigned long *addrs;
+ int ret;
+
+ if (!fp || !syms || num <= 0)
+ return -EINVAL;
+
+ fprobe_init(fp);
+
+ addrs = get_ftrace_locations(syms, num);
+ if (IS_ERR(addrs))
+ return PTR_ERR(addrs);
+
+ ret = ftrace_set_filter_ips(&fp->ops, addrs, num, 0, 0);
+ if (ret)
+ goto out;
+ ret = register_ftrace_function(&fp->ops);
+ if (ret)
+ ftrace_free_filter(&fp->ops);
+
+out:
+ kfree(addrs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_fprobe_syms);
+
+/**
+ * unregister_fprobe() - Unregister fprobe from ftrace
+ * @fp: A fprobe data structure to be unregistered.
+ *
+ * Unregister fprobe (and remove ftrace hooks from the function entries).
+ *
+ * Return 0 if @fp is unregistered successfully, -errno if not.
+ */
+int unregister_fprobe(struct fprobe *fp)
+{
+ int ret;
+
+ if (!fp || fp->ops.func != fprobe_handler)
+ return -EINVAL;
+
+ ret = unregister_ftrace_function(&fp->ops);
+
+ if (!ret)
+ ftrace_free_filter(&fp->ops);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(unregister_fprobe);
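To make the register_fprobe_ips() contract described in its kerneldoc above
concrete, here is a minimal kernel-internal sketch (the handler name and the
choice of kernel_clone are only illustrative assumptions; note that
kallsyms_lookup_name() is not exported to modules). It converts a symbol to
its ftrace location the same way get_ftrace_locations() above does:

	#include <linux/fprobe.h>
	#include <linux/ftrace.h>
	#include <linux/kallsyms.h>

	static void my_entry_handler(struct fprobe *fp, unsigned long ip,
				     struct pt_regs *regs)
	{
		/* entry callback */
	}

	static struct fprobe ip_fp = { .entry_handler = my_entry_handler };

	static int probe_kernel_clone(void)
	{
		unsigned long addr, size, addrs[1];

		/* @addrs must hold ftrace locations, not raw symbol addresses. */
		addr = kallsyms_lookup_name("kernel_clone");
		if (!addr)
			return -ENOENT;
		if (!kallsyms_lookup_size_offset(addr, &size, NULL))
			size = MCOUNT_INSN_SIZE;
		addrs[0] = ftrace_location_range(addr, addr + size - 1);
		if (!addrs[0])
			return -ENOENT;

		return register_fprobe_ips(&ip_fp, addrs, 1);
	}

In practice register_fprobe_syms() performs this conversion internally; the
sketch only illustrates what "ftrace location" means for register_fprobe_ips().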
Add the x86 implementation of rethook. Most of the code has been copied from
kretprobes on x86.
Signed-off-by: Masami Hiramatsu <[email protected]>
---
Changes in v5:
- Fix a build error if !CONFIG_KRETPROBES and !CONFIG_RETHOOK.
Changes in v4:
- Fix stack backtrace in the same way as kretprobe does.
---
arch/x86/Kconfig | 1
arch/x86/include/asm/unwind.h | 8 ++-
arch/x86/kernel/Makefile | 1
arch/x86/kernel/kprobes/common.h | 1
arch/x86/kernel/rethook.c | 115 ++++++++++++++++++++++++++++++++++++++
5 files changed, 125 insertions(+), 1 deletion(-)
create mode 100644 arch/x86/kernel/rethook.c
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7399327d1eff..939c4c897e63 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -219,6 +219,7 @@ config X86
select HAVE_KPROBES_ON_FTRACE
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_KRETPROBES
+ select HAVE_RETHOOK
select HAVE_KVM
select HAVE_LIVEPATCH if X86_64
select HAVE_MIXED_BREAKPOINTS_REGS
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 2a1f8734416d..192df5b2094d 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
+#include <linux/rethook.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
@@ -16,7 +17,7 @@ struct unwind_state {
unsigned long stack_mask;
struct task_struct *task;
int graph_idx;
-#ifdef CONFIG_KRETPROBES
+#if defined(CONFIG_KRETPROBES) || defined(CONFIG_RETHOOK)
struct llist_node *kr_cur;
#endif
bool error;
@@ -107,6 +108,11 @@ static inline
unsigned long unwind_recover_kretprobe(struct unwind_state *state,
unsigned long addr, unsigned long *addr_p)
{
+#ifdef CONFIG_RETHOOK
+ if (is_rethook_trampoline(addr))
+ return rethook_find_ret_addr(state->task, (unsigned long)addr_p,
+ &state->kr_cur);
+#endif
#ifdef CONFIG_KRETPROBES
return is_kretprobe_trampoline(addr) ?
kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) :
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 2ff3e600f426..66593d8c4d74 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
obj-$(CONFIG_TRACING) += trace.o
+obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index 7d3a2e2daf01..c993521d4933 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -6,6 +6,7 @@
#include <asm/asm.h>
#include <asm/frame.h>
+#include <asm/insn.h>
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/rethook.c b/arch/x86/kernel/rethook.c
new file mode 100644
index 000000000000..f2f3b9526e43
--- /dev/null
+++ b/arch/x86/kernel/rethook.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * x86 implementation of rethook. Mostly copied from arch/x86/kernel/kprobes/core.c.
+ */
+#include <linux/bug.h>
+#include <linux/rethook.h>
+#include <linux/kprobes.h>
+
+#include "kprobes/common.h"
+
+/*
+ * Called from arch_rethook_trampoline
+ */
+__used __visible void arch_rethook_trampoline_callback(struct pt_regs *regs)
+{
+ unsigned long *frame_pointer;
+
+ /* fixup registers */
+ regs->cs = __KERNEL_CS;
+#ifdef CONFIG_X86_32
+ regs->gs = 0;
+#endif
+ regs->ip = (unsigned long)&arch_rethook_trampoline;
+ regs->orig_ax = ~0UL;
+ regs->sp += sizeof(long);
+ frame_pointer = &regs->sp + 1;
+
+ /*
+ * The return address at 'frame_pointer' is recovered by the
+ * arch_rethook_fixup_return(), which is called from this
+ * rethook_trampoline_handler().
+ */
+ rethook_trampoline_handler(regs, (unsigned long)frame_pointer);
+
+ /*
+ * Copy FLAGS to 'pt_regs::sp' so that arch_rethook_trampoline()
+ * can do RET right after POPF.
+ */
+ regs->sp = regs->flags;
+}
+NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
+
+/*
+ * When a target function returns, this code saves registers and calls
+ * arch_rethook_trampoline_callback(), which calls the rethook handler.
+ */
+asm(
+ ".text\n"
+ ".global arch_rethook_trampoline\n"
+ ".type arch_rethook_trampoline, @function\n"
+ "arch_rethook_trampoline:\n"
+#ifdef CONFIG_X86_64
+ /* Push a fake return address to tell the unwinder it's a rethook trampoline. */
+ " pushq $arch_rethook_trampoline\n"
+ UNWIND_HINT_FUNC
+ /* Save the 'sp - 8', this will be fixed later. */
+ " pushq %rsp\n"
+ " pushfq\n"
+ SAVE_REGS_STRING
+ " movq %rsp, %rdi\n"
+ " call arch_rethook_trampoline_callback\n"
+ RESTORE_REGS_STRING
+ /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */
+ " addq $8, %rsp\n"
+ " popfq\n"
+#else
+ /* Push a fake return address to tell the unwinder it's a rethook trampoline. */
+ " pushl $arch_rethook_trampoline\n"
+ UNWIND_HINT_FUNC
+ /* Save the 'sp - 4', this will be fixed later. */
+ " pushl %esp\n"
+ " pushfl\n"
+ SAVE_REGS_STRING
+ " movl %esp, %eax\n"
+ " call arch_rethook_trampoline_callback\n"
+ RESTORE_REGS_STRING
+ /* In the callback function, 'regs->flags' is copied to 'regs->sp'. */
+ " addl $4, %esp\n"
+ " popfl\n"
+#endif
+ " ret\n"
+ ".size arch_rethook_trampoline, .-arch_rethook_trampoline\n"
+);
+NOKPROBE_SYMBOL(arch_rethook_trampoline);
+/*
+ * arch_rethook_trampoline() skips updating frame pointer. The frame pointer
+ * saved in arch_rethook_trampoline_callback() points to the real caller
+ * function's frame pointer. Thus the arch_rethook_trampoline() doesn't have
+ * a standard stack frame with CONFIG_FRAME_POINTER=y.
+ * Let's mark it as a non-standard function. Anyway, the FP unwinder can correctly
+ * unwind without the hint.
+ */
+STACK_FRAME_NON_STANDARD_FP(arch_rethook_trampoline);
+
+/* This is called from rethook_trampoline_handler(). */
+void arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr)
+{
+ unsigned long *frame_pointer = &regs->sp + 1;
+
+ /* Replace fake return address with real one. */
+ *frame_pointer = correct_ret_addr;
+}
+
+void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs)
+{
+ unsigned long *stack = (unsigned long *)regs->sp;
+
+ rh->ret_addr = stack[0];
+ rh->frame = regs->sp;
+
+ /* Replace the return addr with trampoline addr */
+ stack[0] = (unsigned long) arch_rethook_trampoline;
+}
+NOKPROBE_SYMBOL(arch_rethook_prepare);
Hi Jiri,
On Mon, 31 Jan 2022 14:00:24 +0900
Masami Hiramatsu <[email protected]> wrote:
> [...]
If you want to work on this series, I pushed my working branch on here;
https://git.kernel.org/pub/scm/linux/kernel/git/mhiramat/linux.git/ kprobes/fprobe
Thank you,
--
Masami Hiramatsu <[email protected]>
On Mon, Jan 31, 2022 at 06:36:42PM +0900, Masami Hiramatsu wrote:
> Hi Jiri,
>
> On Mon, 31 Jan 2022 14:00:24 +0900
> Masami Hiramatsu <[email protected]> wrote:
>
> > [...]
>
> If you want to work on this series, I pushed my working branch on here;
>
> https://git.kernel.org/pub/scm/linux/kernel/git/mhiramat/linux.git/ kprobes/fprobe
great, I was going to ask for that ;-) thanks
jirka
On Mon, Jan 31, 2022 at 02:00:24PM +0900, Masami Hiramatsu wrote:
> [...]
hi,
it works fine for bpf selftests, but when I use it through bpftrace
to attach more probes with:
# ./src/bpftrace -e 'kprobe:ksys_* { }'
Attaching 27 probes
I'm getting stalls like:
krava33 login: [ 988.574069] INFO: task bpftrace:4137 blocked for more than 122 seconds.
[ 988.577577] Not tainted 5.16.0+ #89
[ 988.580173] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 988.585538] task:bpftrace state:D stack: 0 pid: 4137 ppid: 4123 flags:0x00004004
[ 988.589869] Call Trace:
[ 988.591312] <TASK>
[ 988.592577] __schedule+0x3a8/0xd30
[ 988.594469] ? wait_for_completion+0x84/0x110
[ 988.596753] schedule+0x4e/0xc0
[ 988.598480] schedule_timeout+0xed/0x130
[ 988.600524] ? rcu_read_lock_sched_held+0x12/0x70
[ 988.602901] ? lock_release+0x253/0x4a0
[ 988.604935] ? lock_acquired+0x1b7/0x410
[ 988.607041] ? trace_hardirqs_on+0x1b/0xe0
[ 988.609202] wait_for_completion+0xae/0x110
[ 988.613762] __wait_rcu_gp+0x127/0x130
[ 988.615787] synchronize_rcu_tasks_generic+0x46/0xa0
[ 988.618329] ? call_rcu_tasks+0x20/0x20
[ 988.620600] ? rcu_tasks_pregp_step+0x10/0x10
[ 988.623232] ftrace_shutdown.part.0+0x174/0x210
[ 988.625820] unregister_ftrace_function+0x37/0x60
[ 988.628480] unregister_fprobe+0x2d/0x50
[ 988.630928] bpf_link_free+0x4e/0x70
[ 988.633126] bpf_link_release+0x11/0x20
[ 988.635249] __fput+0xae/0x270
[ 988.637022] task_work_run+0x5c/0xa0
[ 988.639016] exit_to_user_mode_prepare+0x251/0x260
[ 988.641294] syscall_exit_to_user_mode+0x16/0x50
[ 988.646249] do_syscall_64+0x48/0x90
[ 988.648218] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 988.650787] RIP: 0033:0x7f9079e95fbb
[ 988.652761] RSP: 002b:00007ffd474fa3b0 EFLAGS: 00000293 ORIG_RAX: 0000000000000003
[ 988.656718] RAX: 0000000000000000 RBX: 00000000011bf8d0 RCX: 00007f9079e95fbb
[ 988.660110] RDX: 0000000000000000 RSI: 00007ffd474fa3b0 RDI: 0000000000000019
[ 988.663512] RBP: 00007ffd474faaf0 R08: 0000000000000000 R09: 000000000000001a
[ 988.666673] R10: 0000000000000064 R11: 0000000000000293 R12: 0000000000000001
[ 988.669770] R13: 00000000004a19a1 R14: 00007f9083428c00 R15: 00000000008c02d8
[ 988.672601] </TASK>
[ 988.675763] INFO: lockdep is turned off.
I haven't investigated yet, any idea?
thanks,
jirka
Hi Jiri,
On Wed, 2 Feb 2022 01:02:43 +0100
Jiri Olsa <[email protected]> wrote:
> On Mon, Jan 31, 2022 at 02:00:24PM +0900, Masami Hiramatsu wrote:
> > [...]
>
> hi,
> it works fine for bpf selftests, but when I use it through bpftrace
> to attach more probes with:
>
> # ./src/bpftrace -e 'kprobe:ksys_* { }'
> Attaching 27 probes
>
> I'm getting stalls like:
>
> [...]
>
> I haven't investigated yet, any idea?
Hmm, no, as far as I tested with my example module, it works well as below;
# insmod fprobe_example.ko symbol='ksys_*' && ls && sleep 1 && rmmod fprobe_example.ko
[ 125.820113] fprobe_init: Planted fprobe at ksys_*
[ 125.823153] sample_entry_handler: Enter <ksys_write+0x0/0xf0> ip = 0x000000008d8da91f
[ 125.824247] fprobe_handler.part.0+0xb1/0x150
[ 125.825024] fprobe_handler+0x1e/0x20
[ 125.825799] 0xffffffffa000e0e3
[ 125.826540] ksys_write+0x5/0xf0
[ 125.827344] do_syscall_64+0x3b/0x90
[ 125.828144] entry_SYSCALL_64_after_hwframe+0x44/0xae
fprobe_example.ko
[ 125.829178] sample_exit_handler: Return from <ksys_write+0x0/0xf0> ip = 0x000000008d8da91f to rip = 0x00000000be5e197e (__x64_sys_write+0x1a/0x20)
[ 125.830707] fprobe_exit_handler+0x29/0x30
[ 125.831415] rethook_trampoline_handler+0x99/0x140
[ 125.832259] arch_rethook_trampoline_callback+0x3f/0x50
[ 125.833110] arch_rethook_trampoline+0x2f/0x50
[ 125.833803] __x64_sys_write+0x1a/0x20
[ 125.834448] do_syscall_64+0x3b/0x90
[ 125.835055] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 126.878825] fprobe_exit: fprobe at ksys_* unregistered
#
Even with NR_CPUS=3, it didn't cause the stall. But maybe you'd better test
with Paul's fix, as Andrii pointed out.
Thank you,
>
> thanks,
> jirka
>
--
Masami Hiramatsu <[email protected]>
On Tue, Feb 1, 2022 at 4:02 PM Jiri Olsa <[email protected]> wrote:
>
> On Mon, Jan 31, 2022 at 02:00:24PM +0900, Masami Hiramatsu wrote:
> > [...]
>
> hi,
> it works fine for bpf selftests, but when I use it through bpftrace
> to attach more probes with:
>
> # ./src/bpftrace -e 'kprobe:ksys_* { }'
> Attaching 27 probes
>
> I'm getting stalls like:
>
> [...]
>
> I haven't investigated yet, any idea?
>
Do you happen to have a CPU count that's not a power of 2? Check if
you have [0] in your tree, it might be that.
[0] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=a773abf72eb0cac008743891068ca6edecc44683
> thanks,
> jirka
>
On Tue, Feb 01, 2022 at 04:09:05PM -0800, Andrii Nakryiko wrote:
> On Tue, Feb 1, 2022 at 4:02 PM Jiri Olsa <[email protected]> wrote:
> >
> > On Mon, Jan 31, 2022 at 02:00:24PM +0900, Masami Hiramatsu wrote:
> > [...]
>
> Do you happen to have a CPU count that's not a power of 2? Check if
> you have [0] in your tree, it might be that.
>
> [0] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=a773abf72eb0cac008743891068ca6edecc44683
yes, that helped, thanks
jirka