2020-08-27 16:23:44

by Peter Zijlstra

Subject: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

The kretprobe hash is mostly superfluous, replace it with a per-task
variable.

This gets rid of the task hash and its related locking.

The whole invalidate_rp_inst() business is tedious and could go away once
we drop the rp-specific ri size (the per-kretprobe data_size).
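
In short, the per-task scheme looks like this (condensed from the diff
below, not additional code in the patch):

  /* function entry: pre_handler_kretprobe() pushes onto the current task */
  __llist_add(&ri->llist, &current->kretprobe_instances);

  /* function return: the trampoline handler pops this frame's instances */
  node = current->kretprobe_instances.first;
  while (node) {
          ri = container_of(node, struct kretprobe_instance, llist);
          node = node->next;
          /* ... call ri->rp->handler(), then recycle ri ... */
  }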

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
include/linux/kprobes.h | 5 -
include/linux/sched.h | 4
kernel/fork.c | 4
kernel/kprobes.c | 239 +++++++++++++++++++-----------------------------
4 files changed, 110 insertions(+), 142 deletions(-)

--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -156,7 +156,10 @@ struct kretprobe {
};

struct kretprobe_instance {
- struct hlist_node hlist;
+ union {
+ struct llist_node llist;
+ struct hlist_node hlist;
+ };
struct kretprobe *rp;
kprobe_opcode_t *ret_addr;
struct task_struct *task;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1315,6 +1315,10 @@ struct task_struct {
struct callback_head mce_kill_me;
#endif

+#ifdef CONFIG_KRETPROBES
+ struct llist_head kretprobe_instances;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2161,6 +2161,10 @@ static __latent_entropy struct task_stru
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;

+#ifdef CONFIG_KRETPROBES
+ p->kretprobe_instances.first = NULL;
+#endif
+
/*
* Ensure that the cgroup subsystem policies allow the new process to be
* forked. It should be noted the the new process's css_set can be changed
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -53,7 +53,6 @@ static int kprobes_initialized;
* - RCU hlist traversal under disabling preempt (breakpoint handlers)
*/
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
-static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;
@@ -61,9 +60,6 @@ static bool kprobes_all_disarmed;
/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
-static struct {
- raw_spinlock_t lock ____cacheline_aligned_in_smp;
-} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
unsigned int __unused)
@@ -71,11 +67,6 @@ kprobe_opcode_t * __weak kprobe_lookup_n
return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

-static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
-{
- return &(kretprobe_table_locks[hash].lock);
-}
-
/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

@@ -1241,49 +1232,6 @@ void recycle_rp_inst(struct kretprobe_in
}
NOKPROBE_SYMBOL(recycle_rp_inst);

-void kretprobe_hash_lock(struct task_struct *tsk,
- struct hlist_head **head, unsigned long *flags)
-__acquires(hlist_lock)
-{
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- raw_spinlock_t *hlist_lock;
-
- *head = &kretprobe_inst_table[hash];
- hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_lock_irqsave(hlist_lock, *flags);
-}
-NOKPROBE_SYMBOL(kretprobe_hash_lock);
-
-static void kretprobe_table_lock(unsigned long hash,
- unsigned long *flags)
-__acquires(hlist_lock)
-{
- raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_lock_irqsave(hlist_lock, *flags);
-}
-NOKPROBE_SYMBOL(kretprobe_table_lock);
-
-void kretprobe_hash_unlock(struct task_struct *tsk,
- unsigned long *flags)
-__releases(hlist_lock)
-{
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
- raw_spinlock_t *hlist_lock;
-
- hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_unlock_irqrestore(hlist_lock, *flags);
-}
-NOKPROBE_SYMBOL(kretprobe_hash_unlock);
-
-static void kretprobe_table_unlock(unsigned long hash,
- unsigned long *flags)
-__releases(hlist_lock)
-{
- raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
- raw_spin_unlock_irqrestore(hlist_lock, *flags);
-}
-NOKPROBE_SYMBOL(kretprobe_table_unlock);
-
struct kprobe kprobe_busy = {
.addr = (void *) get_kprobe,
};
@@ -1313,25 +1261,28 @@ void kprobe_busy_end(void)
void kprobe_flush_task(struct task_struct *tk)
{
struct kretprobe_instance *ri;
- struct hlist_head *head, empty_rp;
+ struct hlist_head empty_rp;
+ struct llist_node *node;
struct hlist_node *tmp;
- unsigned long hash, flags = 0;

+ /* Early boot, not yet initialized. */
if (unlikely(!kprobes_initialized))
- /* Early boot. kretprobe_table_locks not yet initialized. */
return;

+ INIT_HLIST_HEAD(&empty_rp);
+
kprobe_busy_begin();

- INIT_HLIST_HEAD(&empty_rp);
- hash = hash_ptr(tk, KPROBE_HASH_BITS);
- head = &kretprobe_inst_table[hash];
- kretprobe_table_lock(hash, &flags);
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task == tk)
- recycle_rp_inst(ri, &empty_rp);
+ node = current->kretprobe_instances.first;
+ current->kretprobe_instances.first = NULL;
+
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, llist);
+ node = node->next;
+
+ recycle_rp_inst(ri, &empty_rp);
}
- kretprobe_table_unlock(hash, &flags);
+
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
@@ -1352,24 +1303,70 @@ static inline void free_rp_inst(struct k
}
}

-static void cleanup_rp_inst(struct kretprobe *rp)
+/* XXX all of this only exists because we have rp specific ri's */
+
+static bool __invalidate_rp_inst(struct task_struct *t, void *rp)
{
- unsigned long flags, hash;
+ struct llist_node *node = t->kretprobe_instances.first;
struct kretprobe_instance *ri;
- struct hlist_node *next;
- struct hlist_head *head;
+
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, llist);
+ node = node->next;
+
+ if (ri->rp == rp)
+ ri->rp = NULL;
+ }
+
+ return true;
+}
+
+struct invl_rp_ipi {
+ struct task_struct *task;
+ void *rp;
+ bool done;
+};
+
+static void __invalidate_rp_ipi(void *arg)
+{
+ struct invl_rp_ipi *iri = arg;
+
+ if (iri->task == current)
+ iri->done = __invalidate_rp_inst(iri->task, iri->rp);
+}
+
+static void invalidate_rp_inst(struct task_struct *t, struct kretprobe *rp)
+{
+ struct invl_rp_ipi iri = {
+ .task = t,
+ .rp = rp,
+ .done = false
+ };
+
+ for (;;) {
+ if (try_invoke_on_locked_down_task(t, __invalidate_rp_inst, rp))
+ return;
+
+ smp_call_function_single(task_cpu(t), __invalidate_rp_ipi, &iri, 1);
+ if (iri.done)
+ return;
+ }
+}
+
+static void cleanup_rp_inst(struct kretprobe *rp)
+{
+ struct task_struct *p, *t;

/* To avoid recursive kretprobe by NMI, set kprobe busy here */
kprobe_busy_begin();
- for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
- kretprobe_table_lock(hash, &flags);
- head = &kretprobe_inst_table[hash];
- hlist_for_each_entry_safe(ri, next, head, hlist) {
- if (ri->rp == rp)
- ri->rp = NULL;
- }
- kretprobe_table_unlock(hash, &flags);
+ rcu_read_lock();
+ for_each_process_thread(p, t) {
+ if (!t->kretprobe_instances.first)
+ continue;
+
+ invalidate_rp_inst(t, rp);
}
+ rcu_read_unlock();
kprobe_busy_end();

free_rp_inst(rp);
@@ -1935,71 +1932,45 @@ unsigned long __kretprobe_trampoline_han
unsigned long trampoline_address,
void *frame_pointer)
{
+ kprobe_opcode_t *correct_ret_addr = NULL;
struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
+ unsigned long orig_ret_address = 0;
+ struct llist_node *first, *node;
+ struct hlist_head empty_rp;
struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- kprobe_opcode_t *correct_ret_addr = NULL;
- bool skipped = false;

INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);

- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * return probes installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always pushed into the head of the list
- * - when multiple return probes are registered for the same
- * function, the (chronologically) first instance's ret_addr
- * will be the real return address, and all the rest will
- * point to kretprobe_trampoline.
- */
- hlist_for_each_entry(ri, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
- /*
- * Return probes must be pushed on this hash list correct
- * order (same as return order) so that it can be popped
- * correctly. However, if we find it is pushed it incorrect
- * order, this means we find a function which should not be
- * probed, because the wrong order entry is pushed on the
- * path of processing other kretprobe itself.
- */
- if (ri->fp != frame_pointer) {
- if (!skipped)
- pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
- skipped = true;
- continue;
- }
+ first = node = current->kretprobe_instances.first;
+ while (node) {
+ ri = container_of(node, struct kretprobe_instance, llist);

- orig_ret_address = (unsigned long)ri->ret_addr;
- if (skipped)
- pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
- ri->rp->kp.addr);
+ BUG_ON(ri->fp != frame_pointer);

- if (orig_ret_address != trampoline_address)
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (orig_ret_address != trampoline_address) {
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
+ }
+
+ node = node->next;
}

kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
correct_ret_addr = ri->ret_addr;
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
- if (ri->fp != frame_pointer)
- continue;
+
+ /* Unlink all nodes for this frame. */
+ current->kretprobe_instances.first = node->next;
+ node->next = NULL;
+
+ /* Run them.. */
+ while (first) {
+ ri = container_of(first, struct kretprobe_instance, llist);
+ node = first->next;

orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
@@ -2011,17 +1982,9 @@ unsigned long __kretprobe_trampoline_han

recycle_rp_inst(ri, &empty_rp);

- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
+ first = node;
}

- kretprobe_hash_unlock(current, &flags);
-
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
@@ -2062,11 +2025,8 @@ static int pre_handler_kretprobe(struct

arch_prepare_kretprobe(ri, regs);

- /* XXX(hch): why is there no hlist_move_head? */
- INIT_HLIST_NODE(&ri->hlist);
- kretprobe_table_lock(hash, &flags);
- hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
- kretprobe_table_unlock(hash, &flags);
+ __llist_add(&ri->llist, &current->kretprobe_instances);
+
} else {
rp->nmissed++;
raw_spin_unlock_irqrestore(&rp->lock, flags);
@@ -2551,11 +2511,8 @@ static int __init init_kprobes(void)

/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&kprobe_table[i]);
- INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
- raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
- }

err = populate_kprobe_blacklist(__start_kprobe_blacklist,
__stop_kprobe_blacklist);



2020-08-27 18:02:02

by Masami Hiramatsu

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Thu, 27 Aug 2020 18:12:40 +0200
Peter Zijlstra <[email protected]> wrote:

> +static void invalidate_rp_inst(struct task_struct *t, struct kretprobe *rp)
> +{
> + struct invl_rp_ipi iri = {
> + .task = t,
> + .rp = rp,
> + .done = false
> + };
> +
> + for (;;) {
> + if (try_invoke_on_locked_down_task(t, __invalidate_rp_inst, rp))
> + return;
> +
> + smp_call_function_single(task_cpu(t), __invalidate_rp_ipi, &iri, 1);
> + if (iri.done)
> + return;
> + }

Hmm, what about making a status placeholder and pointing to it from
each instance to tell whether it is valid or not?

struct kretprobe_holder {
atomic_t refcnt;
struct kretprobe *rp;
};

struct kretprobe {
...
struct kretprobe_holder *rph; // allocate at register
...
};

struct kretprobe_instance {
...
struct kretprobe_holder *rph; // free if refcnt == 0
...
};

cleanup_rp_inst(struct kretprobe *rp)
{
rp->rph->rp = NULL;
}

kretprobe_trampoline_handler()
{
...
rp = READ_ONCE(ri->rph->rp);
if (likely(rp)) {
// call rp->handler
} else
call_rcu(&ri->rcu, free_rp_inst_rcu); /* assumes an rcu_head in ri */
...
}

free_rp_inst_rcu()
{
if (!atomic_dec_return(&ri->rph->refcnt))
kfree(ri->rph);
kfree(ri);
}

This increases kretprobe_instance a bit, but makes things simpler
(and still keeps it lockless; the atomic op is in the RCU callback).
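
For concreteness, a fleshed-out sketch of the holder idea (illustrative
only; the names kretprobe_instance_x, handle_one and the rcu_head member
are assumptions, not code from the series, and refcount_t is used here
instead of a bare atomic_t):

#include <linux/kprobes.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct kretprobe_holder {
	struct kretprobe	*rp;	/* cleared when the kretprobe is unregistered */
	refcount_t		ref;	/* one per instance, plus one for the kretprobe */
};

/* hypothetical instance layout: needs an rcu_head for the deferred free */
struct kretprobe_instance_x {
	struct rcu_head		rcu;
	struct kretprobe_holder	*rph;
};

static void free_rp_inst_rcu(struct rcu_head *head)
{
	struct kretprobe_instance_x *ri =
		container_of(head, struct kretprobe_instance_x, rcu);

	if (refcount_dec_and_test(&ri->rph->ref))
		kfree(ri->rph);
	kfree(ri);
}

/* trampoline side: only reach the kretprobe through the holder */
static void handle_one(struct kretprobe_instance_x *ri)
{
	struct kretprobe *rp = READ_ONCE(ri->rph->rp);

	if (likely(rp)) {
		/* rp->handler(...); then put ri back on the free list */
	} else {
		/* probe already unregistered: free after a grace period */
		call_rcu(&ri->rcu, free_rp_inst_rcu);
	}
}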

Thank you,

--
Masami Hiramatsu <[email protected]>

2020-08-28 04:45:18

by Masami Hiramatsu

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Thu, 27 Aug 2020 18:12:40 +0200
Peter Zijlstra <[email protected]> wrote:

> @@ -1313,25 +1261,28 @@ void kprobe_busy_end(void)
> void kprobe_flush_task(struct task_struct *tk)
> {
> struct kretprobe_instance *ri;
> - struct hlist_head *head, empty_rp;
> + struct hlist_head empty_rp;
> + struct llist_node *node;
> struct hlist_node *tmp;

We don't need this tmp anymore.

> @@ -1935,71 +1932,45 @@ unsigned long __kretprobe_trampoline_han
> unsigned long trampoline_address,
> void *frame_pointer)
> {
> + kprobe_opcode_t *correct_ret_addr = NULL;
> struct kretprobe_instance *ri = NULL;
> - struct hlist_head *head, empty_rp;
> + unsigned long orig_ret_address = 0;
> + struct llist_node *first, *node;
> + struct hlist_head empty_rp;
> struct hlist_node *tmp;

Here too.

I'm trying to port this patch onto my v4 series. I'll add my RFC patch for
the kretprobe_holder too.

Thank you,

--
Masami Hiramatsu <[email protected]>

2020-08-28 08:46:50

by Peter Zijlstra

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, Aug 28, 2020 at 03:00:59AM +0900, Masami Hiramatsu wrote:
> On Thu, 27 Aug 2020 18:12:40 +0200
> Peter Zijlstra <[email protected]> wrote:
>
> > +static void invalidate_rp_inst(struct task_struct *t, struct kretprobe *rp)
> > +{
> > + struct invl_rp_ipi iri = {
> > + .task = t,
> > + .rp = rp,
> > + .done = false
> > + };
> > +
> > + for (;;) {
> > + if (try_invoke_on_locked_down_task(t, __invalidate_rp_inst, rp))
> > + return;
> > +
> > + smp_call_function_single(task_cpu(t), __invalidate_rp_ipi, &iri, 1);
> > + if (iri.done)
> > + return;
> > + }
>
> Hmm, what about making a status place holder and point it from
> each instance to tell it is valid or not?
>
> struct kretprobe_holder {
> atomic_t refcnt;
> struct kretprobe *rp;
> };
>
> struct kretprobe {
> ...
> struct kretprobe_holder *rph; // allocate at register
> ...
> };
>
> struct kretprobe_instance {
> ...
> struct kretprobe_holder *rph; // free if refcnt == 0
> ...
> };
>
> cleanup_rp_inst(struct kretprobe *rp)
> {
> rp->rph->rp = NULL;
> }
>
> kretprobe_trampoline_handler()
> {
> ...
> rp = READ_ONCE(ri->rph-rp);
> if (likely(rp)) {
> // call rp->handler
> } else
> rcu_call(ri, free_rp_inst_rcu);
> ...
> }
>
> free_rp_inst_rcu()
> {
> if (!atomic_dec_return(ri->rph->refcnt))
> kfree(ri->rph);
> kfree(ri);
> }
>
> This increase kretprobe_instance a bit, but make things simpler.
> (and still keep lockless, atomic op is in the rcu callback).

Yes, much better.

Although I'd _love_ to get rid of rp->data_size, since then we could simplify
all of this even more. I was thinking we could then have a single global
freelist thing and add some per-cpu cache to it (say 4-8 entries) to
avoid the worst contention.
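
Rough sketch of that shape (all names and the cache size are made up here,
not from any patch; llist_del_first() wants a single consumer, so the global
pop is serialized with a raw spinlock while the push stays lockless):

#include <linux/llist.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

#define RI_PCPU_CACHE	4	/* small per-cpu stash in front of the global pool */

static LLIST_HEAD(ri_pool);			/* global freelist: lockless push */
static DEFINE_RAW_SPINLOCK(ri_pool_lock);	/* serializes llist_del_first() */

struct ri_cache {
	struct llist_node	*ent[RI_PCPU_CACHE];
	int			nr;
};
static DEFINE_PER_CPU(struct ri_cache, ri_cache);

/* called from the probe handlers, i.e. with preemption disabled */
static struct llist_node *ri_pool_get(void)
{
	struct ri_cache *c = this_cpu_ptr(&ri_cache);
	struct llist_node *node;
	unsigned long flags;

	if (c->nr)
		return c->ent[--c->nr];

	raw_spin_lock_irqsave(&ri_pool_lock, flags);
	node = llist_del_first(&ri_pool);
	raw_spin_unlock_irqrestore(&ri_pool_lock, flags);

	return node;
}

static void ri_pool_put(struct llist_node *node)
{
	struct ri_cache *c = this_cpu_ptr(&ri_cache);

	if (c->nr < RI_PCPU_CACHE) {
		c->ent[c->nr++] = node;
		return;
	}

	/* cache full: push back to the global pool, lockless */
	llist_add(node, &ri_pool);
}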

And then make function-graph use this, instead of the other way around
:-)

2020-08-28 09:08:57

by Masami Hiramatsu

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, 28 Aug 2020 03:00:59 +0900
Masami Hiramatsu <[email protected]> wrote:

> On Thu, 27 Aug 2020 18:12:40 +0200
> Peter Zijlstra <[email protected]> wrote:
>
> > +static void invalidate_rp_inst(struct task_struct *t, struct kretprobe *rp)
> > +{
> > + struct invl_rp_ipi iri = {
> > + .task = t,
> > + .rp = rp,
> > + .done = false
> > + };
> > +
> > + for (;;) {
> > + if (try_invoke_on_locked_down_task(t, __invalidate_rp_inst, rp))
> > + return;
> > +
> > + smp_call_function_single(task_cpu(t), __invalidate_rp_ipi, &iri, 1);
> > + if (iri.done)
> > + return;
> > + }
>
> Hmm, what about making a status place holder and point it from
> each instance to tell it is valid or not?
>
> struct kretprobe_holder {
> atomic_t refcnt;
> struct kretprobe *rp;
> };
>
> struct kretprobe {
> ...
> struct kretprobe_holder *rph; // allocate at register
> ...
> };
>
> struct kretprobe_instance {
> ...
> struct kretprobe_holder *rph; // free if refcnt == 0
> ...
> };
>
> cleanup_rp_inst(struct kretprobe *rp)
> {
> rp->rph->rp = NULL;
> }
>
> kretprobe_trampoline_handler()
> {
> ...
> rp = READ_ONCE(ri->rph-rp);
> if (likely(rp)) {
> // call rp->handler
> } else
> rcu_call(ri, free_rp_inst_rcu);
> ...
> }
>
> free_rp_inst_rcu()
> {
> if (!atomic_dec_return(ri->rph->refcnt))
> kfree(ri->rph);
> kfree(ri);
> }
>
> This increase kretprobe_instance a bit, but make things simpler.
> (and still keep lockless, atomic op is in the rcu callback).

OK, I've written the code and run a smoke test on it.
I'll send it with the 4th version of my series.

Thank you,

--
Masami Hiramatsu <[email protected]>

2020-08-28 13:20:02

by [email protected]

Subject: RE: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

> -----Original Message-----
> From: Peter Zijlstra <[email protected]>
> Sent: Friday, August 28, 2020 12:13 AM
> To: [email protected]; [email protected]
> Cc: Eddy Wu (RD-TW) <[email protected]>; [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected]; [email protected]
> Subject: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash
>
> @@ -1935,71 +1932,45 @@ unsigned long __kretprobe_trampoline_han
> unsigned long trampoline_address,
> void *frame_pointer)
> {
> // ... removed
> // NULL here
> + first = node = current->kretprobe_instances.first;
> + while (node) {
> + ri = container_of(node, struct kretprobe_instance, llist);
>
> - orig_ret_address = (unsigned long)ri->ret_addr;
> - if (skipped)
> - pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
> - ri->rp->kp.addr);
> + BUG_ON(ri->fp != frame_pointer);
>
> - if (orig_ret_address != trampoline_address)
> + orig_ret_address = (unsigned long)ri->ret_addr;
> + if (orig_ret_address != trampoline_address) {
> /*
> * This is the real return address. Any other
> * instances associated with this task are for
> * other calls deeper on the call stack
> */
> break;
> + }
> +
> + node = node->next;
> }
>

Hi, I found a NULL pointer dereference here, where current->kretprobe_instances.first == NULL in these two scenarios:

1) In task "rs:main Q:Reg"
# insmod samples/kprobes/kretprobe_example.ko func=schedule
# pkill sddm-greeter

2) In task "llvmpipe-10"
# insmod samples/kprobes/kretprobe_example.ko func=schedule
login plasmashell session from sddm graphical interface

Based on Masami's v2 + Peter's lockless patch; I'll try the new branch once I can compile the kernel.

Stacktrace may not be really useful here:
[ 402.008630] BUG: kernel NULL pointer dereference, address: 0000000000000018
[ 402.008633] #PF: supervisor read access in kernel mode
[ 402.008642] #PF: error_code(0x0000) - not-present page
[ 402.008644] PGD 0 P4D 0
[ 402.008646] Oops: 0000 [#1] PREEMPT SMP PTI
[ 402.008649] CPU: 7 PID: 1505 Comm: llvmpipe-10 Kdump: loaded Not tainted 5.9.0-rc2-00111-g72091ec08f03-dirty #45
[ 402.008650] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/29/2019
[ 402.008653] RIP: 0010:__kretprobe_trampoline_handler+0xb8/0x17f
[ 402.008655] Code: 65 4c 8b 34 25 80 6d 01 00 4c 89 e2 48 c7 c7 91 6b 85 91 49 8d b6 38 07 00 00 e8 d1 1a f9 ff 48 85 db 74 06 48 3b 5d d0 75 16 <49> 8b 75 18 48 c7 c7 a0 6c 85 91 48
8b 56 28 e8 b2 1a f9 ff 0f 0b
[ 402.008655] RSP: 0018:ffffab408147bde0 EFLAGS: 00010246
[ 402.008656] RAX: 0000000000000021 RBX: 0000000000000000 RCX: 0000000000000002
[ 402.008657] RDX: 0000000080000002 RSI: ffffffff9189757d RDI: 00000000ffffffff
[ 402.008658] RBP: ffffab408147be20 R08: 0000000000000001 R09: 000000000000955c
[ 402.008658] R10: 0000000000000004 R11: 0000000000000000 R12: 0000000000000000
[ 402.008659] R13: 0000000000000000 R14: ffff90736d305f40 R15: 0000000000000000
[ 402.008661] FS: 00007f20f6ffd700(0000) GS:ffff9073781c0000(0000) knlGS:0000000000000000
[ 402.008675] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 402.008678] CR2: 0000000000000018 CR3: 00000001ed256006 CR4: 00000000003706e0
[ 402.008684] Call Trace:
[ 402.008689] ? elfcorehdr_read+0x40/0x40
[ 402.008690] ? elfcorehdr_read+0x40/0x40
[ 402.008691] trampoline_handler+0x42/0x60
[ 402.008692] kretprobe_trampoline+0x2a/0x50
[ 402.008693] RIP: 0010:kretprobe_trampoline+0x0/0x50


2020-08-28 13:44:25

by Peter Zijlstra

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, Aug 28, 2020 at 01:11:15PM +0000, [email protected] wrote:
> > -----Original Message-----
> > From: Peter Zijlstra <[email protected]>
> > Sent: Friday, August 28, 2020 12:13 AM
> > To: [email protected]; [email protected]
> > Cc: Eddy Wu (RD-TW) <[email protected]>; [email protected]; [email protected]; [email protected];
> > [email protected]; [email protected]; [email protected]; [email protected];
> > [email protected]; [email protected]; [email protected]; [email protected]
> > Subject: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash
> >
> > @@ -1935,71 +1932,45 @@ unsigned long __kretprobe_trampoline_han
> > unsigned long trampoline_address,
> > void *frame_pointer)
> > {
> > // ... removed
> > // NULL here
> > + first = node = current->kretprobe_instances.first;
> > + while (node) {
> > + ri = container_of(node, struct kretprobe_instance, llist);
> >
> > - orig_ret_address = (unsigned long)ri->ret_addr;
> > - if (skipped)
> > - pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
> > - ri->rp->kp.addr);
> > + BUG_ON(ri->fp != frame_pointer);
> >
> > - if (orig_ret_address != trampoline_address)
> > + orig_ret_address = (unsigned long)ri->ret_addr;
> > + if (orig_ret_address != trampoline_address) {
> > /*
> > * This is the real return address. Any other
> > * instances associated with this task are for
> > * other calls deeper on the call stack
> > */
> > break;
> > + }
> > +
> > + node = node->next;
> > }
> >
>
> Hi, I found a NULL pointer dereference here, where
> current->kretprobe_instances.first == NULL in these two scenario:

Hurmph, that would mean hitting the trampoline and not having a
kretprobe_instance, weird. Let me try and reproduce.

2020-08-28 13:53:25

by Masami Hiramatsu

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, 28 Aug 2020 13:11:15 +0000
"[email protected]" <[email protected]> wrote:

> > -----Original Message-----
> > From: Peter Zijlstra <[email protected]>
> > Sent: Friday, August 28, 2020 12:13 AM
> > To: [email protected]; [email protected]
> > Cc: Eddy Wu (RD-TW) <[email protected]>; [email protected]; [email protected]; [email protected];
> > [email protected]; [email protected]; [email protected]; [email protected];
> > [email protected]; [email protected]; [email protected]; [email protected]
> > Subject: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash
> >
> > @@ -1935,71 +1932,45 @@ unsigned long __kretprobe_trampoline_han
> > unsigned long trampoline_address,
> > void *frame_pointer)
> > {
> > // ... removed
> > // NULL here
> > + first = node = current->kretprobe_instances.first;
> > + while (node) {
> > + ri = container_of(node, struct kretprobe_instance, llist);
> >
> > - orig_ret_address = (unsigned long)ri->ret_addr;
> > - if (skipped)
> > - pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
> > - ri->rp->kp.addr);
> > + BUG_ON(ri->fp != frame_pointer);
> >
> > - if (orig_ret_address != trampoline_address)
> > + orig_ret_address = (unsigned long)ri->ret_addr;
> > + if (orig_ret_address != trampoline_address) {
> > /*
> > * This is the real return address. Any other
> > * instances associated with this task are for
> > * other calls deeper on the call stack
> > */
> > break;
> > + }
> > +
> > + node = node->next;
> > }
> >
>
> Hi, I found a NULL pointer dereference here, where current->kretprobe_instances.first == NULL in these two scenario:

Thanks! That may be what I'm chasing.

>
> 1) In task "rs:main Q:Reg"
> # insmod samples/kprobes/kretprobe_example.ko func=schedule
> # pkill sddm-greeter
>
> 2) In task "llvmpipe-10"
> # insmod samples/kprobes/kretprobe_example.ko func=schedule
> login plasmashell session from sddm graphical interface

OK, the schedule function will be the key. I guess the scenario is..

1) kretprobe replaces the return address with kretprobe_trampoline on task1's kernel stack
2) task1 forks task2 before returning to kretprobe_trampoline
3) while copying the process along with the kernel stack, task2->kretprobe_instances.first = NULL
4) task2 returns to kretprobe_trampoline
5) Bomb!

Hmm, we would need to fix up the kernel stack when copying the process.

Thank you,

>
> based on Masami's v2 + Peter's lockless patch, I'll try the new branch once I can compile kernel
>
> Stacktrace may not be really useful here:
> [ 402.008630] BUG: kernel NULL pointer dereference, address: 0000000000000018
> [ 402.008633] #PF: supervisor read access in kernel mode
> [ 402.008642] #PF: error_code(0x0000) - not-present page
> [ 402.008644] PGD 0 P4D 0
> [ 402.008646] Oops: 0000 [#1] PREEMPT SMP PTI
> [ 402.008649] CPU: 7 PID: 1505 Comm: llvmpipe-10 Kdump: loaded Not tainted 5.9.0-rc2-00111-g72091ec08f03-dirty #45
> [ 402.008650] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/29/2019
> [ 402.008653] RIP: 0010:__kretprobe_trampoline_handler+0xb8/0x17f
> [ 402.008655] Code: 65 4c 8b 34 25 80 6d 01 00 4c 89 e2 48 c7 c7 91 6b 85 91 49 8d b6 38 07 00 00 e8 d1 1a f9 ff 48 85 db 74 06 48 3b 5d d0 75 16 <49> 8b 75 18 48 c7 c7 a0 6c 85 91 48
> 8b 56 28 e8 b2 1a f9 ff 0f 0b
> [ 402.008655] RSP: 0018:ffffab408147bde0 EFLAGS: 00010246
> [ 402.008656] RAX: 0000000000000021 RBX: 0000000000000000 RCX: 0000000000000002
> [ 402.008657] RDX: 0000000080000002 RSI: ffffffff9189757d RDI: 00000000ffffffff
> [ 402.008658] RBP: ffffab408147be20 R08: 0000000000000001 R09: 000000000000955c
> [ 402.008658] R10: 0000000000000004 R11: 0000000000000000 R12: 0000000000000000
> [ 402.008659] R13: 0000000000000000 R14: ffff90736d305f40 R15: 0000000000000000
> [ 402.008661] FS: 00007f20f6ffd700(0000) GS:ffff9073781c0000(0000) knlGS:0000000000000000
> [ 402.008675] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [ 402.008678] CR2: 0000000000000018 CR3: 00000001ed256006 CR4: 00000000003706e0
> [ 402.008684] Call Trace:
> [ 402.008689] ? elfcorehdr_read+0x40/0x40
> [ 402.008690] ? elfcorehdr_read+0x40/0x40
> [ 402.008691] trampoline_handler+0x42/0x60
> [ 402.008692] kretprobe_trampoline+0x2a/0x50
> [ 402.008693] RIP: 0010:kretprobe_trampoline+0x0/0x50
>


--
Masami Hiramatsu <[email protected]>

2020-08-28 14:01:11

by Peter Zijlstra

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, Aug 28, 2020 at 10:51:13PM +0900, Masami Hiramatsu wrote:

> OK, schedule function will be the key. I guess the senario is..
>
> 1) kretporbe replace the return address with kretprobe_trampoline on task1's kernel stack
> 2) the task1 forks task2 before returning to the kretprobe_trampoline
> 3) while copying the process with the kernel stack, task2->kretprobe_instances.first = NULL
> 4) task2 returns to the kretprobe_trampoline
> 5) Bomb!
>
> Hmm, we need to fixup the kernel stack when copying process.

How would that scenario have been avoided in the old code? Because there
task2 would have a different hash and not have found a kretprobe_instance
either.

2020-08-28 14:15:27

by [email protected]

Subject: RE: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

> From: Masami Hiramatsu <[email protected]>
>
> OK, schedule function will be the key. I guess the senario is..
>
> 1) kretporbe replace the return address with kretprobe_trampoline on task1's kernel stack
> 2) the task1 forks task2 before returning to the kretprobe_trampoline
> 3) while copying the process with the kernel stack, task2->kretprobe_instances.first = NULL

I think a new process created by fork/clone uses a brand new kernel stack? I thought only the user stack is copied.
Otherwise any process launch would crash in the same way.

By the way, I can reproduce this on the latest branch (v4).

2020-08-28 14:20:33

by Masami Hiramatsu

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, 28 Aug 2020 15:58:24 +0200
[email protected] wrote:

> On Fri, Aug 28, 2020 at 10:51:13PM +0900, Masami Hiramatsu wrote:
>
> > OK, schedule function will be the key. I guess the senario is..
> >
> > 1) kretporbe replace the return address with kretprobe_trampoline on task1's kernel stack
> > 2) the task1 forks task2 before returning to the kretprobe_trampoline
> > 3) while copying the process with the kernel stack, task2->kretprobe_instances.first = NULL
> > 4) task2 returns to the kretprobe_trampoline
> > 5) Bomb!
> >
> > Hmm, we need to fixup the kernel stack when copying process.
>
> How would that scenario have been avoided in the old code? Because there
> task2 would have a different has and not have found a kretprobe_instance
> either.

Good question, I think this bug was not solved in the old code either.
Let me check.

Thanks,

--
Masami Hiramatsu <[email protected]>

2020-08-28 14:22:40

by Peter Zijlstra

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, Aug 28, 2020 at 02:11:18PM +0000, [email protected] wrote:
> > From: Masami Hiramatsu <[email protected]>
> >
> > OK, schedule function will be the key. I guess the senario is..
> >
> > 1) kretporbe replace the return address with kretprobe_trampoline on task1's kernel stack
> > 2) the task1 forks task2 before returning to the kretprobe_trampoline
> > 3) while copying the process with the kernel stack, task2->kretprobe_instances.first = NULL
>
> I think new process created by fork/clone uses a brand new kernel
> stack? I thought only user stack are copied. Otherwise any process
> launch should crash in the same way

I was under the same impression: we create a brand new stack frame for
the new task, a 'fake' frame that we can schedule into.

It either points to ret_from_fork() for new user tasks, or
kthread_frame_init() for kernel threads.

2020-08-28 14:46:47

by Masami Hiramatsu

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, 28 Aug 2020 16:19:17 +0200
[email protected] wrote:

> On Fri, Aug 28, 2020 at 02:11:18PM +0000, [email protected] wrote:
> > > From: Masami Hiramatsu <[email protected]>
> > >
> > > OK, schedule function will be the key. I guess the senario is..
> > >
> > > 1) kretporbe replace the return address with kretprobe_trampoline on task1's kernel stack
> > > 2) the task1 forks task2 before returning to the kretprobe_trampoline
> > > 3) while copying the process with the kernel stack, task2->kretprobe_instances.first = NULL
> >
> > I think new process created by fork/clone uses a brand new kernel
> > stack? I thought only user stack are copied. Otherwise any process
> > launch should crash in the same way
>
> I was under the same impression, we create a brand new stack-frame for
> the new task, this 'fake' frame we can schedule into.
>
> It either points to ret_from_fork() for new user tasks, or
> kthread_frame_init() for kernel threads.

Ah sorry, then it's my misreading... anyway, I could reproduce the crash by
probing schedule(). Hmm, it would be better to dump the current comm with
BUG().
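
Something along these lines, maybe (just a sketch of the reporting, not the
actual fix):

	/* sketch: say which task hit the empty-list case before dying */
	if (!current->kretprobe_instances.first) {
		pr_err("kretprobe_trampoline: no instance, comm=%s pid=%d\n",
		       current->comm, task_pid_nr(current));
		BUG();
	}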

Thank you,



--
Masami Hiramatsu <[email protected]>

2020-08-28 14:50:50

by Masami Hiramatsu

Subject: Re: [RFC][PATCH 3/7] kprobes: Remove kretprobe hash

On Fri, 28 Aug 2020 13:11:15 +0000
"[email protected]" <[email protected]> wrote:

> > -----Original Message----
> Hi, I found a NULL pointer dereference here, where current->kretprobe_instances.first == NULL in these two scenario:
>
> 1) In task "rs:main Q:Reg"
> # insmod samples/kprobes/kretprobe_example.ko func=schedule
> # pkill sddm-greeter
>
> 2) In task "llvmpipe-10"
> # insmod samples/kprobes/kretprobe_example.ko func=schedule
> login plasmashell session from sddm graphical interface
>
> based on Masami's v2 + Peter's lockless patch, I'll try the new branch once I can compile kernel
>
> Stacktrace may not be really useful here:
> [ 402.008630] BUG: kernel NULL pointer dereference, address: 0000000000000018
> [ 402.008633] #PF: supervisor read access in kernel mode
> [ 402.008642] #PF: error_code(0x0000) - not-present page
> [ 402.008644] PGD 0 P4D 0
> [ 402.008646] Oops: 0000 [#1] PREEMPT SMP PTI
> [ 402.008649] CPU: 7 PID: 1505 Comm: llvmpipe-10 Kdump: loaded Not tainted 5.9.0-rc2-00111-g72091ec08f03-dirty #45

Hmm, in this case llvmpipe will be a user task (not a kthread, I guess).

Here are some logs; both happened with the following commands after waiting 5 min or so.

cd /sys/kernel/debug/tracing/
echo r:event1 vfs_read >> kprobe_events
echo r:event2 vfs_read %ax >> kprobe_events
echo r:event3 rw_verify_area %ax >> kprobe_events
echo r:schedule schedule >> kprobe_events
echo 1 > events/kprobes/enable


[ 332.986337] ------------[ cut here ]------------
[ 332.987312] kernel BUG at kernel/kprobes.c:1893!
[ 332.988237] invalid opcode: 0000 [#1] PREEMPT SMP PTI
[ 332.989108] CPU: 7 PID: 55 Comm: kcompactd0 Not tainted 5.9.0-rc2+ #54
[ 332.990480] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1 04/01/2014
[ 332.994600] RIP: 0010:__kretprobe_trampoline_handler+0xf2/0x100
[ 332.995551] Code: 48 c7 05 e5 40 ec 7e c0 cc 28 82 4c 89 ff e8 c5 fe ff ff 48 85 db 75 92 48 83 c4 08 4c 89 e0 5b 41 5c 41 5d 41 5e 41 5f 5d c3 <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 90 55 48 89 e5 41 56 41 55
[ 332.998498] RSP: 0000:ffffc90000217cf8 EFLAGS: 00010246
[ 332.999405] RAX: ffff88807cfe9700 RBX: 0000000000000000 RCX: 0000000000000000
[ 333.000597] RDX: ffffc90000217de8 RSI: ffffffff810471e0 RDI: ffffc90000217d50
[ 333.002058] RBP: ffffc90000217d28 R08: 0000000000000001 R09: 0000000000000001
[ 333.003594] R10: 0000000000000000 R11: 0000000000000001 R12: ffffc90000217d50
[ 333.005219] R13: ffff88807d7dbac0 R14: ffffc90000217e00 R15: ffff88807d7dbac0
[ 333.006826] FS: 0000000000000000(0000) GS:ffff88807d7c0000(0000) knlGS:0000000000000000
[ 333.008787] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 333.010249] CR2: 0000000000000000 CR3: 0000000002220000 CR4: 00000000000006a0
[ 333.011895] Call Trace:
[ 333.012529] trampoline_handler+0x43/0x60
[ 333.013214] kretprobe_trampoline+0x2a/0x50
[ 333.014028] RIP: 0010:kretprobe_trampoline+0x0/0x50
[ 333.014856] Code: c7 e9 2d 04 82 e8 a0 f2 0d 00 5d c3 31 f6 e9 79 ff ff ff be 01 00 00 00 e9 6f ff ff ff cc cc cc cc cc cc cc cc cc cc cc cc cc <54> 9c 48 83 ec 18 57 56 52 51 50 41 50 41 51 41 52 41 53 53 55 41
[ 333.017750] RSP: 81170fba:ffffc90000217df0 EFLAGS: 00000246
[ 333.018894] RAX: 0000000040200040 RBX: ffff88807d7dbac0 RCX: 0000000000000000
[ 333.020232] RDX: 0000000000000001 RSI: ffffffff818e51b4 RDI: ffffffff818e51b4
[ 333.021476] RBP: ffffc90000217e88 R08: 0000000000000001 R09: 0000000000000001
[ 333.022603] R10: 0000000000000000 R11: 0000000000000001 R12: 0000000100008044
[ 333.024221] R13: ffff88807d7dbac0 R14: ffffc90000217e00 R15: ffff88807d7dbac0
[ 333.025851] ? schedule+0x54/0x100
[ 333.026717] ? schedule+0x54/0x100
[ 333.027400] ? trace_preempt_on+0x2a/0xd0
[ 333.028161] ? __next_timer_interrupt+0x110/0x110
[ 333.029080] kcompactd+0x20e/0x350
[ 333.029882] ? wait_woken+0x80/0x80
[ 333.030593] ? kcompactd_do_work+0x3a0/0x3a0
[ 333.031347] kthread+0x13c/0x180
[ 333.031988] ? kthread_park+0x90/0x90
[ 333.032734] ret_from_fork+0x22/0x30
[ 333.033557] Modules linked in:
[ 333.034451] ---[ end trace 901e8137e8d04982 ]---
[ 333.035601] RIP: 0010:__kretprobe_trampoline_handler+0xf2/0x100
[ 333.037073] Code: 48 c7 05 e5 40 ec 7e c0 cc 28 82 4c 89 ff e8 c5 fe ff ff 48 85 db 75 92 48 83 c4 08 4c 89 e0 5b 41 5c 41 5d 41 5e 41 5f 5d c3 <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 90 55 48 89 e5 41 56 41 55
[ 333.041089] RSP: 0000:ffffc90000217cf8 EFLAGS: 00010246
[ 333.042201] RAX: ffff88807cfe9700 RBX: 0000000000000000 RCX: 0000000000000000
[ 333.043747] RDX: ffffc90000217de8 RSI: ffffffff810471e0 RDI: ffffc90000217d50
[ 333.045063] RBP: ffffc90000217d28 R08: 0000000000000001 R09: 0000000000000001
[ 333.046547] R10: 0000000000000000 R11: 0000000000000001 R12: ffffc90000217d50
[ 333.048055] R13: ffff88807d7dbac0 R14: ffffc90000217e00 R15: ffff88807d7dbac0
[ 333.049616] FS: 0000000000000000(0000) GS:ffff88807d7c0000(0000) knlGS:0000000000000000
[ 333.051487] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 333.052737] CR2: 0000000000000000 CR3: 0000000002220000 CR4: 00000000000006a0
[ 333.054127] Kernel panic - not syncing: Fatal exception
[ 333.055450] Kernel Offset: disabled
[ 333.056207] ---[ end Kernel panic - not syncing: Fatal exception ]---

Another one is here.

[ 335.258721] ------------[ cut here ]------------
[ 335.264413] kernel BUG at kernel/kprobes.c:1893!
[ 335.267757] invalid opcode: 0000 [#1] PREEMPT SMP PTI
[ 335.272090] CPU: 7 PID: 71 Comm: kworker/7:1 Not tainted 5.9.0-rc2+ #54
[ 335.277787] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1 04/01/2014
[ 335.285971] Workqueue: 0x0 (mm_percpu_wq)
[ 335.288156] RIP: 0010:__kretprobe_trampoline_handler+0xf2/0x100
[ 335.295194] Code: 48 c7 05 e5 40 ec 7e c0 cc 28 82 4c 89 ff e8 c5 fe ff ff 48 85 db 75 92 48 83 c4 08 4c 89 e0 5b 41 5c 41 5d 41 5e 41 5f 5d c3 <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 90 55 48 89 e5 41 56 41 55
[ 335.300922] RSP: 0018:ffffc9000028fdb8 EFLAGS: 00010246
[ 335.302336] RAX: ffff88807c4e9700 RBX: 0000000000000000 RCX: 0000000000000000
[ 335.304154] RDX: ffffc9000028fea8 RSI: ffffffff810471e0 RDI: ffffc9000028fe10
[ 335.305688] RBP: ffffc9000028fde8 R08: 0000000000000001 R09: 0000000000000001
[ 335.307486] R10: 0000000000000000 R11: 0000000000000001 R12: ffffc9000028fe10
[ 335.309131] R13: ffff88807d7ea440 R14: ffffc900001cbd58 R15: ffff88807c4e4000
[ 335.310472] FS: 0000000000000000(0000) GS:ffff88807d7c0000(0000) knlGS:0000000000000000
[ 335.312121] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 335.313261] CR2: 00000000005c0a56 CR3: 0000000002220000 CR4: 00000000000006a0
[ 335.314561] Call Trace:
[ 335.315089] trampoline_handler+0x43/0x60
[ 335.315844] kretprobe_trampoline+0x2a/0x50
[ 335.316774] RIP: 0010:kretprobe_trampoline+0x0/0x50
[ 335.317651] Code: c7 e9 2d 04 82 e8 a0 f2 0d 00 5d c3 31 f6 e9 79 ff ff ff be 01 00 00 00 e9 6f ff ff ff cc cc cc cc cc cc cc cc cc cc cc cc cc <54> 9c 48 83 ec 18 57 56 52 51 50 41 50 41 51 41 52 41 53 53 55 41
[ 335.320480] RSP: 7c4e9700:ffffc9000028feb0 EFLAGS: 00000246
[ 335.321410] RAX: ffff88807c4e4000 RBX: ffff88807d7ea440 RCX: 0000000000000000
[ 335.322508] RDX: 0000000000000000 RSI: ffffffff818e51b4 RDI: ffff88807c4e9700
[ 335.323611] RBP: ffffc9000028ff00 R08: 0000000000000001 R09: 0000000000000001
[ 335.324699] R10: 0000000000000000 R11: 0000000000000001 R12: ffff88807c4e4028
[ 335.325903] R13: ffff88807d7ea440 R14: ffffc900001cbd58 R15: ffff88807c4e4000
[ 335.327012] ? schedule+0x54/0x100
[ 335.327570] ? process_one_work+0x5c0/0x5c0
[ 335.328127] kthread+0x13c/0x180
[ 335.328583] ? kthread_park+0x90/0x90
[ 335.329063] ret_from_fork+0x22/0x30
[ 335.329558] Modules linked in:
[ 335.329974] ---[ end trace bd6d1f4d3806b3de ]---
[ 335.330562] RIP: 0010:__kretprobe_trampoline_handler+0xf2/0x100
[ 335.331294] Code: 48 c7 05 e5 40 ec 7e c0 cc 28 82 4c 89 ff e8 c5 fe ff ff 48 85 db 75 92 48 83 c4 08 4c 89 e0 5b 41 5c 41 5d 41 5e 41 5f 5d c3 <0f> 0b 66 66 2e 0f 1f 84 00 00 00 00 00 90 55 48 89 e5 41 56 41 55
[ 335.333433] RSP: 0018:ffffc9000028fdb8 EFLAGS: 00010246
[ 335.334091] RAX: ffff88807c4e9700 RBX: 0000000000000000 RCX: 0000000000000000
[ 335.334959] RDX: ffffc9000028fea8 RSI: ffffffff810471e0 RDI: ffffc9000028fe10
[ 335.335697] RBP: ffffc9000028fde8 R08: 0000000000000001 R09: 0000000000000001
[ 335.336447] R10: 0000000000000000 R11: 0000000000000001 R12: ffffc9000028fe10
[ 335.337192] R13: ffff88807d7ea440 R14: ffffc900001cbd58 R15: ffff88807c4e4000
[ 335.337956] FS: 0000000000000000(0000) GS:ffff88807d7c0000(0000) knlGS:0000000000000000
[ 335.338917] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 335.339618] CR2: 00000000005c0a56 CR3: 0000000002220000 CR4: 00000000000006a0
[ 335.340373] Kernel panic - not syncing: Fatal exception
[ 335.341086] Kernel Offset: disabled
[ 335.341587] ---[ end Kernel panic - not syncing: Fatal exception ]---



--
Masami Hiramatsu <[email protected]>