arm_kprobe_ftrace() could fail, especially after introducing ftrace IPMODIFY
flag and live patching. This patch set adds the error handling and also some
related fixes.
1st patch includes the most important change. It helps to keep Kprobes
in a sane state.
The 2nd and 3rd patches allow propagating the error where needed.
The other patches fix problems with the global kprobes_all_disarmed flag.
They were there even before but they became more visible and critical
once the arming errors were propagated.
The first patch looks rather safe and might be suitable even for 4.0.
However, I would feel more comfortable if the other patches get some
testing in linux-next. I did quite some testing and did my best. But
I started with the first three patches and was surprised by the effect of
the propagated errors. They triggered the BUG_ON() in
__unregister_kprobe_top() and required the other patches
to get it working. I wonder if there is any other scenario that
I have missed.
Of course, I am also waiting for feedback on how to make things better.
Petr Mladek (7):
kprobes: Disable Kprobe when ftrace arming fails
kprobes: Propagate error from arm_kprobe_ftrace()
kprobes: Propagate error from disarm_kprobe_ftrace()
kprobes: Keep consistent state of kprobes_all_disarmed
kprobes: Do not try to disarm already disarmed Kprobe
kprobes: Check kprobes_all_disarmed in kprobe_disarmed()
kprobes: Mark globally disabled Kprobes in debugfs interface
Documentation/kprobes.txt | 5 +-
kernel/kprobes.c | 279 ++++++++++++++++++++++++++++++++++------------
2 files changed, 213 insertions(+), 71 deletions(-)
--
1.8.5.6
arm_kprobe_ftrace() could fail, especially after introducing ftrace IPMODIFY
flag and live patching. But this situation is not properly handled.
This patch adds the most important changes.
First, it does not make sense to register "kprobe_ftrace_ops" if the filter was
not set.
Second, we should remove the filter if the registration of "kprobe_ftrace_ops"
fails. The failure might be caused by conflict between the Kprobe and
a life patch via the IPMODIFY flag. If we remove the filter, we will allow
to register "kprobe_ftrace_ops" for another non-conflicting Kprobe later.
Third, we need to make sure that "kprobe_ftrace_enabled" is incremented only
when "kprobe_ftrace_ops" is successfully registered. Otherwise, another
Kprobe will not try to register it again. Note that we could move the
manipulation of this counter because it is accessed only under "kprobe_mutex".
Fourth, we should mark the probe as disabled if the ftrace stuff is not usable.
It will be the correct status. Also it will prevent the unregistration code
from producing another failure.
It looks safer to disable the Kprobe directly in "kprobe_ftrace_ops". Note
that we need to disable also all listed Kprobes in case of an aggregated probe.
It would be enough to disable only the new one but we do not know which one it
was. They should be in sync anyway.
Signed-off-by: Petr Mladek <[email protected]>
---
kernel/kprobes.c | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ee619929cf90..d1b9db690b9c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -931,16 +931,33 @@ static int prepare_kprobe(struct kprobe *p)
/* Caller must lock kprobe_mutex */
static void arm_kprobe_ftrace(struct kprobe *p)
{
+ struct kprobe *kp;
int ret;
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 0, 0);
- WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
- kprobe_ftrace_enabled++;
- if (kprobe_ftrace_enabled == 1) {
+ if (WARN(ret < 0,
+ "Failed to arm kprobe-ftrace at %p (%d). The kprobe gets disabled.\n",
+ p->addr, ret))
+ goto err_filter;
+
+ if (!kprobe_ftrace_enabled) {
ret = register_ftrace_function(&kprobe_ftrace_ops);
- WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+ if (WARN(ret < 0,
+ "Failed to init kprobe-ftrace (%d). The probe at %p gets disabled\n",
+ ret, p->addr))
+ goto err_function;
}
+ kprobe_ftrace_enabled++;
+ return;
+
+err_function:
+ ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+err_filter:
+ p->flags |= KPROBE_FLAG_DISABLED;
+ if (kprobe_aggrprobe(p))
+ list_for_each_entry_rcu(kp, &p->list, list)
+ kp->flags |= KPROBE_FLAG_DISABLED;
}
/* Caller must lock kprobe_mutex */
--
1.8.5.6
arm_kprobe_ftrace() could fail, especially after introducing ftrace IPMODIFY
flag and live patching.
register_kprobe() and register_aggr_kprobe() do not care about the error
because the kprobe gets disabled and they keep it registered.
But enable_kprobe() should propagate the error because its task
fails if ftrace fails.
Also arm_all_kprobes() should return error if it happens. The behavior
is a bit questionable here. This patch keeps the existing behavior and does
the best effort. It tries to enable as many Kprobes as possible. It returns
only the last error code if any. kprobes_all_disarmed is always cleared and
the message about finished action is always printed. There is going to be
a separate patch that will improve the behavior.
Signed-off-by: Petr Mladek <[email protected]>
---
kernel/kprobes.c | 47 ++++++++++++++++++++++++++++++++---------------
1 file changed, 32 insertions(+), 15 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d1b9db690b9c..a69d23add983 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -929,7 +929,7 @@ static int prepare_kprobe(struct kprobe *p)
}
/* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
{
struct kprobe *kp;
int ret;
@@ -949,7 +949,7 @@ static void arm_kprobe_ftrace(struct kprobe *p)
goto err_function;
}
kprobe_ftrace_enabled++;
- return;
+ return ret;
err_function:
ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
@@ -958,6 +958,7 @@ err_filter:
if (kprobe_aggrprobe(p))
list_for_each_entry_rcu(kp, &p->list, list)
kp->flags |= KPROBE_FLAG_DISABLED;
+ return ret;
}
/* Caller must lock kprobe_mutex */
@@ -976,17 +977,15 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p) arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p) do {} while (0)
+#define arm_kprobe_ftrace(p) (0)
#define disarm_kprobe_ftrace(p) do {} while (0)
#endif
/* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
{
- if (unlikely(kprobe_ftrace(kp))) {
- arm_kprobe_ftrace(kp);
- return;
- }
+ if (unlikely(kprobe_ftrace(kp)))
+ return arm_kprobe_ftrace(kp);
/*
* Here, since __arm_kprobe() doesn't use stop_machine(),
* this doesn't cause deadlock on text_mutex. So, we don't
@@ -995,6 +994,7 @@ static void arm_kprobe(struct kprobe *kp)
mutex_lock(&text_mutex);
__arm_kprobe(kp);
mutex_unlock(&text_mutex);
+ return 0;
}
/* Disarm a kprobe with text_mutex */
@@ -1332,10 +1332,15 @@ out:
put_online_cpus();
jump_label_unlock();
+ /* Arm when this is the first enabled kprobe at this address */
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed)
- /* Arm the breakpoint again. */
+ /*
+ * The kprobe is disabled and warning is printed
+ * on error. But we ignore the error code here
+ * because we keep it registered.
+ */
arm_kprobe(ap);
}
return ret;
@@ -1540,6 +1545,11 @@ int register_kprobe(struct kprobe *p)
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
if (!kprobes_all_disarmed && !kprobe_disabled(p))
+ /*
+ * The kprobe is disabled and warning is printed on error.
+ * But we ignore the error code here because we keep it
+ * registered.
+ */
arm_kprobe(p);
/* Try to optimize kprobe */
@@ -2040,7 +2050,7 @@ int enable_kprobe(struct kprobe *kp)
if (!kprobes_all_disarmed && kprobe_disabled(p)) {
p->flags &= ~KPROBE_FLAG_DISABLED;
- arm_kprobe(p);
+ ret = arm_kprobe(p);
}
out:
mutex_unlock(&kprobe_mutex);
@@ -2325,11 +2335,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
.release = seq_release,
};
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
+ int err, ret = 0;
mutex_lock(&kprobe_mutex);
@@ -2341,8 +2352,11 @@ static void arm_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, head, hlist)
- if (!kprobe_disabled(p))
- arm_kprobe(p);
+ if (!kprobe_disabled(p)) {
+ err = arm_kprobe(p);
+ if (err)
+ ret = err;
+ }
}
kprobes_all_disarmed = false;
@@ -2350,7 +2364,7 @@ static void arm_all_kprobes(void)
already_enabled:
mutex_unlock(&kprobe_mutex);
- return;
+ return ret;
}
static void disarm_all_kprobes(void)
@@ -2407,6 +2421,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
{
char buf[32];
size_t buf_size;
+ int err = 0;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@@ -2417,7 +2432,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
case 'y':
case 'Y':
case '1':
- arm_all_kprobes();
+ err = arm_all_kprobes();
break;
case 'n':
case 'N':
@@ -2428,6 +2443,8 @@ static ssize_t write_enabled_file_bool(struct file *file,
return -EINVAL;
}
+ if (err)
+ return err;
return count;
}
--
1.8.5.6
Also disarm_kprobe_ftrace() could fail, for example if there is an internal
error in the Kprobe code and we try to unregister some Kprobe that is not
registered.
If we fail to unregister the ftrace function, we still could try to disarm
the Kprobe by removing the filter. This is why the first error code is not
fatal and we try to continue.
__disable_kprobe() has to return the error code via ERR_PTR. It allows to
pass -EINVAL instead of NULL for invalid Kprobes. Then the NULL pointer does
not need to be transformed into -EINVAL later.
In addition, __disable_kprobe() should disable the child probe only when
the parent probe has been successfully disabled. Note that we could always
clear KPROBE_FLAG_DISABLED in case of error. It does not harm even when
p == orig_p. It keeps the code nesting on reasonable level.
The error handling is a bit complicated in __unregister_kprobe_top().
It is used in unregister_*probes() that are used in module removal callbacks.
Any error here could not stop the removal of the module and the related Kprobe
handlers. Therefore such an error is fatal. There is already a similar check
in the "disarmed:" goto target.
The only exception is when we try to unregister an invalid Kprobe. It does not
exist and therefore should not harm the system. This error was weak even before.
disable_kprobe() just passes the error as it did before.
disarm_all_kprobes() uses a similar approach to arm_all_kprobes(). It keeps
the current behavior and does the best effort. It tries to disable as many
Kprobes as possible and returns only the last error
code, if any. There is going to be a separate patch that will improve this behavior.
Signed-off-by: Petr Mladek <[email protected]>
---
kernel/kprobes.c | 77 +++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 51 insertions(+), 26 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a69d23add983..ba57147bd52c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -962,23 +962,26 @@ err_filter:
}
/* Caller must lock kprobe_mutex */
-static void disarm_kprobe_ftrace(struct kprobe *p)
+static int disarm_kprobe_ftrace(struct kprobe *p)
{
- int ret;
+ int ret = 0;
- kprobe_ftrace_enabled--;
- if (kprobe_ftrace_enabled == 0) {
+ if (kprobe_ftrace_enabled == 1) {
ret = unregister_ftrace_function(&kprobe_ftrace_ops);
- WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+ WARN(ret < 0, "Failed to remove kprobe-ftrace (%d)\n", ret);
}
+ if (!ret)
+ kprobe_ftrace_enabled--;
+
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
- (unsigned long)p->addr, 1, 0);
+ (unsigned long)p->addr, 1, 0);
WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+ return ret;
}
#else /* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p) arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p) (0)
-#define disarm_kprobe_ftrace(p) do {} while (0)
+#define disarm_kprobe_ftrace(p) (0)
#endif
/* Arm a kprobe with text_mutex */
@@ -998,16 +1001,15 @@ static int arm_kprobe(struct kprobe *kp)
}
/* Disarm a kprobe with text_mutex */
-static void disarm_kprobe(struct kprobe *kp, bool reopt)
+static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
- if (unlikely(kprobe_ftrace(kp))) {
- disarm_kprobe_ftrace(kp);
- return;
- }
+ if (unlikely(kprobe_ftrace(kp)))
+ return disarm_kprobe_ftrace(kp);
/* Ditto */
mutex_lock(&text_mutex);
__disarm_kprobe(kp, reopt);
mutex_unlock(&text_mutex);
+ return 0;
}
/*
@@ -1585,11 +1587,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
struct kprobe *orig_p;
+ int err;
/* Get an original kprobe for return */
orig_p = __get_valid_kprobe(p);
if (unlikely(orig_p == NULL))
- return NULL;
+ return ERR_PTR(-EINVAL);
if (!kprobe_disabled(p)) {
/* Disable probe if it is a child probe */
@@ -1598,7 +1601,11 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
/* Try to disarm and disable this/parent probe */
if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
- disarm_kprobe(orig_p, true);
+ err = disarm_kprobe(orig_p, true);
+ if (err) {
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ return ERR_PTR(err);
+ }
orig_p->flags |= KPROBE_FLAG_DISABLED;
}
}
@@ -1615,8 +1622,17 @@ static int __unregister_kprobe_top(struct kprobe *p)
/* Disable kprobe. This will disarm it if needed. */
ap = __disable_kprobe(p);
- if (ap == NULL)
- return -EINVAL;
+ /*
+ * We could not prevent Kprobe handlers from going. Therefore
+ * we rather do not continue when the Kprobe is still enabled.
+ * The only exception is an invalid Kprobe. It does not exist
+ * and therefore could not harm the system.
+ */
+ if (IS_ERR(ap)) {
+ if (PTR_ERR(ap) == -EINVAL)
+ return PTR_ERR(ap);
+ BUG();
+ }
if (ap == p)
/*
@@ -2012,12 +2028,14 @@ static void kill_kprobe(struct kprobe *p)
int disable_kprobe(struct kprobe *kp)
{
int ret = 0;
+ struct kprobe *p;
mutex_lock(&kprobe_mutex);
/* Disable this kprobe */
- if (__disable_kprobe(kp) == NULL)
- ret = -EINVAL;
+ p = __disable_kprobe(kp);
+ if (IS_ERR(p))
+ ret = PTR_ERR(p);
mutex_unlock(&kprobe_mutex);
return ret;
@@ -2367,34 +2385,41 @@ already_enabled:
return ret;
}
-static void disarm_all_kprobes(void)
+static int disarm_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
+ int err, ret = 0;
mutex_lock(&kprobe_mutex);
/* If kprobes are already disarmed, just return */
if (kprobes_all_disarmed) {
mutex_unlock(&kprobe_mutex);
- return;
+ return 0;
}
- kprobes_all_disarmed = true;
- printk(KERN_INFO "Kprobes globally disabled\n");
-
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, head, hlist) {
- if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
- disarm_kprobe(p, false);
+ if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
+ err = disarm_kprobe(p, false);
+ if (err)
+ ret = err;
+ }
}
}
+
+ kprobes_all_disarmed = true;
+ pr_info("Kprobes globally disabled\n");
+
mutex_unlock(&kprobe_mutex);
/* Wait for disarming all kprobes by optimizer */
wait_for_kprobe_optimizer();
+
+ return ret;
}
/*
@@ -2437,7 +2462,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
case 'n':
case 'N':
case '0':
- disarm_all_kprobes();
+ err = disarm_all_kprobes();
break;
default:
return -EINVAL;
--
1.8.5.6
kprobes_all_disarmed global flag says that Kprobes are disarmed even
when the Kprobe-specific KPROBE_FLAG_DISABLED is not set.
The global flag is currently set by the arm_all_kprobes() and disarm_all_kprobes()
functions even when they were not able to switch all Kprobes. It might result
in further errors.
This patch tries to restore the consistent state when some particular
Kprobe cannot be (dis)armed. In this case, it reverts the already switched
Kprobes to the previous state.
The implementation splits the cycle modifying all the probes into
separate functions, so that they could be reused to restore the
original state.
The kprobes_all_disarmed flag is modified only when all Kprobes were
successfully switched.
In case of error, we call wait_for_kprobe_optimizer() also in
arm_all_kprobes() to be on the safe side.
Signed-off-by: Petr Mladek <[email protected]>
---
kernel/kprobes.c | 124 ++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 95 insertions(+), 29 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ba57147bd52c..1fcb19095b43 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2353,44 +2353,117 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
.release = seq_release,
};
-static int arm_all_kprobes(void)
+static int __disarm_all_kprobes(unsigned int last_table,
+ struct kprobe *last_kprobe);
+
+/*
+ * Arm all enabled Kprobes until you reach the one given by parameters.
+ * It tries to restore the original state on error.
+ *
+ * All Kprobes are handled when @last_table == KPROBE_TABLE_SIZE - 1
+ * and last_kprobe == NULL.
+ *
+ * This function need to be called under kprobe_mutex
+ */
+static int __arm_all_kprobes(unsigned int last_table,
+ struct kprobe *last_kprobe)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
int err, ret = 0;
- mutex_lock(&kprobe_mutex);
-
- /* If kprobes are armed, just return */
- if (!kprobes_all_disarmed)
- goto already_enabled;
-
/* Arming kprobes doesn't optimize kprobe itself */
- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ for (i = 0; i <= last_table ; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, head, hlist)
+ hlist_for_each_entry_rcu(p, head, hlist) {
+ if (i == last_table && p == last_kprobe)
+ return 0;
if (!kprobe_disabled(p)) {
err = arm_kprobe(p);
- if (err)
- ret = err;
+ if (!err)
+ continue;
+ /*
+ * Try to restore the original consistent state.
+ * But only when all Kprobes are proceed here
+ * to avoid an infinite loop.
+ */
+ if (!last_kprobe)
+ WARN_ON(__disarm_all_kprobes(i, p));
+ return ret;
}
+ }
}
- kprobes_all_disarmed = false;
- printk(KERN_INFO "Kprobes globally enabled\n");
+ return 0;
+}
+
+static int arm_all_kprobes(void)
+{
+ int ret = 0;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* If kprobes are armed, just return */
+ if (!kprobes_all_disarmed) {
+ mutex_unlock(&kprobe_mutex);
+ return 0;
+ }
+
+ ret = __arm_all_kprobes(KPROBE_TABLE_SIZE - 1, NULL);
+ if (!ret) {
+ kprobes_all_disarmed = false;
+ pr_info("Kprobes globally enabled\n");
+ }
-already_enabled:
mutex_unlock(&kprobe_mutex);
+
+ /*
+ * On error, some Kprobes were armed and disarmed again. Be on the safe
+ * side and wait for disarming all kprobes by optimizer in this case.
+ */
+ if (ret)
+ wait_for_kprobe_optimizer();
+
return ret;
}
-static int disarm_all_kprobes(void)
+/* Reverse operation for __arm_all_kprobes(), see above for details */
+static int __disarm_all_kprobes(unsigned int last_table,
+ struct kprobe *last_kprobe)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
- int err, ret = 0;
+ int err;
+
+ for (i = 0; i <= last_table; i++) {
+ head = &kprobe_table[i];
+ hlist_for_each_entry_rcu(p, head, hlist) {
+ if (i == last_table && p == last_kprobe)
+ return 0;
+ if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
+ err = disarm_kprobe(p, false);
+ if (!err)
+ continue;
+ /*
+ * Try to restore the original consistent state.
+ * But only when all Kprobes are proceed here
+ * to avoid an infinite loop.
+ */
+ if (!last_kprobe)
+ WARN_ON(__arm_all_kprobes(i, p));
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int disarm_all_kprobes(void)
+{
+ int ret;
mutex_lock(&kprobe_mutex);
@@ -2400,24 +2473,17 @@ static int disarm_all_kprobes(void)
return 0;
}
- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
- head = &kprobe_table[i];
- hlist_for_each_entry_rcu(p, head, hlist) {
- if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
- err = disarm_kprobe(p, false);
- if (err)
- ret = err;
- }
- }
+ ret = __disarm_all_kprobes(KPROBE_TABLE_SIZE - 1, NULL);
+ if (!ret) {
+ kprobes_all_disarmed = true;
+ pr_info("Kprobes globally disabled\n");
}
- kprobes_all_disarmed = true;
- pr_info("Kprobes globally disabled\n");
-
mutex_unlock(&kprobe_mutex);
/* Wait for disarming all kprobes by optimizer */
- wait_for_kprobe_optimizer();
+ if (!ret)
+ wait_for_kprobe_optimizer();
return ret;
}
--
1.8.5.6
The global kprobes_all_disarmed flag says that all Kprobes are disarmed
even when they are marked as enabled. This is properly handled in
register_kprobe() but it is ignored in __disable_kprobe().
This problem gets more serious after we started handling errors from
disarm_kprobe(). The second disarming fails and we do not longer
set KPROBE_FLAG_DISABLED. It might trigger BUG_ON(!kprobe_disarmed(ap))
in __unregister_kprobe_top() even when Kprobes were globally
disarmed.
Well, kprobe_disarmed(ap) should return true when the global
kprobes_all_disarmed flag is set. But let's solve this separately.
This patch fixes __disable_kprobe(), so that it does not disarm
when the Kprobe is not armed.
Note that I inverted the condition "if (kprobe_disabled(p))" and used "goto
out" there. It helped to keep the code nesting on a reasonable level.
Signed-off-by: Petr Mladek <[email protected]>
---
kernel/kprobes.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1fcb19095b43..54bc2a6960b4 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1594,22 +1594,25 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
if (unlikely(orig_p == NULL))
return ERR_PTR(-EINVAL);
- if (!kprobe_disabled(p)) {
- /* Disable probe if it is a child probe */
- if (p != orig_p)
- p->flags |= KPROBE_FLAG_DISABLED;
+ if (kprobe_disabled(p))
+ goto out;
+
+ /* Disable probe if it is a child probe */
+ if (p != orig_p)
+ p->flags |= KPROBE_FLAG_DISABLED;
- /* Try to disarm and disable this/parent probe */
- if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
+ /* Try to disarm and disable this/parent probe */
+ if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
+ if (!kprobes_all_disarmed) {
err = disarm_kprobe(orig_p, true);
if (err) {
p->flags &= ~KPROBE_FLAG_DISABLED;
return ERR_PTR(err);
}
- orig_p->flags |= KPROBE_FLAG_DISABLED;
}
+ orig_p->flags |= KPROBE_FLAG_DISABLED;
}
-
+out:
return orig_p;
}
--
1.8.5.6
The global kprobes_all_disarmed flag says that all Kprobes are disarmed
even when they are marked as enabled. Therefore the flag should get
checked by kprobe_disarmed().
Signed-off-by: Petr Mladek <[email protected]>
---
kernel/kprobes.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 54bc2a6960b4..799f56a50cf9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -391,6 +391,9 @@ static inline int kprobe_disarmed(struct kprobe *p)
{
struct optimized_kprobe *op;
+ if (kprobes_all_disarmed)
+ return 1;
+
/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
if (!kprobe_aggrprobe(p))
return kprobe_disabled(p);
@@ -890,9 +893,17 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
#define try_to_optimize_kprobe(p) do {} while (0)
#define __arm_kprobe(p) arch_arm_kprobe(p)
#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
-#define kprobe_disarmed(p) kprobe_disabled(p)
#define wait_for_kprobe_optimizer() do {} while (0)
+/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
+static inline int kprobe_disarmed(struct kprobe *p)
+{
+ if (kprobes_all_disarmed)
+ return 1;
+
+ return kprobe_disabled(p);
+}
+
/* There should be no unused kprobes can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
--
1.8.5.6
Kprobes might get globally disabled by writing to
/sys/kernel/debug/kprobes/enabled but this situation
is not visible in /sys/kernel/debug/kprobes/list.
This patch updates the list, so that it shows [GLOBALLY DISABLED]
when the related Kprobe is enabled but globally disabled.
It also updates the Kprobes documentation.
Signed-off-by: Petr Mladek <[email protected]>
---
Documentation/kprobes.txt | 5 +++--
kernel/kprobes.c | 4 +++-
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 4227ec2e3ab2..8de83af9cccb 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -701,8 +701,9 @@ is also specified. Following columns show probe status. If the probe is on
a virtual address that is no longer valid (module init sections, module
virtual addresses that correspond to modules that've been unloaded),
such probes are marked with [GONE]. If the probe is temporarily disabled,
-such probes are marked with [DISABLED]. If the probe is optimized, it is
-marked with [OPTIMIZED].
+such probes are marked with [DISABLED]. If the probe is enabled but all probes
+are globally disabled, such probes are marked with [GLOBALLY DISABLED].
+If the probe is optimized, it is marked with [OPTIMIZED].
/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 799f56a50cf9..7fc77c9cdd3b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2259,9 +2259,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
if (!pp)
pp = p;
- seq_printf(pi, "%s%s%s%s\n",
+ seq_printf(pi, "%s%s%s%s%s\n",
(kprobe_gone(p) ? "[GONE]" : ""),
((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
+ ((!kprobe_disabled(p) && !kprobe_gone(p) &&
+ kprobes_all_disarmed) ? "[GLOBALLY DISABLED]" : ""),
(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
--
1.8.5.6
(2015/02/27 1:13), Petr Mladek wrote:
> arm_kprobe_ftrace() could fail, especially after introducing ftrace IPMODIFY
> flag and live patching. But this situation is not properly handled.
> This patch adds the most important changes.
Hmm, as you know, I actually working on it to drop IPMODIFY from kprobes except jprobe.
however, yes, that is not enough. We might better set DISABLED flag as
this does.
> First, it does not make sense to register "kprobe_ftrace_ops" if the filter was
> not set.
>
> Second, we should remove the filter if the registration of "kprobe_ftrace_ops"
> fails. The failure might be caused by conflict between the Kprobe and
> a live patch via the IPMODIFY flag. If we remove the filter, we will allow
> to register "kprobe_ftrace_ops" for another non-conflicting Kprobe later.
>
> Third, we need to make sure that "kprobe_ftrace_enabled" is incremented only
> when "kprobe_ftrace_ops" is successfully registered. Otherwise, another
> Kprobe will not try to register it again. Note that we could move the
> manipulation with this counter because it is accessed only under "kprobe_mutex".
>
> Fourth, we should mark the probe as disabled if the ftrace stuff is not usable.
> It will be the correct status. Also it will prevent the unregistration code
> from producing another failure.
>
> It looks more safe to disable the Kprobe directly in "kprobe_ftrace_ops". Note
> that we need to disable also all listed Kprobes in case of an aggregated probe.
> It would be enough to disable only the new one but we do not know which one it
> was. They should be in sync anyway.
>
Acked-by: Masami Hiramatsu <[email protected]>
Thank you!
> Signed-off-by: Petr Mladek <[email protected]>
> ---
> kernel/kprobes.c | 25 +++++++++++++++++++++----
> 1 file changed, 21 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index ee619929cf90..d1b9db690b9c 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -931,16 +931,33 @@ static int prepare_kprobe(struct kprobe *p)
> /* Caller must lock kprobe_mutex */
> static void arm_kprobe_ftrace(struct kprobe *p)
> {
> + struct kprobe *kp;
> int ret;
>
> ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
> (unsigned long)p->addr, 0, 0);
> - WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
> - kprobe_ftrace_enabled++;
> - if (kprobe_ftrace_enabled == 1) {
> + if (WARN(ret < 0,
> + "Failed to arm kprobe-ftrace at %p (%d). The kprobe gets disabled.\n",
> + p->addr, ret))
> + goto err_filter;
> +
> + if (!kprobe_ftrace_enabled) {
> ret = register_ftrace_function(&kprobe_ftrace_ops);
> - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
> + if (WARN(ret < 0,
> + "Failed to init kprobe-ftrace (%d). The probe at %p gets disabled\n",
> + ret, p->addr))
> + goto err_function;
> }
> + kprobe_ftrace_enabled++;
> + return;
> +
> +err_function:
> + ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
> +err_filter:
> + p->flags |= KPROBE_FLAG_DISABLED;
> + if (kprobe_aggrprobe(p))
> + list_for_each_entry_rcu(kp, &p->list, list)
> + kp->flags |= KPROBE_FLAG_DISABLED;
> }
>
> /* Caller must lock kprobe_mutex */
>
--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]
Hi Petr,
(2015/02/27 1:13), Petr Mladek wrote:
> arm_kprobe_ftrace() could fail, especially after introducing ftrace IPMODIFY
> flag and live patching. This patch set adds the error handling and also some
> related fixes.
Hmm, I'd like to drop IPMODIFY from kprobes except for jprobes,
since it actually doesn't change regs->ip which was sent before.
It seems that this series partly covers that work.
> 1st patch includes the most important change. It helps to keep Kprobes
> in a sane state.
>
> 2nd and 3rd patch allows to propagate the error where needed.
OK, I think the 1st one could be merged. 2nd and 3rd one still have some
issues as far as I reviewed.
> The other patches fix problems with the global kprobes_all_disarmed flag.
> They were there even before but they become more visible and critical
> after the arming errors became propagated.
Could you separate the series? And also I doubt we need to show global
disable status, since we can check it via debugfs too (and looks redundant).
Thank you,
> The first patch looks rather safe and might be suitable even for 4.0.
>
> However, I would feel more comfortable if the other patches get some
> testing in linux-next. I did quite some testing and did my best. But
> I started with the three patches and was surprised by the effect of
> the propagated errors. They triggered the BUG_ON() in
> __unregister_kprobe_top() and required the other patches
> to get it working. I wonder if there is any other scenario that
> I have missed.
>
> Of course, I also wait for feedback how to make things better.
>
>
> Petr Mladek (7):
> kprobes: Disable Kprobe when ftrace arming fails
> kprobes: Propagate error from arm_kprobe_ftrace()
> kprobes: Propagate error from disarm_kprobe_ftrace()
> kprobes: Keep consistent state of kprobes_all_disarmed
> kprobes: Do not try to disarm already disarmed Kprobe
> kprobes: Check kprobes_all_disarmed in kprobe_disarmed()
> kprobes: Mark globally disabled Kprobes in debugfs interface
>
> Documentation/kprobes.txt | 5 +-
> kernel/kprobes.c | 279 ++++++++++++++++++++++++++++++++++------------
> 2 files changed, 213 insertions(+), 71 deletions(-)
>
--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]
(2015/02/27 1:13), Petr Mladek wrote:
> arm_kprobe_ftrace() could fail, especially after introducing ftrace IPMODIFY
> flag and live patching.
>
> registry_kprobe() and registry_aggr_kprobe() do not mind about the error
> because the kprobe gets disabled and they keep it registered.
>
> But enable_kprobe() should propagate the error because its task
> fails if ftrace fails.
>
> Also arm_all_kprobes() should return error if it happens. The behavior
> is a bit questionable here. This patch keeps the existing behavior and does
> the best effort. It tries to enable as many Kprobes as possible. It returns
> only the last error code if any. kprobes_all_disarmed is always cleared and
> the message about finished action is always printed. There is going to be
> a separate patch that will improve the behavior.
When I applied it on -tip/master, there is a hunk which is not cleanly applied.
Please rebase it on the latest tip/master, since some logic are changed.
Here I have some comments on it.
>
> Signed-off-by: Petr Mladek <[email protected]>
> ---
> kernel/kprobes.c | 47 ++++++++++++++++++++++++++++++++---------------
> 1 file changed, 32 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index d1b9db690b9c..a69d23add983 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -929,7 +929,7 @@ static int prepare_kprobe(struct kprobe *p)
> }
>
> /* Caller must lock kprobe_mutex */
> -static void arm_kprobe_ftrace(struct kprobe *p)
> +static int arm_kprobe_ftrace(struct kprobe *p)
> {
> struct kprobe *kp;
> int ret;
> @@ -949,7 +949,7 @@ static void arm_kprobe_ftrace(struct kprobe *p)
> goto err_function;
> }
> kprobe_ftrace_enabled++;
> - return;
> + return ret;
>
> err_function:
> ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
> @@ -958,6 +958,7 @@ err_filter:
> if (kprobe_aggrprobe(p))
> list_for_each_entry_rcu(kp, &p->list, list)
> kp->flags |= KPROBE_FLAG_DISABLED;
> + return ret;
> }
>
> /* Caller must lock kprobe_mutex */
> @@ -976,17 +977,15 @@ static void disarm_kprobe_ftrace(struct kprobe *p)
> }
> #else /* !CONFIG_KPROBES_ON_FTRACE */
> #define prepare_kprobe(p) arch_prepare_kprobe(p)
> -#define arm_kprobe_ftrace(p) do {} while (0)
> +#define arm_kprobe_ftrace(p) (0)
> #define disarm_kprobe_ftrace(p) do {} while (0)
> #endif
>
> /* Arm a kprobe with text_mutex */
> -static void arm_kprobe(struct kprobe *kp)
> +static int arm_kprobe(struct kprobe *kp)
> {
> - if (unlikely(kprobe_ftrace(kp))) {
> - arm_kprobe_ftrace(kp);
> - return;
> - }
> + if (unlikely(kprobe_ftrace(kp)))
> + return arm_kprobe_ftrace(kp);
> /*
> * Here, since __arm_kprobe() doesn't use stop_machine(),
> * this doesn't cause deadlock on text_mutex. So, we don't
> @@ -995,6 +994,7 @@ static void arm_kprobe(struct kprobe *kp)
> mutex_lock(&text_mutex);
> __arm_kprobe(kp);
> mutex_unlock(&text_mutex);
> + return 0;
> }
>
> /* Disarm a kprobe with text_mutex */
> @@ -1332,10 +1332,15 @@ out:
> put_online_cpus();
> jump_label_unlock();
>
> + /* Arm when this is the first enabled kprobe at this address */
> if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
> ap->flags &= ~KPROBE_FLAG_DISABLED;
> if (!kprobes_all_disarmed)
> - /* Arm the breakpoint again. */
> + /*
> + * The kprobe is disabled and warning is printed
> + * on error. But we ignore the error code here
> + * because we keep it registered.
> + */
Why? if we can't arm it, we'd better make it fail.
> arm_kprobe(ap);
> }
> return ret;
> @@ -1540,6 +1545,11 @@ int register_kprobe(struct kprobe *p)
> &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
>
> if (!kprobes_all_disarmed && !kprobe_disabled(p))
> + /*
> + * The kprobe is disabled and warning is printed on error.
> + * But we ignore the error code here because we keep it
> + * registered.
> + */
> arm_kprobe(p);
Ditto. If we failed to enable it. We should make it fail and report an error
to caller.
Thank you,
>
> /* Try to optimize kprobe */
> @@ -2040,7 +2050,7 @@ int enable_kprobe(struct kprobe *kp)
>
> if (!kprobes_all_disarmed && kprobe_disabled(p)) {
> p->flags &= ~KPROBE_FLAG_DISABLED;
> - arm_kprobe(p);
> + ret = arm_kprobe(p);
> }
> out:
> mutex_unlock(&kprobe_mutex);
> @@ -2325,11 +2335,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
> .release = seq_release,
> };
>
> -static void arm_all_kprobes(void)
> +static int arm_all_kprobes(void)
> {
> struct hlist_head *head;
> struct kprobe *p;
> unsigned int i;
> + int err, ret = 0;
>
> mutex_lock(&kprobe_mutex);
>
> @@ -2341,8 +2352,11 @@ static void arm_all_kprobes(void)
> for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> head = &kprobe_table[i];
> hlist_for_each_entry_rcu(p, head, hlist)
> - if (!kprobe_disabled(p))
> - arm_kprobe(p);
> + if (!kprobe_disabled(p)) {
> + err = arm_kprobe(p);
> + if (err)
> + ret = err;
> + }
> }
>
> kprobes_all_disarmed = false;
> @@ -2350,7 +2364,7 @@ static void arm_all_kprobes(void)
>
> already_enabled:
> mutex_unlock(&kprobe_mutex);
> - return;
> + return ret;
> }
>
> static void disarm_all_kprobes(void)
> @@ -2407,6 +2421,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
> {
> char buf[32];
> size_t buf_size;
> + int err = 0;
>
> buf_size = min(count, (sizeof(buf)-1));
> if (copy_from_user(buf, user_buf, buf_size))
> @@ -2417,7 +2432,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
> case 'y':
> case 'Y':
> case '1':
> - arm_all_kprobes();
> + err = arm_all_kprobes();
> break;
> case 'n':
> case 'N':
> @@ -2428,6 +2443,8 @@ static ssize_t write_enabled_file_bool(struct file *file,
> return -EINVAL;
> }
>
> + if (err)
> + return err;
> return count;
> }
>
>
--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]
(2015/02/27 1:13), Petr Mladek wrote:
> Also disarm_kprobe_ftrace() could fail, for example if there is an internal
> error in the Kprobe code and we try to unregister some Kprobe that is not
> registered.
No, non-registered kprobes are rejected at the beginning of __disable_kprobe.
And I'm not sure unregister_ftrace_function() and ftrace_set_filter_ip() can
fail if correctly set. That seems a bug in the kernel (not a normal
situation).
> If we fail to unregister the ftrace function, we still could try to disarm
> the Kprobe by removing the filter. This is why the first error code is not
> fatal and we try to continue.
>
> __disable_kprobe() has to return the error code via ERR_PTR. It allows to
> pass -EINVAL instead of NULL for invalid Kprobes. Then the NULL pointer does
> not need to be transformed into -EINVAL later.
>
> In addition, __disable_kprobe() should disable the child probe only when
> the parent probe has been successfully disabled. Note that we could always
> clear KPROBE_FLAG_DISABLED in case of error. It does not harm even when
> p == orig_p. It keeps the code nesting on reasonable level.
>
> The error handling is a bit complicated in __unregister_kprobe_top().
> It is used in unregister_*probes() that are used in module removal callbacks.
> Any error here could not stop the removal of the module and the related Kprobe
> handlers. Therefore such an error is fatal. There is already a similar check
> in the "disarmed:" goto target.
OK.
>
> The only exception is when we try to unregister an invalid Kprobe. It does not
> exist and therefore should not harm the system. This error was weak even before.
>
> disable_kprobe() just passes the error as it did before.
>
> disarm_all_kprobes() uses similar approach like arm_all_kprobes(). It keeps
> the current behavior and does the best effort. It tries to disable as many
> Kprobes as possible. It always reports success. It returns the last error
> code. There is going to be separate patch that will improve this behavior.
>
OK, this looks good to me except for the comment above.
But could you also update this for the latest -tip tree?
Thank you,
> Signed-off-by: Petr Mladek <[email protected]>
> ---
> kernel/kprobes.c | 77 +++++++++++++++++++++++++++++++++++++-------------------
> 1 file changed, 51 insertions(+), 26 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index a69d23add983..ba57147bd52c 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -962,23 +962,26 @@ err_filter:
> }
>
> /* Caller must lock kprobe_mutex */
> -static void disarm_kprobe_ftrace(struct kprobe *p)
> +static int disarm_kprobe_ftrace(struct kprobe *p)
> {
> - int ret;
> + int ret = 0;
>
> - kprobe_ftrace_enabled--;
> - if (kprobe_ftrace_enabled == 0) {
> + if (kprobe_ftrace_enabled == 1) {
> ret = unregister_ftrace_function(&kprobe_ftrace_ops);
> - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
> + WARN(ret < 0, "Failed to remove kprobe-ftrace (%d)\n", ret);
> }
> + if (!ret)
> + kprobe_ftrace_enabled--;
> +
> ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
> - (unsigned long)p->addr, 1, 0);
> + (unsigned long)p->addr, 1, 0);
> WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
> + return ret;
> }
> #else /* !CONFIG_KPROBES_ON_FTRACE */
> #define prepare_kprobe(p) arch_prepare_kprobe(p)
> #define arm_kprobe_ftrace(p) (0)
> -#define disarm_kprobe_ftrace(p) do {} while (0)
> +#define disarm_kprobe_ftrace(p) (0)
> #endif
>
> /* Arm a kprobe with text_mutex */
> @@ -998,16 +1001,15 @@ static int arm_kprobe(struct kprobe *kp)
> }
>
> /* Disarm a kprobe with text_mutex */
> -static void disarm_kprobe(struct kprobe *kp, bool reopt)
> +static int disarm_kprobe(struct kprobe *kp, bool reopt)
> {
> - if (unlikely(kprobe_ftrace(kp))) {
> - disarm_kprobe_ftrace(kp);
> - return;
> - }
> + if (unlikely(kprobe_ftrace(kp)))
> + return disarm_kprobe_ftrace(kp);
> /* Ditto */
> mutex_lock(&text_mutex);
> __disarm_kprobe(kp, reopt);
> mutex_unlock(&text_mutex);
> + return 0;
> }
>
> /*
> @@ -1585,11 +1587,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
> static struct kprobe *__disable_kprobe(struct kprobe *p)
> {
> struct kprobe *orig_p;
> + int err;
>
> /* Get an original kprobe for return */
> orig_p = __get_valid_kprobe(p);
> if (unlikely(orig_p == NULL))
> - return NULL;
> + return ERR_PTR(-EINVAL);
>
> if (!kprobe_disabled(p)) {
> /* Disable probe if it is a child probe */
> @@ -1598,7 +1601,11 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
>
> /* Try to disarm and disable this/parent probe */
> if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
> - disarm_kprobe(orig_p, true);
> + err = disarm_kprobe(orig_p, true);
> + if (err) {
> + p->flags &= ~KPROBE_FLAG_DISABLED;
> + return ERR_PTR(err);
> + }
> orig_p->flags |= KPROBE_FLAG_DISABLED;
> }
> }
> @@ -1615,8 +1622,17 @@ static int __unregister_kprobe_top(struct kprobe *p)
>
> /* Disable kprobe. This will disarm it if needed. */
> ap = __disable_kprobe(p);
> - if (ap == NULL)
> - return -EINVAL;
> + /*
> + * We could not prevent Kprobe handlers from going. Therefore
> + * we rather do not continue when the Kprobe is still enabled.
> + * The only exception is an invalid Kprobe. It does not exist
> + * and therefore could not harm the system.
> + */
> + if (IS_ERR(ap)) {
> + if (PTR_ERR(ap) == -EINVAL)
> + return PTR_ERR(ap);
> + BUG();
Yeah, this must be done.
> + }
>
> if (ap == p)
> /*
> @@ -2012,12 +2028,14 @@ static void kill_kprobe(struct kprobe *p)
> int disable_kprobe(struct kprobe *kp)
> {
> int ret = 0;
> + struct kprobe *p;
>
> mutex_lock(&kprobe_mutex);
>
> /* Disable this kprobe */
> - if (__disable_kprobe(kp) == NULL)
> - ret = -EINVAL;
> + p = __disable_kprobe(kp);
> + if (IS_ERR(p))
> + ret = PTR_ERR(p);
>
> mutex_unlock(&kprobe_mutex);
> return ret;
> @@ -2367,34 +2385,41 @@ already_enabled:
> return ret;
> }
>
> -static void disarm_all_kprobes(void)
> +static int disarm_all_kprobes(void)
> {
> struct hlist_head *head;
> struct kprobe *p;
> unsigned int i;
> + int err, ret = 0;
>
> mutex_lock(&kprobe_mutex);
>
> /* If kprobes are already disarmed, just return */
> if (kprobes_all_disarmed) {
> mutex_unlock(&kprobe_mutex);
> - return;
> + return 0;
> }
>
> - kprobes_all_disarmed = true;
> - printk(KERN_INFO "Kprobes globally disabled\n");
> -
> for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> head = &kprobe_table[i];
> hlist_for_each_entry_rcu(p, head, hlist) {
> - if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
> - disarm_kprobe(p, false);
> + if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
> + err = disarm_kprobe(p, false);
> + if (err)
> + ret = err;
> + }
> }
> }
> +
> + kprobes_all_disarmed = true;
> + pr_info("Kprobes globally disabled\n");
> +
> mutex_unlock(&kprobe_mutex);
>
> /* Wait for disarming all kprobes by optimizer */
> wait_for_kprobe_optimizer();
> +
> + return ret;
> }
>
> /*
> @@ -2437,7 +2462,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
> case 'n':
> case 'N':
> case '0':
> - disarm_all_kprobes();
> + err = disarm_all_kprobes();
> break;
> default:
> return -EINVAL;
>
--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]