2013-04-05 13:27:13

by Oskar Andero

[permalink] [raw]
Subject: [PATCH v3 0/4] kprobes: split blacklist into common and arch

Hi,

This is version 3 of the patch-set for splitting arch and common kprobe
blackpoints.

Changes since last version are:
- 1/4: Add write memory barrier at blacklist initialization.
- 1/4: Change kprobe_blacklist_initialized type to boolean.
- 2/4: Fix racing of kprobe_blacklist.
- 2/4: Define arch_kprobes_blacksyms.* as __weak symbols.

-Oskar

Björn Davidsson (1):
kprobes: move x86-specific blacklist symbols to arch directory

Oskar Andero (2):
kprobes: split blacklist into common and arch
kprobes: replace printk with pr_-functions

Toby Collett (1):
kprobes: delay blacklist symbol lookup until we actually need it

arch/x86/kernel/kprobes/core.c | 7 ++
kernel/kprobes.c | 159 +++++++++++++++++++++++++++--------------
2 files changed, 112 insertions(+), 54 deletions(-)

--
1.8.1.5


2013-04-05 13:27:17

by Oskar Andero

[permalink] [raw]
Subject: [PATCH v3 4/4] kprobes: replace printk with pr_-functions

Instead of using printk use pr_info/pr_err/pr_warn. This was
detected by the checkpatch.pl script.

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Signed-off-by: Oskar Andero <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>
---
kernel/kprobes.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b289384..08facfc 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -172,7 +172,7 @@ static void init_kprobe_blacklist(void)
kprobe_lookup_name(kretprobe_blacklist[i].name,
kretprobe_blacklist[i].addr);
if (!kretprobe_blacklist[i].addr)
- printk("kretprobe: lookup failed: %s\n",
+ pr_err("kretprobe: lookup failed: %s\n",
kretprobe_blacklist[i].name);
}
}
@@ -787,7 +787,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
*/
op = container_of(ap, struct optimized_kprobe, kp);
if (unlikely(list_empty(&op->list)))
- printk(KERN_WARNING "Warning: found a stray unused "
+ pr_warn("Warning: found a stray unused "
"aggrprobe@%p\n", ap->addr);
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
@@ -894,7 +894,7 @@ static void __kprobes optimize_all_kprobes(void)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
- printk(KERN_INFO "Kprobes globally optimized\n");
+ pr_info("Kprobes globally optimized\n");
}

/* This should be called with kprobe_mutex locked */
@@ -918,7 +918,7 @@ static void __kprobes unoptimize_all_kprobes(void)
}
/* Wait for unoptimizing completion */
wait_for_kprobe_optimizer();
- printk(KERN_INFO "Kprobes globally unoptimized\n");
+ pr_info("Kprobes globally unoptimized\n");
}

int sysctl_kprobes_optimization;
@@ -989,7 +989,7 @@ static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
/* There should be no unused kprobes can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
- printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
+ pr_err("Error: There should be no unused kprobe here.\n");
BUG_ON(kprobe_unused(ap));
}

@@ -2103,8 +2103,8 @@ EXPORT_SYMBOL_GPL(enable_kprobe);

void __kprobes dump_kprobe(struct kprobe *kp)
{
- printk(KERN_WARNING "Dumping kprobe:\n");
- printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
+ pr_warn("Dumping kprobe:\n");
+ pr_warn("Name: %s\nAddress: %p\nOffset: %x\n",
kp->symbol_name, kp->addr, kp->offset);
}

@@ -2300,7 +2300,7 @@ static void __kprobes arm_all_kprobes(void)
}

kprobes_all_disarmed = false;
- printk(KERN_INFO "Kprobes globally enabled\n");
+ pr_info("Kprobes globally enabled\n");

already_enabled:
mutex_unlock(&kprobe_mutex);
@@ -2322,7 +2322,7 @@ static void __kprobes disarm_all_kprobes(void)
}

kprobes_all_disarmed = true;
- printk(KERN_INFO "Kprobes globally disabled\n");
+ pr_info("Kprobes globally disabled\n");

for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
--
1.8.1.5

2013-04-05 13:27:15

by Oskar Andero

[permalink] [raw]
Subject: [PATCH v3 2/4] kprobes: split blacklist into common and arch

Some blackpoints are only valid for specific architectures. To let each
architecture specify its own blackpoints the list has been split in two
lists: common and arch. The common list is kept in kernel/kprobes.c and
the arch list is kept in the arch/ directory.

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: [email protected]
Signed-off-by: Oskar Andero <[email protected]>
---
kernel/kprobes.c | 88 +++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 59 insertions(+), 29 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c8c2281..2458ae1 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -68,7 +68,6 @@
#endif

static int kprobes_initialized;
-static bool kprobe_blacklist_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

@@ -94,31 +93,64 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
*
* For such cases, we now have a blacklist
*/
-static struct kprobe_blackpoint kprobe_blacklist[] = {
- {"preempt_schedule",},
- {"native_get_debugreg",},
- {"irq_entries_start",},
- {"common_interrupt",},
- {"mcount",}, /* mcount can be called from everywhere */
- {NULL} /* Terminator */
+static const char * const common_kprobes_blacksyms[] = {
+ "preempt_schedule",
+ "native_get_debugreg",
+ "irq_entries_start",
+ "common_interrupt",
+ "mcount", /* mcount can be called from everywhere */
};
+static const size_t common_kprobes_blacksyms_size =
+ ARRAY_SIZE(common_kprobes_blacksyms);
+
+/*
+ * These weak symbols can be overridden from the arch/ directory for
+ * architecture specific blackpoints.
+ */
+const char * const __weak arch_kprobes_blacksyms[] = {};
+const size_t __weak arch_kprobes_blacksyms_size;
+
+static struct kprobe_blackpoint *kprobe_blacklist;
+static size_t kprobe_blacklist_size;
+
+static void init_kprobe_blacklist_entry(struct kprobe_blackpoint *kb,
+ const char * const name)
+{
+ const char *symbol_name;
+ char *modname, namebuf[128];
+ void *addr;
+ unsigned long offset = 0, size = 0;
+
+ kb->name = name;
+ kprobe_lookup_name(kb->name, addr);
+ if (!addr)
+ return;
+
+ kb->start_addr = (unsigned long)addr;
+ symbol_name = kallsyms_lookup(kb->start_addr,
+ &size, &offset, &modname, namebuf);
+ if (!symbol_name)
+ kb->range = 0;
+ else
+ kb->range = size;
+}

/* it can take some time ( > 100ms ) to initialise the
* blacklist so we delay this until we actually need it
*/
static void init_kprobe_blacklist(void)
{
- int i;
- unsigned long offset = 0, size = 0;
- char *modname, namebuf[128];
- const char *symbol_name;
- void *addr;
+ int i, j = 0;
struct kprobe_blackpoint *kb;

mutex_lock(&kprobe_mutex);
- if (kprobe_blacklist_initialized)
+ if (kprobe_blacklist)
goto out;

+ kprobe_blacklist_size = common_kprobes_blacksyms_size +
+ arch_kprobes_blacksyms_size;
+ kb = kzalloc(sizeof(*kb) * kprobe_blacklist_size, GFP_KERNEL);
+
/*
* Lookup and populate the kprobe_blacklist.
*
@@ -127,18 +159,14 @@ static void init_kprobe_blacklist(void)
* since a kprobe need not necessarily be at the beginning
* of a function.
*/
- for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
- kprobe_lookup_name(kb->name, addr);
- if (!addr)
- continue;
+ for (i = 0; i < common_kprobes_blacksyms_size; i++, j++) {
+ init_kprobe_blacklist_entry(&kb[j],
+ common_kprobes_blacksyms[i]);
+ }

- kb->start_addr = (unsigned long)addr;
- symbol_name = kallsyms_lookup(kb->start_addr,
- &size, &offset, &modname, namebuf);
- if (!symbol_name)
- kb->range = 0;
- else
- kb->range = size;
+ for (i = 0; i < arch_kprobes_blacksyms_size; i++, j++) {
+ init_kprobe_blacklist_entry(&kb[j],
+ arch_kprobes_blacksyms[i]);
}

if (kretprobe_blacklist_size) {
@@ -153,7 +181,7 @@ static void init_kprobe_blacklist(void)
}

smp_wmb();
- kprobe_blacklist_initialized = true;
+ kprobe_blacklist = kb;

out:
mutex_unlock(&kprobe_mutex);
@@ -1384,18 +1412,20 @@ out:
static int __kprobes in_kprobes_functions(unsigned long addr)
{
struct kprobe_blackpoint *kb;
+ int i;

if (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end)
return -EINVAL;

- if (unlikely(!kprobe_blacklist_initialized))
+ if (unlikely(!kprobe_blacklist))
init_kprobe_blacklist();
/*
* If there exists a kprobe_blacklist, verify and
* fail any probe registration in the prohibited area
*/
- for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ for (i = 0; i < kprobe_blacklist_size; i++) {
+ kb = &kprobe_blacklist[i];
if (kb->start_addr) {
if (addr >= kb->start_addr &&
addr < (kb->start_addr + kb->range))
@@ -1876,7 +1906,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
void *addr;

if (kretprobe_blacklist_size) {
- if (unlikely(!kprobe_blacklist_initialized))
+ if (unlikely(!kprobe_blacklist))
init_kprobe_blacklist();
addr = kprobe_addr(&rp->kp);
if (IS_ERR(addr))
--
1.8.1.5

2013-04-05 13:27:40

by Oskar Andero

[permalink] [raw]
Subject: [PATCH v3 3/4] kprobes: move x86-specific blacklist symbols to arch directory

From: Björn Davidsson <[email protected]>

The common kprobes blacklist contains x86-specific symbols.
Looking for these in kallsyms takes unnecessary time during startup
on non-X86 platforms. The symbols were moved to
arch/x86/kernel/kprobes/core.c.

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: [email protected]
Reviewed-by: Radovan Lekanovic <[email protected]>
Signed-off-by: Björn Davidsson <[email protected]>
Signed-off-by: Oskar Andero <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>
---
arch/x86/kernel/kprobes/core.c | 7 +++++++
kernel/kprobes.c | 3 ---
2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7bfe318..41ce6c1 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -65,6 +65,13 @@ void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

+const char * const arch_kprobes_blacksyms[] = {
+ "native_get_debugreg",
+ "irq_entries_start",
+ "common_interrupt",
+};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 2458ae1..b289384 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -95,9 +95,6 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
*/
static const char * const common_kprobes_blacksyms[] = {
"preempt_schedule",
- "native_get_debugreg",
- "irq_entries_start",
- "common_interrupt",
"mcount", /* mcount can be called from everywhere */
};
static const size_t common_kprobes_blacksyms_size =
--
1.8.1.5

2013-04-05 13:28:06

by Oskar Andero

[permalink] [raw]
Subject: [PATCH v3 1/4] kprobes: delay blacklist symbol lookup until we actually need it

From: Toby Collett <[email protected]>

The symbol lookup can take a long time and kprobes is
initialised very early in boot, so delay symbol lookup
until the blacklist is first used.

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Reviewed-by: Radovan Lekanovic <[email protected]>
Signed-off-by: Toby Collett <[email protected]>
Signed-off-by: Oskar Andero <[email protected]>
---
kernel/kprobes.c | 100 ++++++++++++++++++++++++++++++++++---------------------
1 file changed, 62 insertions(+), 38 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e35be53..c8c2281 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -68,6 +68,7 @@
#endif

static int kprobes_initialized;
+static bool kprobe_blacklist_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

@@ -102,6 +103,62 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
{NULL} /* Terminator */
};

+/* it can take some time ( > 100ms ) to initialise the
+ * blacklist so we delay this until we actually need it
+ */
+static void init_kprobe_blacklist(void)
+{
+ int i;
+ unsigned long offset = 0, size = 0;
+ char *modname, namebuf[128];
+ const char *symbol_name;
+ void *addr;
+ struct kprobe_blackpoint *kb;
+
+ mutex_lock(&kprobe_mutex);
+ if (kprobe_blacklist_initialized)
+ goto out;
+
+ /*
+ * Lookup and populate the kprobe_blacklist.
+ *
+ * Unlike the kretprobe blacklist, we'll need to determine
+ * the range of addresses that belong to the said functions,
+ * since a kprobe need not necessarily be at the beginning
+ * of a function.
+ */
+ for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ kprobe_lookup_name(kb->name, addr);
+ if (!addr)
+ continue;
+
+ kb->start_addr = (unsigned long)addr;
+ symbol_name = kallsyms_lookup(kb->start_addr,
+ &size, &offset, &modname, namebuf);
+ if (!symbol_name)
+ kb->range = 0;
+ else
+ kb->range = size;
+ }
+
+ if (kretprobe_blacklist_size) {
+ /* lookup the function address from its name */
+ for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+ kprobe_lookup_name(kretprobe_blacklist[i].name,
+ kretprobe_blacklist[i].addr);
+ if (!kretprobe_blacklist[i].addr)
+ printk("kretprobe: lookup failed: %s\n",
+ kretprobe_blacklist[i].name);
+ }
+ }
+
+ smp_wmb();
+ kprobe_blacklist_initialized = true;
+
+out:
+ mutex_unlock(&kprobe_mutex);
+}
+
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -1331,6 +1388,9 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
if (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end)
return -EINVAL;
+
+ if (unlikely(!kprobe_blacklist_initialized))
+ init_kprobe_blacklist();
/*
* If there exists a kprobe_blacklist, verify and
* fail any probe registration in the prohibited area
@@ -1816,6 +1876,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
void *addr;

if (kretprobe_blacklist_size) {
+ if (unlikely(!kprobe_blacklist_initialized))
+ init_kprobe_blacklist();
addr = kprobe_addr(&rp->kp);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -2065,11 +2127,6 @@ static struct notifier_block kprobe_module_nb = {
static int __init init_kprobes(void)
{
int i, err = 0;
- unsigned long offset = 0, size = 0;
- char *modname, namebuf[128];
- const char *symbol_name;
- void *addr;
- struct kprobe_blackpoint *kb;

/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
@@ -2079,39 +2136,6 @@ static int __init init_kprobes(void)
raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
}

- /*
- * Lookup and populate the kprobe_blacklist.
- *
- * Unlike the kretprobe blacklist, we'll need to determine
- * the range of addresses that belong to the said functions,
- * since a kprobe need not necessarily be at the beginning
- * of a function.
- */
- for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
- kprobe_lookup_name(kb->name, addr);
- if (!addr)
- continue;
-
- kb->start_addr = (unsigned long)addr;
- symbol_name = kallsyms_lookup(kb->start_addr,
- &size, &offset, &modname, namebuf);
- if (!symbol_name)
- kb->range = 0;
- else
- kb->range = size;
- }
-
- if (kretprobe_blacklist_size) {
- /* lookup the function address from its name */
- for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
- kprobe_lookup_name(kretprobe_blacklist[i].name,
- kretprobe_blacklist[i].addr);
- if (!kretprobe_blacklist[i].addr)
- printk("kretprobe: lookup failed: %s\n",
- kretprobe_blacklist[i].name);
- }
- }
-
#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
/* Init kprobe_optinsn_slots */
--
1.8.1.5

Subject: Re: [PATCH v3 1/4] kprobes: delay blacklist symbol lookup until we actually need it

(2013/04/05 22:26), Oskar Andero wrote:
> From: Toby Collett <[email protected]>
>
> The symbol lookup can take a long time and kprobes is
> initialised very early in boot, so delay symbol lookup
> until the blacklist is first used.

Acked-by: Masami Hiramatsu <[email protected]>

Thanks!

>
> Cc: Masami Hiramatsu <[email protected]>
> Cc: David S. Miller <[email protected]>
> Reviewed-by: Radovan Lekanovic <[email protected]>
> Signed-off-by: Toby Collett <[email protected]>
> Signed-off-by: Oskar Andero <[email protected]>
> ---
> kernel/kprobes.c | 100 ++++++++++++++++++++++++++++++++++---------------------
> 1 file changed, 62 insertions(+), 38 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index e35be53..c8c2281 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -68,6 +68,7 @@
> #endif
>
> static int kprobes_initialized;
> +static bool kprobe_blacklist_initialized;
> static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>
> @@ -102,6 +103,62 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
> {NULL} /* Terminator */
> };
>
> +/* it can take some time ( > 100ms ) to initialise the
> + * blacklist so we delay this until we actually need it
> + */
> +static void init_kprobe_blacklist(void)
> +{
> + int i;
> + unsigned long offset = 0, size = 0;
> + char *modname, namebuf[128];
> + const char *symbol_name;
> + void *addr;
> + struct kprobe_blackpoint *kb;
> +
> + mutex_lock(&kprobe_mutex);
> + if (kprobe_blacklist_initialized)
> + goto out;
> +
> + /*
> + * Lookup and populate the kprobe_blacklist.
> + *
> + * Unlike the kretprobe blacklist, we'll need to determine
> + * the range of addresses that belong to the said functions,
> + * since a kprobe need not necessarily be at the beginning
> + * of a function.
> + */
> + for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
> + kprobe_lookup_name(kb->name, addr);
> + if (!addr)
> + continue;
> +
> + kb->start_addr = (unsigned long)addr;
> + symbol_name = kallsyms_lookup(kb->start_addr,
> + &size, &offset, &modname, namebuf);
> + if (!symbol_name)
> + kb->range = 0;
> + else
> + kb->range = size;
> + }
> +
> + if (kretprobe_blacklist_size) {
> + /* lookup the function address from its name */
> + for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
> + kprobe_lookup_name(kretprobe_blacklist[i].name,
> + kretprobe_blacklist[i].addr);
> + if (!kretprobe_blacklist[i].addr)
> + printk("kretprobe: lookup failed: %s\n",
> + kretprobe_blacklist[i].name);
> + }
> + }
> +
> + smp_wmb();
> + kprobe_blacklist_initialized = true;
> +
> +out:
> + mutex_unlock(&kprobe_mutex);
> +}
> +
> #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
> /*
> * kprobe->ainsn.insn points to the copy of the instruction to be
> @@ -1331,6 +1388,9 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
> if (addr >= (unsigned long)__kprobes_text_start &&
> addr < (unsigned long)__kprobes_text_end)
> return -EINVAL;
> +
> + if (unlikely(!kprobe_blacklist_initialized))
> + init_kprobe_blacklist();
> /*
> * If there exists a kprobe_blacklist, verify and
> * fail any probe registration in the prohibited area
> @@ -1816,6 +1876,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
> void *addr;
>
> if (kretprobe_blacklist_size) {
> + if (unlikely(!kprobe_blacklist_initialized))
> + init_kprobe_blacklist();
> addr = kprobe_addr(&rp->kp);
> if (IS_ERR(addr))
> return PTR_ERR(addr);
> @@ -2065,11 +2127,6 @@ static struct notifier_block kprobe_module_nb = {
> static int __init init_kprobes(void)
> {
> int i, err = 0;
> - unsigned long offset = 0, size = 0;
> - char *modname, namebuf[128];
> - const char *symbol_name;
> - void *addr;
> - struct kprobe_blackpoint *kb;
>
> /* FIXME allocate the probe table, currently defined statically */
> /* initialize all list heads */
> @@ -2079,39 +2136,6 @@ static int __init init_kprobes(void)
> raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
> }
>
> - /*
> - * Lookup and populate the kprobe_blacklist.
> - *
> - * Unlike the kretprobe blacklist, we'll need to determine
> - * the range of addresses that belong to the said functions,
> - * since a kprobe need not necessarily be at the beginning
> - * of a function.
> - */
> - for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
> - kprobe_lookup_name(kb->name, addr);
> - if (!addr)
> - continue;
> -
> - kb->start_addr = (unsigned long)addr;
> - symbol_name = kallsyms_lookup(kb->start_addr,
> - &size, &offset, &modname, namebuf);
> - if (!symbol_name)
> - kb->range = 0;
> - else
> - kb->range = size;
> - }
> -
> - if (kretprobe_blacklist_size) {
> - /* lookup the function address from its name */
> - for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
> - kprobe_lookup_name(kretprobe_blacklist[i].name,
> - kretprobe_blacklist[i].addr);
> - if (!kretprobe_blacklist[i].addr)
> - printk("kretprobe: lookup failed: %s\n",
> - kretprobe_blacklist[i].name);
> - }
> - }
> -
> #if defined(CONFIG_OPTPROBES)
> #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
> /* Init kprobe_optinsn_slots */
>


--
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

Subject: Re: [PATCH v3 2/4] kprobes: split blacklist into common and arch

(2013/04/05 22:26), Oskar Andero wrote:
> Some blackpoints are only valid for specific architectures. To let each
> architecture specify its own blackpoints the list has been split in two
> lists: common and arch. The common list is kept in kernel/kprobes.c and
> the arch list is kept in the arch/ directory.

Looks good for me:)

Acked-by: Masami Hiramatsu <[email protected]>

Thank you!

>
> Cc: Masami Hiramatsu <[email protected]>
> Cc: David S. Miller <[email protected]>
> Cc: [email protected]
> Signed-off-by: Oskar Andero <[email protected]>
> ---
> kernel/kprobes.c | 88 +++++++++++++++++++++++++++++++++++++-------------------
> 1 file changed, 59 insertions(+), 29 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index c8c2281..2458ae1 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -68,7 +68,6 @@
> #endif
>
> static int kprobes_initialized;
> -static bool kprobe_blacklist_initialized;
> static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>
> @@ -94,31 +93,64 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
> *
> * For such cases, we now have a blacklist
> */
> -static struct kprobe_blackpoint kprobe_blacklist[] = {
> - {"preempt_schedule",},
> - {"native_get_debugreg",},
> - {"irq_entries_start",},
> - {"common_interrupt",},
> - {"mcount",}, /* mcount can be called from everywhere */
> - {NULL} /* Terminator */
> +static const char * const common_kprobes_blacksyms[] = {
> + "preempt_schedule",
> + "native_get_debugreg",
> + "irq_entries_start",
> + "common_interrupt",
> + "mcount", /* mcount can be called from everywhere */
> };
> +static const size_t common_kprobes_blacksyms_size =
> + ARRAY_SIZE(common_kprobes_blacksyms);
> +
> +/*
> + * These weak symbols can be overridden from the arch/ directory for
> + * architecture specific blackpoints.
> + */
> +const char * const __weak arch_kprobes_blacksyms[] = {};
> +const size_t __weak arch_kprobes_blacksyms_size;
> +
> +static struct kprobe_blackpoint *kprobe_blacklist;
> +static size_t kprobe_blacklist_size;
> +
> +static void init_kprobe_blacklist_entry(struct kprobe_blackpoint *kb,
> + const char * const name)
> +{
> + const char *symbol_name;
> + char *modname, namebuf[128];
> + void *addr;
> + unsigned long offset = 0, size = 0;
> +
> + kb->name = name;
> + kprobe_lookup_name(kb->name, addr);
> + if (!addr)
> + return;
> +
> + kb->start_addr = (unsigned long)addr;
> + symbol_name = kallsyms_lookup(kb->start_addr,
> + &size, &offset, &modname, namebuf);
> + if (!symbol_name)
> + kb->range = 0;
> + else
> + kb->range = size;
> +}
>
> /* it can take some time ( > 100ms ) to initialise the
> * blacklist so we delay this until we actually need it
> */
> static void init_kprobe_blacklist(void)
> {
> - int i;
> - unsigned long offset = 0, size = 0;
> - char *modname, namebuf[128];
> - const char *symbol_name;
> - void *addr;
> + int i, j = 0;
> struct kprobe_blackpoint *kb;
>
> mutex_lock(&kprobe_mutex);
> - if (kprobe_blacklist_initialized)
> + if (kprobe_blacklist)
> goto out;
>
> + kprobe_blacklist_size = common_kprobes_blacksyms_size +
> + arch_kprobes_blacksyms_size;
> + kb = kzalloc(sizeof(*kb) * kprobe_blacklist_size, GFP_KERNEL);
> +
> /*
> * Lookup and populate the kprobe_blacklist.
> *
> @@ -127,18 +159,14 @@ static void init_kprobe_blacklist(void)
> * since a kprobe need not necessarily be at the beginning
> * of a function.
> */
> - for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
> - kprobe_lookup_name(kb->name, addr);
> - if (!addr)
> - continue;
> + for (i = 0; i < common_kprobes_blacksyms_size; i++, j++) {
> + init_kprobe_blacklist_entry(&kb[j],
> + common_kprobes_blacksyms[i]);
> + }
>
> - kb->start_addr = (unsigned long)addr;
> - symbol_name = kallsyms_lookup(kb->start_addr,
> - &size, &offset, &modname, namebuf);
> - if (!symbol_name)
> - kb->range = 0;
> - else
> - kb->range = size;
> + for (i = 0; i < arch_kprobes_blacksyms_size; i++, j++) {
> + init_kprobe_blacklist_entry(&kb[j],
> + arch_kprobes_blacksyms[i]);
> }
>
> if (kretprobe_blacklist_size) {
> @@ -153,7 +181,7 @@ static void init_kprobe_blacklist(void)
> }
>
> smp_wmb();
> - kprobe_blacklist_initialized = true;
> + kprobe_blacklist = kb;
>
> out:
> mutex_unlock(&kprobe_mutex);
> @@ -1384,18 +1412,20 @@ out:
> static int __kprobes in_kprobes_functions(unsigned long addr)
> {
> struct kprobe_blackpoint *kb;
> + int i;
>
> if (addr >= (unsigned long)__kprobes_text_start &&
> addr < (unsigned long)__kprobes_text_end)
> return -EINVAL;
>
> - if (unlikely(!kprobe_blacklist_initialized))
> + if (unlikely(!kprobe_blacklist))
> init_kprobe_blacklist();
> /*
> * If there exists a kprobe_blacklist, verify and
> * fail any probe registration in the prohibited area
> */
> - for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
> + for (i = 0; i < kprobe_blacklist_size; i++) {
> + kb = &kprobe_blacklist[i];
> if (kb->start_addr) {
> if (addr >= kb->start_addr &&
> addr < (kb->start_addr + kb->range))
> @@ -1876,7 +1906,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
> void *addr;
>
> if (kretprobe_blacklist_size) {
> - if (unlikely(!kprobe_blacklist_initialized))
> + if (unlikely(!kprobe_blacklist))
> init_kprobe_blacklist();
> addr = kprobe_addr(&rp->kp);
> if (IS_ERR(addr))
>


--
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]