2013-04-04 12:52:08

by Oskar Andero

Subject: [PATCH v2 0/4] kprobes: split blacklist into common and arch

Hi,

This is version 2 of the patch-set for splitting the kprobe blacklist into
common and arch parts.

Changes since last version are:
- Don't use double pointer for blacklist.
- Add mutex around blacklist initialization code.
- Use unlikely in if-statement around init_kprobe_blacklist() calls.

I have also included linux-arch in Cc for this post.

-Oskar

Björn Davidsson (1):
kprobes: move x86-specific blacklist symbols to arch directory

Oskar Andero (2):
kprobes: split blacklist into common and arch
kprobes: replace printk with pr_-functions

Toby Collett (1):
kprobes: delay blacklist symbol lookup until we actually need it

arch/arc/kernel/kprobes.c | 3 +
arch/arm/kernel/kprobes.c | 2 +
arch/avr32/kernel/kprobes.c | 3 +
arch/ia64/kernel/kprobes.c | 3 +
arch/mips/kernel/kprobes.c | 3 +
arch/mn10300/kernel/kprobes.c | 2 +
arch/powerpc/kernel/kprobes.c | 3 +
arch/s390/kernel/kprobes.c | 3 +
arch/sh/kernel/kprobes.c | 3 +
arch/sparc/kernel/kprobes.c | 3 +
arch/x86/kernel/kprobes/core.c | 7 ++
kernel/kprobes.c | 152 ++++++++++++++++++++++++++---------------
12 files changed, 133 insertions(+), 54 deletions(-)

--
1.8.1.5


2013-04-04 12:52:09

by Oskar Andero

Subject: [PATCH v2 1/4] kprobes: delay blacklist symbol lookup until we actually need it

From: Toby Collett <[email protected]>

The symbol lookup can take a long time and kprobes is
initialised very early in boot, so delay symbol lookup
until the blacklist is first used.
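
For readers skimming the hunks below, here is a condensed, illustrative sketch
of the lazy-initialization pattern this patch introduces. It mirrors the real
code but is not the exact diff; blacklist_user() is a placeholder for the real
callers (in_kprobes_functions() and register_kretprobe()):

static int kprobe_blacklist_initialized;

static void init_kprobe_blacklist(void)
{
	mutex_lock(&kprobe_mutex);
	if (kprobe_blacklist_initialized)
		goto out;

	/* the expensive kallsyms lookups populate the blacklist here */

	kprobe_blacklist_initialized = 1;
out:
	mutex_unlock(&kprobe_mutex);
}

/* callers only pay the lookup cost the first time the blacklist is needed */
static int blacklist_user(unsigned long addr)
{
	if (unlikely(!kprobe_blacklist_initialized))
		init_kprobe_blacklist();
	/* ... walk the blacklist and check addr ... */
	return 0;
}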

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Reviewed-by: Radovan Lekanovic <[email protected]>
Signed-off-by: Toby Collett <[email protected]>
Signed-off-by: Oskar Andero <[email protected]>
---
kernel/kprobes.c | 98 ++++++++++++++++++++++++++++++++++----------------------
1 file changed, 60 insertions(+), 38 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e35be53..0a270e5 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -68,6 +68,7 @@
#endif

static int kprobes_initialized;
+static int kprobe_blacklist_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

@@ -102,6 +103,60 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
{NULL} /* Terminator */
};

+/* it can take some time ( > 100ms ) to initialise the
+ * blacklist so we delay this until we actually need it
+ */
+static void init_kprobe_blacklist(void)
+{
+ int i;
+ unsigned long offset = 0, size = 0;
+ char *modname, namebuf[128];
+ const char *symbol_name;
+ void *addr;
+ struct kprobe_blackpoint *kb;
+
+ mutex_lock(&kprobe_mutex);
+ if (kprobe_blacklist_initialized)
+ goto out;
+
+ /*
+ * Lookup and populate the kprobe_blacklist.
+ *
+ * Unlike the kretprobe blacklist, we'll need to determine
+ * the range of addresses that belong to the said functions,
+ * since a kprobe need not necessarily be at the beginning
+ * of a function.
+ */
+ for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ kprobe_lookup_name(kb->name, addr);
+ if (!addr)
+ continue;
+
+ kb->start_addr = (unsigned long)addr;
+ symbol_name = kallsyms_lookup(kb->start_addr,
+ &size, &offset, &modname, namebuf);
+ if (!symbol_name)
+ kb->range = 0;
+ else
+ kb->range = size;
+ }
+
+ if (kretprobe_blacklist_size) {
+ /* lookup the function address from its name */
+ for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+ kprobe_lookup_name(kretprobe_blacklist[i].name,
+ kretprobe_blacklist[i].addr);
+ if (!kretprobe_blacklist[i].addr)
+ printk("kretprobe: lookup failed: %s\n",
+ kretprobe_blacklist[i].name);
+ }
+ }
+ kprobe_blacklist_initialized = 1;
+
+out:
+ mutex_unlock(&kprobe_mutex);
+}
+
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -1331,6 +1386,9 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
if (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end)
return -EINVAL;
+
+ if (unlikely(!kprobe_blacklist_initialized))
+ init_kprobe_blacklist();
/*
* If there exists a kprobe_blacklist, verify and
* fail any probe registration in the prohibited area
@@ -1816,6 +1874,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
void *addr;

if (kretprobe_blacklist_size) {
+ if (unlikely(!kprobe_blacklist_initialized))
+ init_kprobe_blacklist();
addr = kprobe_addr(&rp->kp);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -2065,11 +2125,6 @@ static struct notifier_block kprobe_module_nb = {
static int __init init_kprobes(void)
{
int i, err = 0;
- unsigned long offset = 0, size = 0;
- char *modname, namebuf[128];
- const char *symbol_name;
- void *addr;
- struct kprobe_blackpoint *kb;

/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
@@ -2079,39 +2134,6 @@ static int __init init_kprobes(void)
raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
}

- /*
- * Lookup and populate the kprobe_blacklist.
- *
- * Unlike the kretprobe blacklist, we'll need to determine
- * the range of addresses that belong to the said functions,
- * since a kprobe need not necessarily be at the beginning
- * of a function.
- */
- for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
- kprobe_lookup_name(kb->name, addr);
- if (!addr)
- continue;
-
- kb->start_addr = (unsigned long)addr;
- symbol_name = kallsyms_lookup(kb->start_addr,
- &size, &offset, &modname, namebuf);
- if (!symbol_name)
- kb->range = 0;
- else
- kb->range = size;
- }
-
- if (kretprobe_blacklist_size) {
- /* lookup the function address from its name */
- for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
- kprobe_lookup_name(kretprobe_blacklist[i].name,
- kretprobe_blacklist[i].addr);
- if (!kretprobe_blacklist[i].addr)
- printk("kretprobe: lookup failed: %s\n",
- kretprobe_blacklist[i].name);
- }
- }
-
#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
/* Init kprobe_optinsn_slots */
--
1.8.1.5

2013-04-04 12:52:24

by Oskar Andero

Subject: [PATCH v2 4/4] kprobes: replace printk with pr_-functions

Instead of using printk, use pr_info/pr_err/pr_warn. This was
detected by the checkpatch.pl script.
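
As a reminder of what the conversion buys (illustrative only, not part of the
diff below): the pr_* helpers are thin printk wrappers that supply the log
level and an optional pr_fmt() prefix. The pr_fmt() definition and the
report_lookup_failure() helper shown here are hypothetical examples:

/* pr_fmt() must be defined before printk.h is pulled in to take effect */
#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/printk.h>

static void report_lookup_failure(const char *name)
{
	/* expands to printk(KERN_ERR "kprobes: lookup failed: %s\n", name) */
	pr_err("lookup failed: %s\n", name);
}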

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Signed-off-by: Oskar Andero <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>
---
kernel/kprobes.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 58c9f4e..d61cbad 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -168,7 +168,7 @@ static void init_kprobe_blacklist(void)
kprobe_lookup_name(kretprobe_blacklist[i].name,
kretprobe_blacklist[i].addr);
if (!kretprobe_blacklist[i].addr)
- printk("kretprobe: lookup failed: %s\n",
+ pr_err("kretprobe: lookup failed: %s\n",
kretprobe_blacklist[i].name);
}
}
@@ -780,7 +780,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
*/
op = container_of(ap, struct optimized_kprobe, kp);
if (unlikely(list_empty(&op->list)))
- printk(KERN_WARNING "Warning: found a stray unused "
+ pr_warn("Warning: found a stray unused "
"aggrprobe@%p\n", ap->addr);
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
@@ -887,7 +887,7 @@ static void __kprobes optimize_all_kprobes(void)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
- printk(KERN_INFO "Kprobes globally optimized\n");
+ pr_info("Kprobes globally optimized\n");
}

/* This should be called with kprobe_mutex locked */
@@ -911,7 +911,7 @@ static void __kprobes unoptimize_all_kprobes(void)
}
/* Wait for unoptimizing completion */
wait_for_kprobe_optimizer();
- printk(KERN_INFO "Kprobes globally unoptimized\n");
+ pr_info("Kprobes globally unoptimized\n");
}

int sysctl_kprobes_optimization;
@@ -982,7 +982,7 @@ static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
/* There should be no unused kprobes can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
- printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
+ pr_err("Error: There should be no unused kprobe here.\n");
BUG_ON(kprobe_unused(ap));
}

@@ -2096,8 +2096,8 @@ EXPORT_SYMBOL_GPL(enable_kprobe);

void __kprobes dump_kprobe(struct kprobe *kp)
{
- printk(KERN_WARNING "Dumping kprobe:\n");
- printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
+ pr_warn("Dumping kprobe:\n");
+ pr_warn("Name: %s\nAddress: %p\nOffset: %x\n",
kp->symbol_name, kp->addr, kp->offset);
}

@@ -2293,7 +2293,7 @@ static void __kprobes arm_all_kprobes(void)
}

kprobes_all_disarmed = false;
- printk(KERN_INFO "Kprobes globally enabled\n");
+ pr_info("Kprobes globally enabled\n");

already_enabled:
mutex_unlock(&kprobe_mutex);
@@ -2315,7 +2315,7 @@ static void __kprobes disarm_all_kprobes(void)
}

kprobes_all_disarmed = true;
- printk(KERN_INFO "Kprobes globally disabled\n");
+ pr_info("Kprobes globally disabled\n");

for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
--
1.8.1.5

2013-04-04 12:52:41

by Oskar Andero

Subject: [PATCH v2 3/4] kprobes: move x86-specific blacklist symbols to arch directory

From: Björn Davidsson <[email protected]>

The common kprobes blacklist contains x86-specific symbols.
Looking for these in kallsyms takes unnecessary time during startup
on non-x86 platforms. The symbols were moved to
arch/x86/kernel/kprobes/core.c.

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: [email protected]
Reviewed-by: Radovan Lekanovic <[email protected]>
Signed-off-by: Björn Davidsson <[email protected]>
Signed-off-by: Oskar Andero <[email protected]>
---
arch/x86/kernel/kprobes/core.c | 6 +++++-
kernel/kprobes.c | 3 ---
2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4aa71a5..41ce6c1 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -65,7 +65,11 @@ void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

-const char * const arch_kprobes_blacksyms[] = {};
+const char * const arch_kprobes_blacksyms[] = {
+ "native_get_debugreg",
+ "irq_entries_start",
+ "common_interrupt",
+};
const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7654278..58c9f4e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -95,9 +95,6 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
*/
static const char * const common_kprobes_blacksyms[] = {
"preempt_schedule",
- "native_get_debugreg",
- "irq_entries_start",
- "common_interrupt",
"mcount", /* mcount can be called from everywhere */
};
static const size_t common_kprobes_blacksyms_size =
--
1.8.1.5

2013-04-04 12:53:07

by Oskar Andero

Subject: [PATCH v2 2/4] kprobes: split blacklist into common and arch

Some blackpoints are only valid for specific architectures. To let each
architecture specify its own blackpoints, the list has been split into two
lists: common and arch. The common list is kept in kernel/kprobes.c and
the arch list is kept in the arch/ directory.
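
Condensed from the hunks below, the essence of the new shape is that each arch
exports its own symbol list and the common code merges the two lists when the
blacklist is first built. This is an illustrative sketch (error handling and
the kretprobe part are omitted), not the exact diff:

/* provided by every arch/<arch>/kernel/kprobes*.c */
extern const char * const arch_kprobes_blacksyms[];
extern const size_t arch_kprobes_blacksyms_size;

static struct kprobe_blackpoint *kprobe_blacklist;
static size_t kprobe_blacklist_size;

static void build_blacklist(void)	/* condensed init_kprobe_blacklist() */
{
	size_t i, j = 0;

	kprobe_blacklist_size = common_kprobes_blacksyms_size +
				arch_kprobes_blacksyms_size;
	kprobe_blacklist = kzalloc(sizeof(*kprobe_blacklist) *
				   kprobe_blacklist_size, GFP_KERNEL);
	if (!kprobe_blacklist)
		return;

	for (i = 0; i < common_kprobes_blacksyms_size; i++, j++)
		init_kprobe_blacklist_entry(&kprobe_blacklist[j],
					    common_kprobes_blacksyms[i]);
	for (i = 0; i < arch_kprobes_blacksyms_size; i++, j++)
		init_kprobe_blacklist_entry(&kprobe_blacklist[j],
					    arch_kprobes_blacksyms[i]);
}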

Cc: Masami Hiramatsu <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: [email protected]
Signed-off-by: Oskar Andero <[email protected]>
---
arch/arc/kernel/kprobes.c | 3 ++
arch/arm/kernel/kprobes.c | 2 +
arch/avr32/kernel/kprobes.c | 3 ++
arch/ia64/kernel/kprobes.c | 3 ++
arch/mips/kernel/kprobes.c | 3 ++
arch/mn10300/kernel/kprobes.c | 2 +
arch/powerpc/kernel/kprobes.c | 3 ++
arch/s390/kernel/kprobes.c | 3 ++
arch/sh/kernel/kprobes.c | 3 ++
arch/sparc/kernel/kprobes.c | 3 ++
arch/x86/kernel/kprobes/core.c | 3 ++
kernel/kprobes.c | 85 +++++++++++++++++++++++++++---------------
12 files changed, 86 insertions(+), 30 deletions(-)

diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 3bfeacb..894eee6 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -24,6 +24,9 @@
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* Attempt to probe at unaligned address */
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f3..772d9ec 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -46,6 +46,8 @@
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
index f820e9f..3b02c1e 100644
--- a/arch/avr32/kernel/kprobes.c
+++ b/arch/avr32/kernel/kprobes.c
@@ -24,6 +24,9 @@ static struct pt_regs jprobe_saved_regs;

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f8280a7..239f2fd 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -42,6 +42,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
{ M, I, I }, /* 00 */
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 12bc4eb..de6a1aa 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -53,6 +53,9 @@ static const union mips_instruction breakpoint2_insn = {
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
switch (insn.i_format.opcode) {
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index 0311a7f..ed57094 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -41,6 +41,8 @@ static unsigned long cur_kprobe_bp_addr;

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);

/* singlestep flag bits */
#define SINGLESTEP_BRANCH 1
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 11f5b03..b18ba45 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -47,6 +47,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3388b2b..2077bb0 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -37,6 +37,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
switch (insn[0] >> 8) {
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 42b46e6..8acfb02 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -24,6 +24,9 @@ static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index e722121..627e35c 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -45,6 +45,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long) p->addr & 0x3UL)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7bfe318..4aa71a5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -65,6 +65,9 @@ void jprobe_return_end(void);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

+const char * const arch_kprobes_blacksyms[] = {};
+const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
+
#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 0a270e5..7654278 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -68,7 +68,6 @@
#endif

static int kprobes_initialized;
-static int kprobe_blacklist_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

@@ -94,31 +93,60 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
*
* For such cases, we now have a blacklist
*/
-static struct kprobe_blackpoint kprobe_blacklist[] = {
- {"preempt_schedule",},
- {"native_get_debugreg",},
- {"irq_entries_start",},
- {"common_interrupt",},
- {"mcount",}, /* mcount can be called from everywhere */
- {NULL} /* Terminator */
+static const char * const common_kprobes_blacksyms[] = {
+ "preempt_schedule",
+ "native_get_debugreg",
+ "irq_entries_start",
+ "common_interrupt",
+ "mcount", /* mcount can be called from everywhere */
};
+static const size_t common_kprobes_blacksyms_size =
+ ARRAY_SIZE(common_kprobes_blacksyms);
+
+extern const char * const arch_kprobes_blacksyms[];
+extern const size_t arch_kprobes_blacksyms_size;
+
+static struct kprobe_blackpoint *kprobe_blacklist;
+static size_t kprobe_blacklist_size;
+
+static void init_kprobe_blacklist_entry(struct kprobe_blackpoint *kb,
+ const char * const name)
+{
+ const char *symbol_name;
+ char *modname, namebuf[128];
+ void *addr;
+ unsigned long offset = 0, size = 0;
+
+ kb->name = name;
+ kprobe_lookup_name(kb->name, addr);
+ if (!addr)
+ return;
+
+ kb->start_addr = (unsigned long)addr;
+ symbol_name = kallsyms_lookup(kb->start_addr,
+ &size, &offset, &modname, namebuf);
+ if (!symbol_name)
+ kb->range = 0;
+ else
+ kb->range = size;
+}

/* it can take some time ( > 100ms ) to initialise the
* blacklist so we delay this until we actually need it
*/
static void init_kprobe_blacklist(void)
{
- int i;
- unsigned long offset = 0, size = 0;
- char *modname, namebuf[128];
- const char *symbol_name;
- void *addr;
- struct kprobe_blackpoint *kb;
+ int i, j = 0;

mutex_lock(&kprobe_mutex);
- if (kprobe_blacklist_initialized)
+ if (kprobe_blacklist)
goto out;

+ kprobe_blacklist_size = common_kprobes_blacksyms_size +
+ arch_kprobes_blacksyms_size;
+ kprobe_blacklist = kzalloc(sizeof(*kprobe_blacklist) *
+ kprobe_blacklist_size, GFP_KERNEL);
+
/*
* Lookup and populate the kprobe_blacklist.
*
@@ -127,18 +155,14 @@ static void init_kprobe_blacklist(void)
* since a kprobe need not necessarily be at the beginning
* of a function.
*/
- for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
- kprobe_lookup_name(kb->name, addr);
- if (!addr)
- continue;
+ for (i = 0; i < common_kprobes_blacksyms_size; i++, j++) {
+ init_kprobe_blacklist_entry(&kprobe_blacklist[j],
+ common_kprobes_blacksyms[i]);
+ }

- kb->start_addr = (unsigned long)addr;
- symbol_name = kallsyms_lookup(kb->start_addr,
- &size, &offset, &modname, namebuf);
- if (!symbol_name)
- kb->range = 0;
- else
- kb->range = size;
+ for (i = 0; i < arch_kprobes_blacksyms_size; i++, j++) {
+ init_kprobe_blacklist_entry(&kprobe_blacklist[j],
+ arch_kprobes_blacksyms[i]);
}

if (kretprobe_blacklist_size) {
@@ -151,7 +175,6 @@ static void init_kprobe_blacklist(void)
kretprobe_blacklist[i].name);
}
}
- kprobe_blacklist_initialized = 1;

out:
mutex_unlock(&kprobe_mutex);
@@ -1382,18 +1405,20 @@ out:
static int __kprobes in_kprobes_functions(unsigned long addr)
{
struct kprobe_blackpoint *kb;
+ int i;

if (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end)
return -EINVAL;

- if (unlikely(!kprobe_blacklist_initialized))
+ if (unlikely(!kprobe_blacklist))
init_kprobe_blacklist();
/*
* If there exists a kprobe_blacklist, verify and
* fail any probe registration in the prohibited area
*/
- for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ for (i = 0; i < kprobe_blacklist_size; i++) {
+ kb = &kprobe_blacklist[i];
if (kb->start_addr) {
if (addr >= kb->start_addr &&
addr < (kb->start_addr + kb->range))
@@ -1874,7 +1899,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
void *addr;

if (kretprobe_blacklist_size) {
- if (unlikely(!kprobe_blacklist_initialized))
+ if (unlikely(!kprobe_blacklist))
init_kprobe_blacklist();
addr = kprobe_addr(&rp->kp);
if (IS_ERR(addr))
--
1.8.1.5

2013-04-05 00:56:10

by Joonsoo Kim

Subject: Re: [PATCH v2 1/4] kprobes: delay blacklist symbol lookup until we actually need it

Hello, Oskar.

On Thu, Apr 04, 2013 at 02:51:26PM +0200, Oskar Andero wrote:
> From: Toby Collett <[email protected]>
>
> The symbol lookup can take a long time and kprobes is
> initialised very early in boot, so delay symbol lookup
> until the blacklist is first used.
>
> Cc: Masami Hiramatsu <[email protected]>
> Cc: David S. Miller <[email protected]>
> Reviewed-by: Radovan Lekanovic <[email protected]>
> Signed-off-by: Toby Collett <[email protected]>
> Signed-off-by: Oskar Andero <[email protected]>
> ---
> kernel/kprobes.c | 98 ++++++++++++++++++++++++++++++++++----------------------
> 1 file changed, 60 insertions(+), 38 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index e35be53..0a270e5 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -68,6 +68,7 @@
> #endif
>
> static int kprobes_initialized;
> +static int kprobe_blacklist_initialized;
> static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>
> @@ -102,6 +103,60 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
> {NULL} /* Terminator */
> };
>
> +/* it can take some time ( > 100ms ) to initialise the
> + * blacklist so we delay this until we actually need it
> + */
> +static void init_kprobe_blacklist(void)
> +{
> + int i;
> + unsigned long offset = 0, size = 0;
> + char *modname, namebuf[128];
> + const char *symbol_name;
> + void *addr;
> + struct kprobe_blackpoint *kb;
> +
> + mutex_lock(&kprobe_mutex);
> + if (kprobe_blacklist_initialized)
> + goto out;
> +
> + /*
> + * Lookup and populate the kprobe_blacklist.
> + *
> + * Unlike the kretprobe blacklist, we'll need to determine
> + * the range of addresses that belong to the said functions,
> + * since a kprobe need not necessarily be at the beginning
> + * of a function.
> + */
> + for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
> + kprobe_lookup_name(kb->name, addr);
> + if (!addr)
> + continue;
> +
> + kb->start_addr = (unsigned long)addr;
> + symbol_name = kallsyms_lookup(kb->start_addr,
> + &size, &offset, &modname, namebuf);
> + if (!symbol_name)
> + kb->range = 0;
> + else
> + kb->range = size;
> + }
> +
> + if (kretprobe_blacklist_size) {
> + /* lookup the function address from its name */
> + for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
> + kprobe_lookup_name(kretprobe_blacklist[i].name,
> + kretprobe_blacklist[i].addr);
> + if (!kretprobe_blacklist[i].addr)
> + printk("kretprobe: lookup failed: %s\n",
> + kretprobe_blacklist[i].name);
> + }
> + }
> + kprobe_blacklist_initialized = 1;

You need smp_wmb() before assigning 'kprobe_blacklist_initialized = 1'.
This guarantees that whoever sees kprobe_blacklist_initialized = 1 will also
see the updated kprobe_blacklist data.
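
A minimal sketch of the pairing being asked for, assuming the reader side also
gets a barrier; publish_blacklist() and blacklist_ready() are placeholder
names, not code from the patch:

static int kprobe_blacklist_initialized;

/* writer side: the tail of init_kprobe_blacklist() */
static void publish_blacklist(void)
{
	/* ... all kprobe_blacklist entries have been filled in above ... */
	smp_wmb();			/* order the entries before the flag */
	kprobe_blacklist_initialized = 1;
}

/* reader side: called before walking the blacklist */
static bool blacklist_ready(void)
{
	if (!kprobe_blacklist_initialized)
		return false;
	smp_rmb();			/* pairs with the smp_wmb() above */
	return true;			/* entries are now safe to read */
}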

Please refer to my previous patch once more :)

And how about defining kprobe_blacklist_initialized as a boolean?

Thanks.

> +
> +out:
> + mutex_unlock(&kprobe_mutex);
> +}
> +
> #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
> /*
> * kprobe->ainsn.insn points to the copy of the instruction to be
> @@ -1331,6 +1386,9 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
> if (addr >= (unsigned long)__kprobes_text_start &&
> addr < (unsigned long)__kprobes_text_end)
> return -EINVAL;
> +
> + if (unlikely(!kprobe_blacklist_initialized))
> + init_kprobe_blacklist();
> /*
> * If there exists a kprobe_blacklist, verify and
> * fail any probe registration in the prohibited area
> @@ -1816,6 +1874,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
> void *addr;
>
> if (kretprobe_blacklist_size) {
> + if (unlikely(!kprobe_blacklist_initialized))
> + init_kprobe_blacklist();
> addr = kprobe_addr(&rp->kp);
> if (IS_ERR(addr))
> return PTR_ERR(addr);
> @@ -2065,11 +2125,6 @@ static struct notifier_block kprobe_module_nb = {
> static int __init init_kprobes(void)
> {
> int i, err = 0;
> - unsigned long offset = 0, size = 0;
> - char *modname, namebuf[128];
> - const char *symbol_name;
> - void *addr;
> - struct kprobe_blackpoint *kb;
>
> /* FIXME allocate the probe table, currently defined statically */
> /* initialize all list heads */
> @@ -2079,39 +2134,6 @@ static int __init init_kprobes(void)
> raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
> }
>
> - /*
> - * Lookup and populate the kprobe_blacklist.
> - *
> - * Unlike the kretprobe blacklist, we'll need to determine
> - * the range of addresses that belong to the said functions,
> - * since a kprobe need not necessarily be at the beginning
> - * of a function.
> - */
> - for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
> - kprobe_lookup_name(kb->name, addr);
> - if (!addr)
> - continue;
> -
> - kb->start_addr = (unsigned long)addr;
> - symbol_name = kallsyms_lookup(kb->start_addr,
> - &size, &offset, &modname, namebuf);
> - if (!symbol_name)
> - kb->range = 0;
> - else
> - kb->range = size;
> - }
> -
> - if (kretprobe_blacklist_size) {
> - /* lookup the function address from its name */
> - for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
> - kprobe_lookup_name(kretprobe_blacklist[i].name,
> - kretprobe_blacklist[i].addr);
> - if (!kretprobe_blacklist[i].addr)
> - printk("kretprobe: lookup failed: %s\n",
> - kretprobe_blacklist[i].name);
> - }
> - }
> -
> #if defined(CONFIG_OPTPROBES)
> #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
> /* Init kprobe_optinsn_slots */
> --
> 1.8.1.5
>

Subject: Re: Re: [PATCH v2 1/4] kprobes: delay blacklist symbol lookup until we actually need it

(2013/04/05 9:56), Joonsoo Kim wrote:
> Hello, Oskar.
>
> On Thu, Apr 04, 2013 at 02:51:26PM +0200, Oskar Andero wrote:
>> From: Toby Collett <[email protected]>
>>
>> The symbol lookup can take a long time and kprobes is
>> initialised very early in boot, so delay symbol lookup
>> until the blacklist is first used.
>>
>> Cc: Masami Hiramatsu <[email protected]>
>> Cc: David S. Miller <[email protected]>
>> Reviewed-by: Radovan Lekanovic <[email protected]>
>> Signed-off-by: Toby Collett <[email protected]>
>> Signed-off-by: Oskar Andero <[email protected]>
>> ---
>> kernel/kprobes.c | 98 ++++++++++++++++++++++++++++++++++----------------------
>> 1 file changed, 60 insertions(+), 38 deletions(-)
>>
>> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
>> index e35be53..0a270e5 100644
>> --- a/kernel/kprobes.c
>> +++ b/kernel/kprobes.c
>> @@ -68,6 +68,7 @@
>> #endif
>>
>> static int kprobes_initialized;
>> +static int kprobe_blacklist_initialized;
>> static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
>> static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>>
>> @@ -102,6 +103,60 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
>> {NULL} /* Terminator */
>> };
>>
>> +/* it can take some time ( > 100ms ) to initialise the
>> + * blacklist so we delay this until we actually need it
>> + */
>> +static void init_kprobe_blacklist(void)
>> +{
>> + int i;
>> + unsigned long offset = 0, size = 0;
>> + char *modname, namebuf[128];
>> + const char *symbol_name;
>> + void *addr;
>> + struct kprobe_blackpoint *kb;
>> +
>> + mutex_lock(&kprobe_mutex);
>> + if (kprobe_blacklist_initialized)
>> + goto out;
>> +
>> + /*
>> + * Lookup and populate the kprobe_blacklist.
>> + *
>> + * Unlike the kretprobe blacklist, we'll need to determine
>> + * the range of addresses that belong to the said functions,
>> + * since a kprobe need not necessarily be at the beginning
>> + * of a function.
>> + */
>> + for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
>> + kprobe_lookup_name(kb->name, addr);
>> + if (!addr)
>> + continue;
>> +
>> + kb->start_addr = (unsigned long)addr;
>> + symbol_name = kallsyms_lookup(kb->start_addr,
>> + &size, &offset, &modname, namebuf);
>> + if (!symbol_name)
>> + kb->range = 0;
>> + else
>> + kb->range = size;
>> + }
>> +
>> + if (kretprobe_blacklist_size) {
>> + /* lookup the function address from its name */
>> + for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
>> + kprobe_lookup_name(kretprobe_blacklist[i].name,
>> + kretprobe_blacklist[i].addr);
>> + if (!kretprobe_blacklist[i].addr)
>> + printk("kretprobe: lookup failed: %s\n",
>> + kretprobe_blacklist[i].name);
>> + }
>> + }
>> + kprobe_blacklist_initialized = 1;
>
> You need smp_wmb() before assigning 'kprobe_blacklist_initialized = 1'.
> This guarantees that whoever sees kprobe_blacklist_initialized = 1 will also
> see the updated kprobe_blacklist data.

Right, to ensure the updated blacklist is visible, a memory barrier is required.

> Please refer to my previous patch once more :)
>
> And how about defining kprobe_blacklist_initialized as a boolean?

Good idea :)

Thank you,

--
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

Subject: Re: [PATCH v2 2/4] kprobes: split blacklist into common and arch

(2013/04/04 21:51), Oskar Andero wrote:
> Some blackpoints are only valid for specific architectures. To let each
> architecture specify its own blackpoints, the list has been split into two
> lists: common and arch. The common list is kept in kernel/kprobes.c and
> the arch list is kept in the arch/ directory.
>

Here I missed one racing issue.

> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index 0a270e5..7654278 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
[...]
>
> /* it can take some time ( > 100ms ) to initialise the
> * blacklist so we delay this until we actually need it
> */
> static void init_kprobe_blacklist(void)
> {
> - int i;
> - unsigned long offset = 0, size = 0;
> - char *modname, namebuf[128];
> - const char *symbol_name;
> - void *addr;
> - struct kprobe_blackpoint *kb;
> + int i, j = 0;
>
> mutex_lock(&kprobe_mutex);
> - if (kprobe_blacklist_initialized)
> + if (kprobe_blacklist)
> goto out;
>
> + kprobe_blacklist_size = common_kprobes_blacksyms_size +
> + arch_kprobes_blacksyms_size;
> + kprobe_blacklist = kzalloc(sizeof(*kprobe_blacklist) *
> + kprobe_blacklist_size, GFP_KERNEL);

If you'd like to use kprobe_blacklist itself as the initialized flag,
you must allocate and initialize the entries through a local "blacklist"
pointer first.

> @@ -151,7 +175,6 @@ static void init_kprobe_blacklist(void)
> kretprobe_blacklist[i].name);
> }
> }
> - kprobe_blacklist_initialized = 1;

And only after it is fully initialized, assign that local blacklist to
kprobe_blacklist.

Without that, another thread may see the uninitialized (but already
allocated) blacklist.
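
A minimal sketch of the suggested shape, assuming a failed allocation simply
leaves the blacklist unset (kretprobe handling omitted; this is not the posted
code):

static void init_kprobe_blacklist(void)
{
	struct kprobe_blackpoint *blacklist;
	size_t i, j = 0, nsyms;

	mutex_lock(&kprobe_mutex);
	if (kprobe_blacklist)
		goto out;

	nsyms = common_kprobes_blacksyms_size + arch_kprobes_blacksyms_size;
	blacklist = kzalloc(sizeof(*blacklist) * nsyms, GFP_KERNEL);
	if (!blacklist)
		goto out;

	for (i = 0; i < common_kprobes_blacksyms_size; i++, j++)
		init_kprobe_blacklist_entry(&blacklist[j],
					    common_kprobes_blacksyms[i]);
	for (i = 0; i < arch_kprobes_blacksyms_size; i++, j++)
		init_kprobe_blacklist_entry(&blacklist[j],
					    arch_kprobes_blacksyms[i]);

	kprobe_blacklist_size = nsyms;
	smp_wmb();			/* entries before the published pointer */
	kprobe_blacklist = blacklist;	/* other threads only see it now */
out:
	mutex_unlock(&kprobe_mutex);
}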

Thank you,

--
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

Subject: Re: [PATCH v2 3/4] kprobes: move x86-specific blacklist symbols to arch directory

(2013/04/04 21:51), Oskar Andero wrote:
> From: Björn Davidsson <[email protected]>
>
> The common kprobes blacklist contains x86-specific symbols.
> Looking for these in kallsyms takes unnecessary time during startup
> on non-x86 platforms. The symbols were moved to
> arch/x86/kernel/kprobes/core.c.

OK, it looks good to me. :)

Acked-by: Masami Hiramatsu <[email protected]>

>
> Cc: Masami Hiramatsu <[email protected]>
> Cc: David S. Miller <[email protected]>
> Cc: [email protected]
> Reviewed-by: Radovan Lekanovic <[email protected]>
> Signed-off-by: Björn Davidsson <[email protected]>
> Signed-off-by: Oskar Andero <[email protected]>
> ---
> arch/x86/kernel/kprobes/core.c | 6 +++++-
> kernel/kprobes.c | 3 ---
> 2 files changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
> index 4aa71a5..41ce6c1 100644
> --- a/arch/x86/kernel/kprobes/core.c
> +++ b/arch/x86/kernel/kprobes/core.c
> @@ -65,7 +65,11 @@ void jprobe_return_end(void);
> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> -const char * const arch_kprobes_blacksyms[] = {};
> +const char * const arch_kprobes_blacksyms[] = {
> + "native_get_debugreg",
> + "irq_entries_start",
> + "common_interrupt",
> +};
> const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
>
> #define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index 7654278..58c9f4e 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -95,9 +95,6 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
> */
> static const char * const common_kprobes_blacksyms[] = {
> "preempt_schedule",
> - "native_get_debugreg",
> - "irq_entries_start",
> - "common_interrupt",
> "mcount", /* mcount can be called from everywhere */
> };
> static const size_t common_kprobes_blacksyms_size =
>


--
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

2013-04-05 08:05:11

by Vineet Gupta

Subject: Re: [PATCH v2 2/4] kprobes: split blacklist into common and arch

On 04/04/2013 06:21 PM, Oskar Andero wrote:
> Some blackpoints are only valid for specific architectures. To let each
> architecture specify its own blackpoints, the list has been split into two
> lists: common and arch. The common list is kept in kernel/kprobes.c and
> the arch list is kept in the arch/ directory.
>
> Cc: Masami Hiramatsu <[email protected]>
> Cc: David S. Miller <[email protected]>
> Cc: [email protected]
> Signed-off-by: Oskar Andero <[email protected]>
> ---
> arch/arc/kernel/kprobes.c | 3 ++
> arch/arm/kernel/kprobes.c | 2 +
> arch/avr32/kernel/kprobes.c | 3 ++
> arch/ia64/kernel/kprobes.c | 3 ++
> arch/mips/kernel/kprobes.c | 3 ++
> arch/mn10300/kernel/kprobes.c | 2 +
> arch/powerpc/kernel/kprobes.c | 3 ++
> arch/s390/kernel/kprobes.c | 3 ++
> arch/sh/kernel/kprobes.c | 3 ++
> arch/sparc/kernel/kprobes.c | 3 ++
> arch/x86/kernel/kprobes/core.c | 3 ++
> kernel/kprobes.c | 85 +++++++++++++++++++++++++++---------------
> 12 files changed, 86 insertions(+), 30 deletions(-)
>
> diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
> index 3bfeacb..894eee6 100644
> --- a/arch/arc/kernel/kprobes.c
> +++ b/arch/arc/kernel/kprobes.c
> @@ -24,6 +24,9 @@
> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> int __kprobes arch_prepare_kprobe(struct kprobe *p)
> {
> /* Attempt to probe at unaligned address */
> diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
> index 170e9f3..772d9ec 100644
> --- a/arch/arm/kernel/kprobes.c
> +++ b/arch/arm/kernel/kprobes.c
> @@ -46,6 +46,8 @@
> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
>
> int __kprobes arch_prepare_kprobe(struct kprobe *p)
> {
> diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
> index f820e9f..3b02c1e 100644
> --- a/arch/avr32/kernel/kprobes.c
> +++ b/arch/avr32/kernel/kprobes.c
> @@ -24,6 +24,9 @@ static struct pt_regs jprobe_saved_regs;
>
> struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> int __kprobes arch_prepare_kprobe(struct kprobe *p)
> {
> int ret = 0;
> diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
> index f8280a7..239f2fd 100644
> --- a/arch/ia64/kernel/kprobes.c
> +++ b/arch/ia64/kernel/kprobes.c
> @@ -42,6 +42,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> enum instruction_type {A, I, M, F, B, L, X, u};
> static enum instruction_type bundle_encoding[32][3] = {
> { M, I, I }, /* 00 */
> diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
> index 12bc4eb..de6a1aa 100644
> --- a/arch/mips/kernel/kprobes.c
> +++ b/arch/mips/kernel/kprobes.c
> @@ -53,6 +53,9 @@ static const union mips_instruction breakpoint2_insn = {
> DEFINE_PER_CPU(struct kprobe *, current_kprobe);
> DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> static int __kprobes insn_has_delayslot(union mips_instruction insn)
> {
> switch (insn.i_format.opcode) {
> diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
> index 0311a7f..ed57094 100644
> --- a/arch/mn10300/kernel/kprobes.c
> +++ b/arch/mn10300/kernel/kprobes.c
> @@ -41,6 +41,8 @@ static unsigned long cur_kprobe_bp_addr;
>
> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
>
> /* singlestep flag bits */
> #define SINGLESTEP_BRANCH 1
> diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
> index 11f5b03..b18ba45 100644
> --- a/arch/powerpc/kernel/kprobes.c
> +++ b/arch/powerpc/kernel/kprobes.c
> @@ -47,6 +47,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> int __kprobes arch_prepare_kprobe(struct kprobe *p)
> {
> int ret = 0;
> diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
> index 3388b2b..2077bb0 100644
> --- a/arch/s390/kernel/kprobes.c
> +++ b/arch/s390/kernel/kprobes.c
> @@ -37,6 +37,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> struct kretprobe_blackpoint kretprobe_blacklist[] = { };
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
> {
> switch (insn[0] >> 8) {
> diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
> index 42b46e6..8acfb02 100644
> --- a/arch/sh/kernel/kprobes.c
> +++ b/arch/sh/kernel/kprobes.c
> @@ -24,6 +24,9 @@ static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
> static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
> static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> #define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
> #define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
> #define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
> diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
> index e722121..627e35c 100644
> --- a/arch/sparc/kernel/kprobes.c
> +++ b/arch/sparc/kernel/kprobes.c
> @@ -45,6 +45,9 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> int __kprobes arch_prepare_kprobe(struct kprobe *p)
> {
> if ((unsigned long) p->addr & 0x3UL)
> diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
> index 7bfe318..4aa71a5 100644
> --- a/arch/x86/kernel/kprobes/core.c
> +++ b/arch/x86/kernel/kprobes/core.c
> @@ -65,6 +65,9 @@ void jprobe_return_end(void);
> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
> DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
>
> +const char * const arch_kprobes_blacksyms[] = {};
> +const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);
> +
> #define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
>
> #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index 0a270e5..7654278 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -68,7 +68,6 @@
> #endif
>
> static int kprobes_initialized;
> -static int kprobe_blacklist_initialized;
> static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>
> @@ -94,31 +93,60 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
> *
> * For such cases, we now have a blacklist
> */
> -static struct kprobe_blackpoint kprobe_blacklist[] = {
> - {"preempt_schedule",},
> - {"native_get_debugreg",},
> - {"irq_entries_start",},
> - {"common_interrupt",},
> - {"mcount",}, /* mcount can be called from everywhere */
> - {NULL} /* Terminator */
> +static const char * const common_kprobes_blacksyms[] = {
> + "preempt_schedule",
> + "native_get_debugreg",
> + "irq_entries_start",
> + "common_interrupt",
> + "mcount", /* mcount can be called from everywhere */
> };
> +static const size_t common_kprobes_blacksyms_size =
> + ARRAY_SIZE(common_kprobes_blacksyms);
> +
> +extern const char * const arch_kprobes_blacksyms[];
> +extern const size_t arch_kprobes_blacksyms_size;

Instead of this, if you define arch_kprobes_blacksyms{,_size} as weak symbols
here, you don't need to patch all the arches with dummy empty entries.
Any arch which cares about a blacklist can easily override the weak default
by defining its own copy. A lot of core kernel code - where an arch variant
is NOT typically needed - is written this way!
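
A minimal sketch of that alternative, assuming __weak data definitions are
acceptable here (this illustrates the suggestion, not the posted series):

/* kernel/kprobes.c: weak, empty defaults that any arch may override */
const char * const arch_kprobes_blacksyms[] __weak = {};
const size_t arch_kprobes_blacksyms_size __weak;

/* arch/x86/kernel/kprobes/core.c: the strong definition wins at link time */
const char * const arch_kprobes_blacksyms[] = {
	"native_get_debugreg",
	"irq_entries_start",
	"common_interrupt",
};
const size_t arch_kprobes_blacksyms_size = ARRAY_SIZE(arch_kprobes_blacksyms);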

Thx,
-Vineet

2013-04-05 10:00:55

by Oskar Andero

Subject: Re: Re: [PATCH v2 1/4] kprobes: delay blacklist symbol lookup until we actually need it

Thanks for your input guys!

On 04:16 Fri 05 Apr , Masami Hiramatsu wrote:
> (2013/04/05 9:56), Joonsoo Kim wrote:
> > Hello, Oskar.
> >
> > On Thu, Apr 04, 2013 at 02:51:26PM +0200, Oskar Andero wrote:
> >> From: Toby Collett <[email protected]>
> >>
> >> The symbol lookup can take a long time and kprobes is
> >> initialised very early in boot, so delay symbol lookup
> >> until the blacklist is first used.
> >>
> >> Cc: Masami Hiramatsu <[email protected]>
> >> Cc: David S. Miller <[email protected]>
> >> Reviewed-by: Radovan Lekanovic <[email protected]>
> >> Signed-off-by: Toby Collett <[email protected]>
> >> Signed-off-by: Oskar Andero <[email protected]>
> >> ---
> >> kernel/kprobes.c | 98 ++++++++++++++++++++++++++++++++++----------------------
> >> 1 file changed, 60 insertions(+), 38 deletions(-)
> >>
> >> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> >> index e35be53..0a270e5 100644
> >> --- a/kernel/kprobes.c
> >> +++ b/kernel/kprobes.c
> >> @@ -68,6 +68,7 @@
> >> #endif
> >>
> >> static int kprobes_initialized;
> >> +static int kprobe_blacklist_initialized;
> >> static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> >> static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
> >>
> >> @@ -102,6 +103,60 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
> >> {NULL} /* Terminator */
> >> };
> >>
> >> +/* it can take some time ( > 100ms ) to initialise the
> >> + * blacklist so we delay this until we actually need it
> >> + */
> >> +static void init_kprobe_blacklist(void)
> >> +{
> >> + int i;
> >> + unsigned long offset = 0, size = 0;
> >> + char *modname, namebuf[128];
> >> + const char *symbol_name;
> >> + void *addr;
> >> + struct kprobe_blackpoint *kb;
> >> +
> >> + mutex_lock(&kprobe_mutex);
> >> + if (kprobe_blacklist_initialized)
> >> + goto out;
> >> +
> >> + /*
> >> + * Lookup and populate the kprobe_blacklist.
> >> + *
> >> + * Unlike the kretprobe blacklist, we'll need to determine
> >> + * the range of addresses that belong to the said functions,
> >> + * since a kprobe need not necessarily be at the beginning
> >> + * of a function.
> >> + */
> >> + for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
> >> + kprobe_lookup_name(kb->name, addr);
> >> + if (!addr)
> >> + continue;
> >> +
> >> + kb->start_addr = (unsigned long)addr;
> >> + symbol_name = kallsyms_lookup(kb->start_addr,
> >> + &size, &offset, &modname, namebuf);
> >> + if (!symbol_name)
> >> + kb->range = 0;
> >> + else
> >> + kb->range = size;
> >> + }
> >> +
> >> + if (kretprobe_blacklist_size) {
> >> + /* lookup the function address from its name */
> >> + for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
> >> + kprobe_lookup_name(kretprobe_blacklist[i].name,
> >> + kretprobe_blacklist[i].addr);
> >> + if (!kretprobe_blacklist[i].addr)
> >> + printk("kretprobe: lookup failed: %s\n",
> >> + kretprobe_blacklist[i].name);
> >> + }
> >> + }
> >> + kprobe_blacklist_initialized = 1;
> >
> > You need smp_wmb() before assigning 'kprobe_blacklist_initialized = 1'.
> > This guarantees that whoever sees kprobe_blacklist_initialized = 1 will also
> > see the updated kprobe_blacklist data.
>
> Right, to ensure the updated blacklist is visible, a memory barrier is required.

Agreed.

> > Please refer to my previous patch once more :)
> >
> > And how about defining kprobe_blacklist_initialized as a boolean?
>
> Good idea :)
>

I'll fix it for v3.

-Oskar
>