Hi,
These are trivial bugfix and cleanup patches for kprobes, which
I found during kprobes-tracer and kprobes-jump optimization
development.
Please apply them.
Thank you,
---
Masami Hiramatsu (3):
kprobes: cleanup: use list instead of hlist for insn_pages
kprobes: no need to unlock kprobe_insn_mutex
kprobes: fix kprobe selftest configuration dependency
kernel/kprobes.c | 36 ++++++++++++------------------------
lib/Kconfig.debug | 1 +
2 files changed, 13 insertions(+), 24 deletions(-)
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America), Inc.
Software Solutions Division
e-mail: [email protected]
Remove the needless kprobe_insn_mutex unlocking during the safety check
in garbage collection: if someone releases a dirty slot while the safety
check runs (the check ensures no other CPU is still executing any dirty
slot), the check's result is invalidated. So we need to hold the mutex
while checking safety.
Signed-off-by: Masami Hiramatsu <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jim Keniston <[email protected]>
---
kernel/kprobes.c | 6 +-----
1 files changed, 1 insertions(+), 5 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c0fa54b..16b5739 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -237,13 +237,9 @@ static int __kprobes collect_garbage_slots(void)
{
struct kprobe_insn_page *kip;
struct hlist_node *pos, *next;
- int safety;
/* Ensure no-one is preepmted on the garbages */
- mutex_unlock(&kprobe_insn_mutex);
- safety = check_safety();
- mutex_lock(&kprobe_insn_mutex);
- if (safety != 0)
+ if (check_safety())
return -EAGAIN;
hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
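To illustrate the race described above (a sketch only; it assumes
check_safety()'s guarantee covers just the slots that were dirty while
it ran):

  CPU A: collect_garbage_slots()      CPU B: free_insn_slot(slot, 1)
  ------------------------------      ------------------------------
  mutex_unlock(&kprobe_insn_mutex)
                                      mutex_lock(&kprobe_insn_mutex)
                                      marks a slot SLOT_DIRTY
                                      mutex_unlock(&kprobe_insn_mutex)
  check_safety() passes, without
  having covered B's new dirty slot
  mutex_lock(&kprobe_insn_mutex)
  collects garbage, including the
  slot B's probe may still be using

Keeping kprobe_insn_mutex held across check_safety() closes this window.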
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America), Inc.
Software Solutions Division
e-mail: [email protected]
Use list_head instead of hlist for managing insn_pages, because
insn_pages doesn't use a hash table.
Signed-off-by: Masami Hiramatsu <[email protected]>
Acked-by: Ananth N Mavinakayanahalli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jim Keniston <[email protected]>
---
kernel/kprobes.c | 30 +++++++++++-------------------
1 files changed, 11 insertions(+), 19 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 16b5739..6fe9dc6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
struct kprobe_insn_page {
- struct hlist_node hlist;
+ struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
char slot_used[INSNS_PER_PAGE];
int nused;
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
};
static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
-static struct hlist_head kprobe_insn_pages;
+static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
@@ -152,10 +152,9 @@ loop_end:
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
retry:
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
@@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
kfree(kip);
return NULL;
}
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+ INIT_LIST_HEAD(&kip->list);
+ list_add(&kip->list, &kprobe_insn_pages);
memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
@@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
- hlist_del(&kip->hlist);
- if (hlist_empty(&kprobe_insn_pages)) {
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist,
- &kprobe_insn_pages);
- } else {
+ if (!list_is_singular(&kprobe_insn_pages)) {
+ list_del(&kip->list);
module_free(NULL, kip->insns);
kfree(kip);
}
@@ -235,14 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
static int __kprobes collect_garbage_slots(void)
{
- struct kprobe_insn_page *kip;
- struct hlist_node *pos, *next;
+ struct kprobe_insn_page *kip, *next;
/* Ensure no-one is preepmted on the garbages */
if (check_safety())
return -EAGAIN;
- hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
+ list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
@@ -260,19 +254,17 @@ static int __kprobes collect_garbage_slots(void)
void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
mutex_lock(&kprobe_insn_mutex);
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
if (dirty) {
kip->slot_used[i] = SLOT_DIRTY;
kip->ngarbage++;
- } else {
+ } else
collect_one_slot(kip, i);
- }
break;
}
}
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America), Inc.
Software Solutions Division
e-mail: [email protected]
Select CONFIG_KALLSYMS_ALL when CONFIG_KPROBES_SANITY_TEST=y.
The kprobe selftest always fails without CONFIG_KALLSYMS_ALL=y, because
kallsyms doesn't list the target functions that are probed by this
test.
Signed-off-by: Masami Hiramatsu <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jim Keniston <[email protected]>
---
lib/Kconfig.debug | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 80d6db7..741a860 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -740,6 +740,7 @@ config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
depends on KPROBES
+ select KALLSYMS_ALL
default n
help
This option provides for testing basic kprobes functionality on
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America), Inc.
Software Solutions Division
e-mail: [email protected]
* Masami Hiramatsu <[email protected]> wrote:
> Use list_head instead of hlist for managing insn_pages,
> because insn_pages doesn't use a hash table.
> struct kprobe_insn_page {
> - struct hlist_node hlist;
> + struct list_head list;
Hm, you know that this increases the size of kprobe_insn_page by 4/8
bytes, right?
hlists are not just used for hashes - but also when we want a more
compact / smaller list head.
How many kprobe_insn_page's can be allocated in the system,
maximally?
Ingo
Commit-ID: 130c5b2a0f78ba37f60f58010960fca29beaaf2d
Gitweb: http://git.kernel.org/tip/130c5b2a0f78ba37f60f58010960fca29beaaf2d
Author: Masami Hiramatsu <[email protected]>
AuthorDate: Tue, 30 Jun 2009 17:08:03 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Tue, 30 Jun 2009 23:21:55 +0200
kprobes: Fix kprobe selftest configuration dependency
Select CONFIG_KALLSYMS_ALL when CONFIG_KPROBES_SANITY_TEST=y.
The kprobe selftest always fails without CONFIG_KALLSYMS_ALL=y,
because kallsyms doesn't list the target functions that are
probed by this test.
Signed-off-by: Masami Hiramatsu <[email protected]>
Cc: systemtap<[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Jim Keniston <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
lib/Kconfig.debug | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c32b1a..876407b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -728,6 +728,7 @@ config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
depends on KPROBES
+ select KALLSYMS_ALL
default n
help
This option provides for testing basic kprobes functionality on
Commit-ID: 4ca2efad573aef3cad6157bb7c0036435f87a329
Gitweb: http://git.kernel.org/tip/4ca2efad573aef3cad6157bb7c0036435f87a329
Author: Masami Hiramatsu <[email protected]>
AuthorDate: Tue, 30 Jun 2009 17:08:09 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Tue, 30 Jun 2009 23:21:58 +0200
kprobes: No need to unlock kprobe_insn_mutex
Remove the needless kprobe_insn_mutex unlocking during the
safety check in garbage collection: if someone releases a dirty
slot while the safety check runs (the check ensures no other CPU
is still executing any dirty slot), the check's result is
invalidated. So we need to hold the mutex while checking safety.
Signed-off-by: Masami Hiramatsu <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Jim Keniston <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/kprobes.c | 6 +-----
1 files changed, 1 insertions(+), 5 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c0fa54b..16b5739 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -237,13 +237,9 @@ static int __kprobes collect_garbage_slots(void)
{
struct kprobe_insn_page *kip;
struct hlist_node *pos, *next;
- int safety;
/* Ensure no-one is preepmted on the garbages */
- mutex_unlock(&kprobe_insn_mutex);
- safety = check_safety();
- mutex_lock(&kprobe_insn_mutex);
- if (safety != 0)
+ if (check_safety())
return -EAGAIN;
hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
Ingo Molnar wrote:
> * Masami Hiramatsu <[email protected]> wrote:
>
>> Use list_head instead of hlist for managing insn_pages,
>> because insn_pages doesn't use a hash table.
>
>> struct kprobe_insn_page {
>> - struct hlist_node hlist;
>> + struct list_head list;
>
> Hm, you know that this increases the size of kprobe_insn_page by 4/8
> bytes, right?
Sure, that will increase size.
> hlists are not just used for hashes - but also when we want a more
> compact / smaller list head.
Oh, I thought hlists are used for hash tables...
>
> How many kprobe_insn_page's can be allocated in the system,
> maximally?
It depends on how many probes you use, but logically,
one kprobe_insn_page is allocated per 4096/16 = 256 probes.
So, if you use 25,600 probes on your system, memory
consumption will increase by 400/800 bytes.
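In other words (taking the suggested 4/8 bytes of per-page growth at
face value):

  25,600 probes / 256 slots per page = 100 kprobe_insn_pages
  100 pages * 4 bytes (32-bit)       = 400 bytes
  100 pages * 8 bytes (64-bit)       = 800 bytes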
Thank you,
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America), Inc.
Software Solutions Division
e-mail: [email protected]
* Masami Hiramatsu <[email protected]> wrote:
> Ingo Molnar wrote:
> > * Masami Hiramatsu <[email protected]> wrote:
> >
> >> Use list_head instead of hlist for managing insn_pages,
> >> because insn_pages doesn't use a hash table.
> >
> >> struct kprobe_insn_page {
> >> - struct hlist_node hlist;
> >> + struct list_head list;
> >
> > Hm, you know that this increases the size of kprobe_insn_page by 4/8
> > bytes, right?
>
> Sure, that will increase size.
>
> > hlists are not just used for hashes - but also when we want a more
> > compact / smaller list head.
>
> Oh, I thought hlists are used for hash tables...
... because they are smaller, hence the hash table of list
heads becomes twice as dense as with list_head.
But otherwise it's an (almost) equivalent primitive to list_head,
with a slightly higher runtime cost versus better RAM footprint.
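e.g. with a hypothetical 1024-bucket table on a 64-bit kernel
(illustrative numbers, not from kprobes):

	struct hlist_head buckets[1024];   /* 1024 *  8 bytes =  8 KB */

versus:

	struct list_head buckets[1024];    /* 1024 * 16 bytes = 16 KB */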
> > How many kprobe_insn_page's can be allocated in the system,
> > maximally?
>
> It depends on how many probes you use, but logically, one
> kprobe_insn_page is allocated per 4096/16 = 256 probes. So, if
> you use 25,600 probes on your system, memory consumption will
> increase by 400/800 bytes.
it's your call really - just wanted to react to the 'because it
should be used for hash tables' comment in the changelog.
Ingo
Ingo Molnar wrote:
> * Masami Hiramatsu <[email protected]> wrote:
>
>> Ingo Molnar wrote:
>>> * Masami Hiramatsu <[email protected]> wrote:
>>>
>>>> Use list_head instead of hlist for managing insn_pages,
>>>> because insn_pages doesn't use a hash table.
>>>> struct kprobe_insn_page {
>>>> - struct hlist_node hlist;
>>>> + struct list_head list;
>>> Hm, you know that this increases the size of kprobe_insn_page by 4/8
>>> bytes, right?
>> Sure, that will increase size.
>>
>>> hlists are not just used for hashes - but also when we want a more
>>> compact / smaller list head.
>> Oh, I thought hlists are used for hash tables...
>
> ... because they are smaller, hence the hash table of list
> heads becomes twice as dense as with list_head.
>
> But otherwise it's an (almost) equivalent primitive to list_head,
> with a slightly higher runtime cost versus better RAM footprint.
>
>>> How many kprobe_insn_page's can be allocated in the system,
>>> maximally?
>> It depends on how many probes you use, but logically, one
>> kprobe_insn_page is allocated per 4096/16 = 256 probes. So, if
>> you use 25,600 probes on your system, memory consumption will
>> increase by 400/800 bytes.
>
> it's your call really - just wanted to react to the 'because it
> should be used for hash tables' comment in the changelog.
Hi Ingo,
Perhaps I was misunderstood?
struct list_head {
struct list_head *next, *prev;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
Both list_head and hlist_node are the same size...
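A quick userspace check confirms this - the structures are re-declared
here, so this is only a sketch of the kernel definitions, but on LP64
it prints 16, 16 and 8:

#include <stdio.h>

/* Userspace re-declarations mirroring include/linux/list.h,
 * for size comparison only. */
struct list_head {
	struct list_head *next, *prev;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

struct hlist_head {
	struct hlist_node *first;
};

int main(void)
{
	/* The node types match; only the head type is smaller. */
	printf("list_head:  %zu\n", sizeof(struct list_head));
	printf("hlist_node: %zu\n", sizeof(struct hlist_node));
	printf("hlist_head: %zu\n", sizeof(struct hlist_head));
	return 0;
}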
--
Masami Hiramatsu
Software Engineer
Hitachi Computer Products (America), Inc.
Software Solutions Division
e-mail: [email protected]
* Masami Hiramatsu <[email protected]> wrote:
> Ingo Molnar wrote:
> > * Masami Hiramatsu <[email protected]> wrote:
> >
> >> Ingo Molnar wrote:
> >>> * Masami Hiramatsu <[email protected]> wrote:
> >>>
> >>>> Use list_head instead of hlist for managing insn_pages,
> >>>> because insn_pages doesn't use a hash table.
> >>>> struct kprobe_insn_page {
> >>>> - struct hlist_node hlist;
> >>>> + struct list_head list;
> >>> Hm, you know that this increases the size of kprobe_insn_page by 4/8
> >>> bytes, right?
> >> Sure, that will increase size.
> >>
> >>> hlists are not just used for hashes - but also when we want a more
> >>> compact / smaller list head.
> >> Oh, I thought hlists are used for hash tables...
> >
> > ... because they are smaller, hence the hash table of list
> > heads becomes twice as dense as with list_head.
> >
> > But otherwise it's an (almost) equivalent primitive to list_head,
> > with a slightly higher runtime cost versus better RAM footprint.
> >
> >>> How many kprobe_insn_page's can be allocated in the system,
> >>> maximally?
> >> It depends on how many probes you use, but logically, one
> >> kprobe_insn_page is allocated per 4096/16 = 256 probes. So, if
> >> you use 25,600 probes on your system, memory consumption will
> >> increase by 400/800 bytes.
> >
> > it's your call really - just wanted to react on the 'because it
> > should be used for hash tables' comment in the changelog.
>
> Hi Ingo,
>
> Perhaps I was misunderstood?
>
> struct list_head {
> struct list_head *next, *prev;
> };
>
> struct hlist_node {
> struct hlist_node *next, **pprev;
> };
>
> Both list_head and hlist_node are the same size...
ahhh ... a light goes up: i read it as hlist_head, while it's
hlist_node.
You are right, hlist_node is a needless complication, so your cleanup
is correct.
Ingo
Commit-ID: aa621943d0058eb1c9a67c2d6bc9951d20335e87
Gitweb: http://git.kernel.org/tip/aa621943d0058eb1c9a67c2d6bc9951d20335e87
Author: Masami Hiramatsu <[email protected]>
AuthorDate: Tue, 30 Jun 2009 17:08:14 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Wed, 1 Jul 2009 01:19:17 +0200
kprobes: Clean up insn_pages by using list instead of hlist
Use list_head instead of hlist for managing
insn_pages, because insn_pages doesn't use a hash table.
Signed-off-by: Masami Hiramatsu <[email protected]>
Acked-by: Ananth N Mavinakayanahalli <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Jim Keniston <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/kprobes.c | 30 +++++++++++-------------------
1 files changed, 11 insertions(+), 19 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 16b5739..6fe9dc6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
struct kprobe_insn_page {
- struct hlist_node hlist;
+ struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
char slot_used[INSNS_PER_PAGE];
int nused;
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
};
static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
-static struct hlist_head kprobe_insn_pages;
+static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
@@ -152,10 +152,9 @@ loop_end:
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
retry:
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
@@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
kfree(kip);
return NULL;
}
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+ INIT_LIST_HEAD(&kip->list);
+ list_add(&kip->list, &kprobe_insn_pages);
memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
@@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
- hlist_del(&kip->hlist);
- if (hlist_empty(&kprobe_insn_pages)) {
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist,
- &kprobe_insn_pages);
- } else {
+ if (!list_is_singular(&kprobe_insn_pages)) {
+ list_del(&kip->list);
module_free(NULL, kip->insns);
kfree(kip);
}
@@ -235,14 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
static int __kprobes collect_garbage_slots(void)
{
- struct kprobe_insn_page *kip;
- struct hlist_node *pos, *next;
+ struct kprobe_insn_page *kip, *next;
/* Ensure no-one is preepmted on the garbages */
if (check_safety())
return -EAGAIN;
- hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
+ list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
@@ -260,19 +254,17 @@ static int __kprobes collect_garbage_slots(void)
void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
mutex_lock(&kprobe_insn_mutex);
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
if (dirty) {
kip->slot_used[i] = SLOT_DIRTY;
kip->ngarbage++;
- } else {
+ } else
collect_one_slot(kip, i);
- }
break;
}
}
Commit-ID: 4a2bb6fcc80e6330ca2f2393e98605052cc7780b
Gitweb: http://git.kernel.org/tip/4a2bb6fcc80e6330ca2f2393e98605052cc7780b
Author: Masami Hiramatsu <[email protected]>
AuthorDate: Tue, 30 Jun 2009 17:08:09 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Wed, 1 Jul 2009 10:43:07 +0200
kprobes: No need to unlock kprobe_insn_mutex
Remove the needless kprobe_insn_mutex unlocking during the
safety check in garbage collection: if someone releases a dirty
slot while the safety check runs (the check ensures no other CPU
is still executing any dirty slot), the check's result is
invalidated. So we need to hold the mutex while checking safety.
Signed-off-by: Masami Hiramatsu <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Jim Keniston <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/kprobes.c | 6 +-----
1 files changed, 1 insertions(+), 5 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c0fa54b..16b5739 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -237,13 +237,9 @@ static int __kprobes collect_garbage_slots(void)
{
struct kprobe_insn_page *kip;
struct hlist_node *pos, *next;
- int safety;
/* Ensure no-one is preepmted on the garbages */
- mutex_unlock(&kprobe_insn_mutex);
- safety = check_safety();
- mutex_lock(&kprobe_insn_mutex);
- if (safety != 0)
+ if (check_safety())
return -EAGAIN;
hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
Commit-ID: c5cb5a2d8d7dc872cf1504091ad0e59fe5ff7cb5
Gitweb: http://git.kernel.org/tip/c5cb5a2d8d7dc872cf1504091ad0e59fe5ff7cb5
Author: Masami Hiramatsu <[email protected]>
AuthorDate: Tue, 30 Jun 2009 17:08:14 -0400
Committer: Ingo Molnar <[email protected]>
CommitDate: Wed, 1 Jul 2009 10:43:41 +0200
kprobes: Clean up insn_pages by using list instead of hlist
Use list_head instead of hlist for managing
insn_pages, because insn_pages doesn't use a hash table.
Signed-off-by: Masami Hiramatsu <[email protected]>
Acked-by: Ananth N Mavinakayanahalli <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Jim Keniston <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/kprobes.c | 30 +++++++++++-------------------
1 files changed, 11 insertions(+), 19 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 16b5739..6fe9dc6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
struct kprobe_insn_page {
- struct hlist_node hlist;
+ struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
char slot_used[INSNS_PER_PAGE];
int nused;
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
};
static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
-static struct hlist_head kprobe_insn_pages;
+static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
@@ -152,10 +152,9 @@ loop_end:
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
retry:
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
@@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
kfree(kip);
return NULL;
}
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+ INIT_LIST_HEAD(&kip->list);
+ list_add(&kip->list, &kprobe_insn_pages);
memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
@@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
- hlist_del(&kip->hlist);
- if (hlist_empty(&kprobe_insn_pages)) {
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist,
- &kprobe_insn_pages);
- } else {
+ if (!list_is_singular(&kprobe_insn_pages)) {
+ list_del(&kip->list);
module_free(NULL, kip->insns);
kfree(kip);
}
@@ -235,14 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
static int __kprobes collect_garbage_slots(void)
{
- struct kprobe_insn_page *kip;
- struct hlist_node *pos, *next;
+ struct kprobe_insn_page *kip, *next;
/* Ensure no-one is preepmted on the garbages */
if (check_safety())
return -EAGAIN;
- hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
+ list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
@@ -260,19 +254,17 @@ static int __kprobes collect_garbage_slots(void)
void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
mutex_lock(&kprobe_insn_mutex);
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
if (dirty) {
kip->slot_used[i] = SLOT_DIRTY;
kip->ngarbage++;
- } else {
+ } else
collect_one_slot(kip, i);
- }
break;
}
}