2007-05-10 19:00:38

by Christoph Lameter

Subject: [RFC] Slab allocators: Drop support for destructors

As far as I can tell there is only a single slab destructor left (there
is currently another in i386 but it's going to go as soon as Andi merges
i386's support for quicklists).

I wonder how difficult it would be to remove it? If we have no need for
destructors anymore then maybe we could remove destructor support from the
slab allocators? There is no point in checking for destructor uses in
the slab allocators if there are none.

Or are there valid reasons to keep them around? It seems they were mainly
used for list management which required them to take a spinlock. Taking a
spinlock in a destructor is a bit risky since the slab allocators may run
the destructors anytime they decide a slab is no longer needed.

Or do we want to continue supporting destructors? If so, why?




The last destructor is in

arch/sh/mm/pmb.c:

static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
                           unsigned long flags)
{
        struct pmb_entry *pmbe = pmb;

        memset(pmb, 0, sizeof(struct pmb_entry));

        spin_lock_irq(&pmb_list_lock);

        pmbe->entry = PMB_NO_ENTRY;
        pmb_list_add(pmbe);

        spin_unlock_irq(&pmb_list_lock);
}

static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep,
                           unsigned long flags)
{
        spin_lock_irq(&pmb_list_lock);
        pmb_list_del(pmb);
        spin_unlock_irq(&pmb_list_lock);
}

static int __init pmb_init(void)
{
        unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
        unsigned int entry;

        BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

        pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
                                      SLAB_PANIC, pmb_cache_ctor,
                                      pmb_cache_dtor);



This patch drops destructor support from all slab allocators. Any attempt
to register a destructor will BUG().

Cc: Pekka Enberg <[email protected]>
Cc: Paul Mundt <[email protected]>
Signed-off-by: Christoph Lameter <[email protected]>

---
include/linux/slub_def.h | 1 -
mm/slab.c | 27 ++-------------------------
mm/slob.c | 6 +-----
mm/slub.c | 46 +++++++++++++++-------------------------------
4 files changed, 18 insertions(+), 62 deletions(-)

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h 2007-05-10 11:47:41.000000000 -0700
+++ slub/include/linux/slub_def.h 2007-05-10 11:47:58.000000000 -0700
@@ -40,7 +40,6 @@ struct kmem_cache {
int objects; /* Number of objects in slab */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(void *, struct kmem_cache *, unsigned long);
- void (*dtor)(void *, struct kmem_cache *, unsigned long);
int inuse; /* Offset to metadata */
int align; /* Alignment */
const char *name; /* Name (only for display!) */
Index: slub/mm/slab.c
===================================================================
--- slub.orig/mm/slab.c 2007-05-10 11:47:41.000000000 -0700
+++ slub/mm/slab.c 2007-05-10 11:47:58.000000000 -0700
@@ -409,9 +409,6 @@ struct kmem_cache {
/* constructor func */
void (*ctor) (void *, struct kmem_cache *, unsigned long);

- /* de-constructor func */
- void (*dtor) (void *, struct kmem_cache *, unsigned long);
-
/* 5) cache creation/removal */
const char *name;
struct list_head next;
@@ -1911,20 +1908,11 @@ static void slab_destroy_objs(struct kme
slab_error(cachep, "end of a freed object "
"was overwritten");
}
- if (cachep->dtor && !(cachep->flags & SLAB_POISON))
- (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
}
}
#else
static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
- if (cachep->dtor) {
- int i;
- for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
- (cachep->dtor) (objp, cachep, 0);
- }
- }
}
#endif

@@ -2124,7 +2112,7 @@ static int setup_cpu_cache(struct kmem_c
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @dtor: A destructor for the objects (not implemented anymore).
*
* Returns a ptr to the cache on success, NULL on failure.
* Cannot be called within a int, but can be interrupted.
@@ -2159,7 +2147,7 @@ kmem_cache_create (const char *name, siz
* Sanity checks... these are all serious usage bugs.
*/
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+ (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || dtor) {
printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
name);
BUG();
@@ -2213,9 +2201,6 @@ kmem_cache_create (const char *name, siz
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(flags & SLAB_POISON);
#endif
- if (flags & SLAB_DESTROY_BY_RCU)
- BUG_ON(dtor);
-
/*
* Always checks flags, a caller might be expecting debug support which
* isn't available.
@@ -2370,7 +2355,6 @@ kmem_cache_create (const char *name, siz
BUG_ON(!cachep->slabp_cache);
}
cachep->ctor = ctor;
- cachep->dtor = dtor;
cachep->name = name;

if (setup_cpu_cache(cachep)) {
@@ -2835,7 +2819,6 @@ failed:
* Perform extra freeing checks:
* - detect bad pointers.
* - POISON/RED_ZONE checking
- * - destructor calls, for caches with POISON+dtor
*/
static void kfree_debugcheck(const void *objp)
{
@@ -2894,12 +2877,6 @@ static void *cache_free_debugcheck(struc
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, slabp, objnr));

- if (cachep->flags & SLAB_POISON && cachep->dtor) {
- /* we want to cache poison the object,
- * call the destruction callback
- */
- cachep->dtor(objp + obj_offset(cachep), cachep, 0);
- }
#ifdef CONFIG_DEBUG_SLAB_LEAK
slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
Index: slub/mm/slob.c
===================================================================
--- slub.orig/mm/slob.c 2007-05-10 11:47:41.000000000 -0700
+++ slub/mm/slob.c 2007-05-10 11:47:58.000000000 -0700
@@ -268,7 +268,6 @@ struct kmem_cache {
unsigned int size, align;
const char *name;
void (*ctor)(void *, struct kmem_cache *, unsigned long);
- void (*dtor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -278,13 +277,13 @@ struct kmem_cache *kmem_cache_create(con
{
struct kmem_cache *c;

+ BUG_ON(dtor);
c = slob_alloc(sizeof(struct kmem_cache), flags, 0);

if (c) {
c->name = name;
c->size = size;
c->ctor = ctor;
- c->dtor = dtor;
/* ignore alignment unless it's forced */
c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < align)
@@ -330,9 +329,6 @@ EXPORT_SYMBOL(kmem_cache_zalloc);

void kmem_cache_free(struct kmem_cache *c, void *b)
{
- if (c->dtor)
- c->dtor(b, c, 0);
-
if (c->size < PAGE_SIZE)
slob_free(b, c->size);
else
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c 2007-05-10 11:47:57.000000000 -0700
+++ slub/mm/slub.c 2007-05-10 11:54:29.000000000 -0700
@@ -898,13 +898,13 @@ static void kmem_cache_open_debug_check(
* On 32 bit platforms the limit is 256k. On 64bit platforms
* the limit is 512k.
*
- * Debugging or ctor/dtors may create a need to move the free
+ * Debugging or ctor may create a need to move the free
* pointer. Fail if this happens.
*/
if (s->size >= 65535 * sizeof(void *)) {
BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
- BUG_ON(s->ctor || s->dtor);
+ BUG_ON(s->ctor);
}
else
/*
@@ -1037,15 +1037,12 @@ static void __free_slab(struct kmem_cach
{
int pages = 1 << s->order;

- if (unlikely(SlabDebug(page) || s->dtor)) {
+ if (unlikely(SlabDebug(page))) {
void *p;

slab_pad_check(s, page);
- for_each_object(p, s, page_address(page)) {
- if (s->dtor)
- s->dtor(p, s, 0);
+ for_each_object(p, s, page_address(page))
check_object(s, page, p, 0);
- }
}

mod_zone_page_state(page_zone(page),
@@ -1883,7 +1880,7 @@ static int calculate_sizes(struct kmem_c
* then we should never poison the object itself.
*/
if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
- !s->ctor && !s->dtor)
+ !s->ctor)
s->flags |= __OBJECT_POISON;
else
s->flags &= ~__OBJECT_POISON;
@@ -1913,7 +1910,7 @@ static int calculate_sizes(struct kmem_c

#ifdef CONFIG_SLUB_DEBUG
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
- s->ctor || s->dtor)) {
+ s->ctor)) {
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
@@ -1982,13 +1979,11 @@ static int calculate_sizes(struct kmem_c
static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
const char *name, size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void *, struct kmem_cache *, unsigned long),
- void (*dtor)(void *, struct kmem_cache *, unsigned long))
+ void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
memset(s, 0, kmem_size);
s->name = name;
s->ctor = ctor;
- s->dtor = dtor;
s->objsize = size;
s->flags = flags;
s->align = align;
@@ -2173,7 +2168,7 @@ static struct kmem_cache *create_kmalloc

down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
- flags, NULL, NULL))
+ flags, NULL))
goto panic;

list_add(&s->list, &slab_caches);
@@ -2485,7 +2480,7 @@ static int slab_unmergeable(struct kmem_
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;

- if (s->ctor || s->dtor)
+ if (s->ctor)
return 1;

return 0;
@@ -2493,15 +2488,14 @@ static int slab_unmergeable(struct kmem_

static struct kmem_cache *find_mergeable(size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void *, struct kmem_cache *, unsigned long),
- void (*dtor)(void *, struct kmem_cache *, unsigned long))
+ void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
struct list_head *h;

if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
return NULL;

- if (ctor || dtor)
+ if (ctor)
return NULL;

size = ALIGN(size, sizeof(void *));
@@ -2543,8 +2537,10 @@ struct kmem_cache *kmem_cache_create(con
{
struct kmem_cache *s;

+ BUG_ON(dtor);
+
down_write(&slub_lock);
- s = find_mergeable(size, align, flags, dtor, ctor);
+ s = find_mergeable(size, align, flags, ctor);
if (s) {
s->refcount++;
/*
@@ -2558,7 +2554,7 @@ struct kmem_cache *kmem_cache_create(con
} else {
s = kmalloc(kmem_size, GFP_KERNEL);
if (s && kmem_cache_open(s, GFP_KERNEL, name,
- size, align, flags, ctor, dtor)) {
+ size, align, flags, ctor)) {
if (sysfs_slab_add(s)) {
kfree(s);
goto err;
@@ -3199,17 +3195,6 @@ static ssize_t ctor_show(struct kmem_cac
}
SLAB_ATTR_RO(ctor);

-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
- if (s->dtor) {
- int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
- return n + sprintf(buf + n, "\n");
- }
- return 0;
-}
-SLAB_ATTR_RO(dtor);
-
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3441,7 +3426,6 @@ static struct attribute * slab_attrs[] =
&partial_attr.attr,
&cpu_slabs_attr.attr,
&ctor_attr.attr,
- &dtor_attr.attr,
&aliases_attr.attr,
&align_attr.attr,
&sanity_checks_attr.attr,


2007-05-10 19:21:20

by Pekka Enberg

Subject: Re: [RFC] Slab allocators: Drop support for destructors

On 5/10/07, Christoph Lameter <[email protected]> wrote:
> Or are there valid reasons to keep them around? It seems they were mainly
> used for list management which required them to take a spinlock. Taking a
> spinlock in a destructor is a bit risky since the slab allocators may run
> the destructors anytime they decide a slab is no longer needed.
>
> Or do we want to continue supporting destructors? If so, why?

Well, constructors are on their way out too because they don't seem to
give the performance benefit they were designed for anymore. As for
destructors, they have been pretty useless in Linux for a long time
now and we really don't do much "complex initialization" that requires
undo (releasing resources).

Looks good to me.

Acked-by: Pekka Enberg <[email protected]>

Pekka

2007-05-10 19:22:12

by William Lee Irwin III

Subject: Re: [RFC] Slab allocators: Drop support for destructors

On Thu, May 10, 2007 at 12:00:08PM -0700, Christoph Lameter wrote:
> As far as I can tell there is only a single slab destructor left (there
> is currently another in i386 but it's going to go as soon as Andi merges
> i386's support for quicklists).
> I wonder how difficult it would be to remove it? If we have no need for
> destructors anymore then maybe we could remove destructor support from the
> slab allocators? There is no point in checking for destructor uses in
> the slab allocators if there are none.
> Or are there valid reasons to keep them around? It seems they were mainly
> used for list management which required them to take a spinlock. Taking a
> spinlock in a destructor is a bit risky since the slab allocators may run
> the destructors anytime they decide a slab is no longer needed.
> Or do we want to continue supporting destructors? If so, why?

It used to be that some caches retained attached allocated objects
until the time of a destructor call, at which time they were freed.
I'm not aware of any current use of this idiom. Space consumption
pathologies arise very easily from the use of this idiom, so it's
not clear to me it's worth supporting.
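
Schematically that idiom looked something like the sketch below; struct
foo, foo_ctor() and foo_dtor() are invented here purely for illustration
and are not lifted from any in-tree user.

struct foo {
        void *attached;         /* allocation attached during use, kept across
                                   kmem_cache_free()/kmem_cache_alloc() cycles */
};

static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
        struct foo *f = obj;

        f->attached = NULL;
}

static void foo_dtor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
        struct foo *f = obj;

        /* Only released when the allocator tears down the slab page. */
        kfree(f->attached);
}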

List membership of cached preconstructed objects is part of a more
general idiom of updating such objects at the time of a state change
requiring the preconstructed state to change. It is possible to support
this without a ->dtor() operation by having the allocator support a
flushing operation to dispose of cached preconstructed objects, or a
generation marker for such state changes, so that stale preconstructed
objects are tagged as such and re-constructed on demand.
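
A rough sketch of the generation-marker variant follows; foo_cache,
foo_alloc(), foo_reconstruct() and the ->generation field are all made
up here for illustration.

static struct kmem_cache *foo_cache;
static atomic_t foo_generation = ATOMIC_INIT(0);

struct foo {
        int generation;         /* which preconstructed state this object carries */
        /* ... preconstructed fields ... */
};

static void foo_reconstruct(struct foo *f);     /* redoes the ctor work; not shown */

/* Bumped instead of running a ->dtor() when the preconstructed state changes. */
static void foo_invalidate_preconstructed(void)
{
        atomic_inc(&foo_generation);
}

static struct foo *foo_alloc(gfp_t gfp)
{
        struct foo *f = kmem_cache_alloc(foo_cache, gfp);
        int gen = atomic_read(&foo_generation);

        if (f && f->generation != gen) {
                /* Stale preconstructed object: redo the constructor work. */
                foo_reconstruct(f);
                f->generation = gen;
        }
        return f;
}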

The last idiom I can recall for which ->dtor() operations make sense is
essentially an integrity check. It's possible to support this with an
explicit slab debugging option to verify that freed objects have been
returned to their preconstructed states, with superior error reporting,
provided that some method of comparing the states is arranged. I'm not
aware of any in-kernel uses of this idiom.
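
As a sketch, such a check might look like the following, with
foo_in_ctor_state() standing in for whatever state comparison gets
arranged; all names here are invented for illustration.

static bool foo_in_ctor_state(const struct foo *f);    /* state comparison; not shown */

static void foo_free(struct foo *f)
{
#ifdef CONFIG_DEBUG_SLAB
        /*
         * With no ->dtor(), the caller is expected to hand the object
         * back in its constructed state; complain loudly if it did not.
         */
        WARN_ON(!foo_in_ctor_state(f));
#endif
        kmem_cache_free(foo_cache, f);
}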

Others may be aware of other idioms ->dtor() operations are used to
implement.


-- wli

2007-05-10 19:24:27

by Christoph Lameter

Subject: Re: [RFC] Slab allocators: Drop support for destructors

On Thu, 10 May 2007, Pekka Enberg wrote:

> > Or do we want to continue supporting destructors? If so, why?
>
> Well, constructors are on their way out too because they don't seem to
> give the performance benefit they were designed for anymore. As for
> destructors, they have been pretty useless in Linux for a long time
> now and we really don't do much "complex initialization" that requires
> undo (releasing resources).

Well, I am not too sure about removing constructors. Andrew's test seems to
show some benefit, though that is just one test; we need more. I do not
like constructors either, but if performance tests show regressions then we
need to keep them.

2007-05-10 19:58:12

by William Lee Irwin III

Subject: Re: [RFC] Slab allocators: Drop support for destructors

On 5/10/07, Christoph Lameter <[email protected]> wrote:
>> Or are there valid reasons to keep them around? It seems they were mainly
>> used for list management which required them to take a spinlock. Taking a
>> spinlock in a destructor is a bit risky since the slab allocators may run
>> the destructors anytime they decide a slab is no longer needed.
>> Or do we want to continue supporting destructors? If so, why?

On Thu, May 10, 2007 at 10:21:08PM +0300, Pekka Enberg wrote:
> Well, constructors are on their way out too because they don't seem to
> give the performance benefit they were designed for anymore. As for
> destructors, they have been pretty useless in Linux for a long time
> now and we really don't do much "complex initialization" that requires
> undo (releasing resources).

The anti-constructor trend is counterproductive. The cache effects are
not being properly monitored and people are failing to understand the
importance of conserving cache.

Microbenchmarks where you pound the potentially preconstructed data
structures like wild monkeys are not why constructors are used. They're
to avoid burning cachelines in the cases where you need the cache for
other purposes besides building up and tearing down the structures in
question. Data structure layout becomes relevant to this; the
preconstructed cachelines need to be separated from ones that must be
clobbered regardless immediately after allocation. The effect is
cumulative and broadly dispersed. The constructor elimination patches
are incrementally filling up caches with the formerly preconstructed
objects' cachelines, where the degradation from each individual change
is so small as to be difficult to to discern or potentially even seen
to be advantageous on unrealistic microbenchmarks. The net effect of
removing constructors altogether will be degradations in real-world
workloads on account of the combined effect of the cache footprint
increases. The cache should rather be saved for userspace.

What would make this easier to see is cache instrumentation not
available to most people: specifically, tabulating what the various
cachelines in the cache are caching. Simulators may be useful to help
determine all that.


-- wli

2007-05-10 23:36:11

by Paul Mundt

Subject: Re: [RFC] Slab allocators: Drop support for destructors

On Thu, May 10, 2007 at 12:00:08PM -0700, Christoph Lameter wrote:
> As far as I can tell there is only a single slab destructor left (there
> is currently another in i386 but it's going to go as soon as Andi merges
> i386's support for quicklists).
>
> I wonder how difficult it would be to remove it? If we have no need for
> destructors anymore then maybe we could remove destructor support from the
> slab allocators? There is no point in checking for destructor uses in
> the slab allocators if there are none.
>
> Or are there valid reasons to keep them around? It seems they were mainly
> used for list management which required them to take a spinlock. Taking a
> spinlock in a destructor is a bit risky since the slab allocators may run
> the destructors anytime they decide a slab is no longer needed.
>
> Or do we want to continue supporting destructors? If so, why?
>
[snip pmb stuff]

I'll take a look at tidying up the PMB slab; getting rid of the dtor
shouldn't be terribly painful. I simply opted to do the list management
there since others were doing it for the PGD slab cache at the time that
was written.

2007-05-11 02:22:07

by Paul Mundt

Subject: Re: [RFC] Slab allocators: Drop support for destructors

On Fri, May 11, 2007 at 08:35:27AM +0900, Paul Mundt wrote:
> On Thu, May 10, 2007 at 12:00:08PM -0700, Christoph Lameter wrote:
> > As far as I can tell there is only a single slab destructor left (there
> > is currently another in i386 but it's going to go as soon as Andi merges
> > i386's support for quicklists).
> >
> > I wonder how difficult it would be to remove it? If we have no need for
> > destructors anymore then maybe we could remove destructor support from the
> > slab allocators? There is no point in checking for destructor uses in
> > the slab allocators if there are none.
> >
> > Or are there valid reasons to keep them around? It seems they were mainly
> > used for list management which required them to take a spinlock. Taking a
> > spinlock in a destructor is a bit risky since the slab allocators may run
> > the destructors anytime they decide a slab is no longer needed.
> >
> > Or do we want to continue supporting destructors? If so, why?
> >
> [snip pmb stuff]
>
> I'll take a look at tidying up the PMB slab; getting rid of the dtor
> shouldn't be terribly painful. I simply opted to do the list management
> there since others were doing it for the PGD slab cache at the time that
> was written.

And here's the bit for dropping pmb_cache_dtor(), moving the list
management up to pmb_alloc() and pmb_free().

With this applied, we're all set for killing off slab destructors
from the kernel entirely.

Signed-off-by: Paul Mundt <[email protected]>

--

arch/sh/mm/pmb.c | 79 ++++++++++++++++++++++++++-----------------------------
1 file changed, 38 insertions(+), 41 deletions(-)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 02aae06..b6a5a33 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
*
* Privileged Space Mapping Buffer (PMB) Support.
*
- * Copyright (C) 2005, 2006 Paul Mundt
+ * Copyright (C) 2005, 2006, 2007 Paul Mundt
*
* P1/P2 Section mapping definitions from map32.h, which was:
*
@@ -68,6 +68,32 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
return mk_pmb_entry(entry) | PMB_DATA;
}

+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+ struct pmb_entry **p, *tmp;
+
+ p = &pmb_list;
+ while ((tmp = *p) != NULL)
+ p = &tmp->next;
+
+ pmbe->next = tmp;
+ *p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+ struct pmb_entry **p, *tmp;
+
+ for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+ if (tmp == pmbe) {
+ *p = tmp->next;
+ return;
+ }
+}
+
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
unsigned long flags)
{
@@ -81,11 +107,19 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
pmbe->ppn = ppn;
pmbe->flags = flags;

+ spin_lock_irq(&pmb_list_lock);
+ pmb_list_add(pmbe);
+ spin_unlock_irq(&pmb_list_lock);
+
return pmbe;
}

void pmb_free(struct pmb_entry *pmbe)
{
+ spin_lock_irq(&pmb_list_lock);
+ pmb_list_del(pmbe);
+ spin_unlock_irq(&pmb_list_lock);
+
kmem_cache_free(pmb_cache, pmbe);
}

@@ -167,31 +201,6 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
clear_bit(entry, &pmb_map);
}

-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
- struct pmb_entry **p, *tmp;
-
- p = &pmb_list;
- while ((tmp = *p) != NULL)
- p = &tmp->next;
-
- pmbe->next = tmp;
- *p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
- struct pmb_entry **p, *tmp;
-
- for (p = &pmb_list; (tmp = *p); p = &tmp->next)
- if (tmp == pmbe) {
- *p = tmp->next;
- return;
- }
-}

static struct {
unsigned long size;
@@ -283,25 +292,14 @@ void pmb_unmap(unsigned long addr)
} while (pmbe);
}

-static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
+ unsigned long flags)
{
struct pmb_entry *pmbe = pmb;

memset(pmb, 0, sizeof(struct pmb_entry));

- spin_lock_irq(&pmb_list_lock);
-
pmbe->entry = PMB_NO_ENTRY;
- pmb_list_add(pmbe);
-
- spin_unlock_irq(&pmb_list_lock);
-}
-
-static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
-{
- spin_lock_irq(&pmb_list_lock);
- pmb_list_del(pmb);
- spin_unlock_irq(&pmb_list_lock);
}

static int __init pmb_init(void)
@@ -312,8 +310,7 @@ static int __init pmb_init(void)
BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
- SLAB_PANIC, pmb_cache_ctor,
- pmb_cache_dtor);
+ SLAB_PANIC, pmb_cache_ctor, NULL);

jump_to_P2();