2015-07-10 12:07:43

by Thomas Gleixner

Subject: [patch] mm/slub: Move slab initialization into irq enabled region

Initializing a new slab can introduce rather large latencies because
most of the initialization always runs with interrupts disabled.

There is no point in doing so. The newly allocated slab is not visible
yet, so there is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab(),
so that for all allocations with __GFP_WAIT set, interrupts are enabled.
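
(Aside, for illustration only and not part of the patch: the same
publish-after-init pattern in plain userspace C, with a mutex standing
in for the disabled interrupts. All names below are made up. An object
that no other thread can reach yet needs no protection; only the step
that makes it visible does.)

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct node {
	struct node *next;
	char payload[4096];
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static int push_node(void)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;

	/* Expensive "constructor" work: no lock held, nobody sees n yet. */
	memset(n->payload, 0, sizeof(n->payload));

	/* Only the publication itself needs the critical section. */
	pthread_mutex_lock(&list_lock);
	n->next = list_head;
	list_head = n;
	pthread_mutex_unlock(&list_lock);
	return 0;
}

int main(void)
{
	return push_node();
}

The patch below does the analogous thing with interrupts: the slab's
objects are set up while interrupts are enabled, and interrupts are
only disabled again (for __GFP_WAIT allocations) right before
allocate_slab() returns to its irq-off callers.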

Signed-off-by: Thomas Gleixner <[email protected]>
---
mm/slub.c | 85 +++++++++++++++++++++++++++++++-------------------------------
1 file changed, 43 insertions(+), 42 deletions(-)

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct
kasan_slab_free(s, x);
}

+static void setup_object(struct kmem_cache *s, struct page *page,
+ void *object)
+{
+ setup_object_debug(s, page, object);
+ if (unlikely(s->ctor)) {
+ kasan_unpoison_object_data(s, object);
+ s->ctor(object);
+ kasan_poison_object_data(s, object);
+ }
+}
+
/*
* Slab allocation and freeing
*/
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
+ void *start, *p;
+ int idx, order;

flags &= gfp_allowed_mask;

@@ -1364,8 +1377,11 @@ static struct page *allocate_slab(struct
stat(s, ORDER_FALLBACK);
}

- if (kmemcheck_enabled && page
- && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+ if (!page)
+ goto out;
+
+ if (kmemcheck_enabled &&
+ !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);

kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1396,12 @@ static struct page *allocate_slab(struct
kmemcheck_mark_unallocated_pages(page, pages);
}

- if (flags & __GFP_WAIT)
- local_irq_disable();
if (!page)
- return NULL;
+ goto out;

page->objects = oo_objects(oo);
- mod_zone_page_state(page_zone(page),
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
- 1 << oo_order(oo));
-
- return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
- void *object)
-{
- setup_object_debug(s, page, object);
- if (unlikely(s->ctor)) {
- kasan_unpoison_object_data(s, object);
- s->ctor(object);
- kasan_poison_object_data(s, object);
- }
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
- struct page *page;
- void *start;
- void *p;
- int order;
- int idx;
-
- if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
- pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
- BUG();
- }
-
- page = allocate_slab(s,
- flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
- if (!page)
- goto out;

order = compound_order(page);
- inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
@@ -1448,10 +1425,34 @@ static struct page *new_slab(struct kmem
page->freelist = start;
page->inuse = page->objects;
page->frozen = 1;
+
out:
+ if (flags & __GFP_WAIT)
+ local_irq_disable();
+ if (!page)
+ return NULL;
+
+ mod_zone_page_state(page_zone(page),
+ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+ 1 << oo_order(oo));
+
+ inc_slabs_node(s, page_to_nid(page), page->objects);
+
return page;
}

+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+ if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+ pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+ BUG();
+ }
+
+ return allocate_slab(s,
+ flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
static void __free_slab(struct kmem_cache *s, struct page *page)
{
int order = compound_order(page);


by Christoph Lameter

Subject: Re: [patch] mm/slub: Move slab initialization into irq enabled region

There is a duplicate check for page == NULL now.


Subject: slub: allocate_slab: no need to check twice for page == NULL

Remove the second check.

Signed-off-by: Christoph Lameter <[email protected]>

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1396,9 +1396,6 @@ static struct page *allocate_slab(struct
kmemcheck_mark_unallocated_pages(page, pages);
}

- if (!page)
- goto out;
-
page->objects = oo_objects(oo);

order = compound_order(page);

2015-07-10 15:03:09

by Steven Rostedt

Subject: Re: [patch] mm/slub: Move slab initialization into irq enabled region

On Fri, 10 Jul 2015 12:07:13 -0000
Thomas Gleixner <[email protected]> wrote:


> /*
> * Slab allocation and freeing
> */
> @@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
> struct page *page;
> struct kmem_cache_order_objects oo = s->oo;
> gfp_t alloc_gfp;
> + void *start, *p;
> + int idx, order;
>
> flags &= gfp_allowed_mask;
>
> @@ -1364,8 +1377,11 @@ static struct page *allocate_slab(struct
> stat(s, ORDER_FALLBACK);
> }
>
> - if (kmemcheck_enabled && page
> - && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
> + if (!page)
> + goto out;

Since the above now looks like this:

page = alloc_slab_page(s, alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
alloc_gfp = flags;
/*
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
page = alloc_slab_page(s, alloc_gfp, node, oo);

if (page)
stat(s, ORDER_FALLBACK);
}

if (!page)
goto out;

Why not have it do this:

page = alloc_slab_page(s, alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
alloc_gfp = flags;
/*
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
page = alloc_slab_page(s, alloc_gfp, node, oo);
if (unlikely(!page))
goto out;

stat(s, ORDER_FALLBACK);
}

And get rid of the double check for !page in the fast path.

-- Steve


> +
> + if (kmemcheck_enabled &&
> + !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
> int pages = 1 << oo_order(oo);
>
> kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
> @@ -1380,51 +1396,12 @@ static struct page *allocate_slab(struct
> kmemcheck_mark_unallocated_pages(page, pages);
> }
>
> - if (flags & __GFP_WAIT)
> - local_irq_disable();
> if (!page)
> - return NULL;
> + goto out;
>
> page->objects = oo_objects(oo);

by Christoph Lameter

Subject: Re: [patch] mm/slub: Move slab initialization into irq enabled region

On Fri, 10 Jul 2015, Steven Rostedt wrote:

> And get rid of the double check for !page in the fast path.

This is the 2nd of 3 checks by the way. Both our approaches together get
it down to 1.
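
(Schematic of the three checks in question in the V1 flow, condensed
from the patch above; not a literal diff:)

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		/* lower order fallback */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (page)		/* check 1 */
			stat(s, ORDER_FALLBACK);
	}

	if (!page)			/* check 2: in the fast path */
		goto out;

	/* ... kmemcheck shadow setup ... */

	if (!page)			/* check 3: the duplicate */
		goto out;

Steven's rework turns check 1 into an early goto out, which drops
check 2 from the fast path; Christoph's follow-up removes check 3. The
V2 patch below keeps only the single check inside the fallback branch.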

2015-07-10 21:09:05

by Thomas Gleixner

Subject: [patch V2] mm/slub: Move slab initialization into irq enabled region

Initializing a new slab can introduce rather large latencies because
most of the initialization always runs with interrupts disabled.

There is no point in doing so. The newly allocated slab is not visible
yet, so there is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab(),
so that for all allocations with __GFP_WAIT set, interrupts are enabled.

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Peter Zijlstra <[email protected]>
---

V2: Removed the extra NULL checks as suggested by Steven and Christoph

mm/slub.c | 89 +++++++++++++++++++++++++++++---------------------------------
1 file changed, 42 insertions(+), 47 deletions(-)

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct
kasan_slab_free(s, x);
}

+static void setup_object(struct kmem_cache *s, struct page *page,
+ void *object)
+{
+ setup_object_debug(s, page, object);
+ if (unlikely(s->ctor)) {
+ kasan_unpoison_object_data(s, object);
+ s->ctor(object);
+ kasan_poison_object_data(s, object);
+ }
+}
+
/*
* Slab allocation and freeing
*/
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
+ void *start, *p;
+ int idx, order;

flags &= gfp_allowed_mask;

@@ -1359,13 +1372,13 @@ static struct page *allocate_slab(struct
* Try a lower order alloc if possible
*/
page = alloc_slab_page(s, alloc_gfp, node, oo);
-
- if (page)
- stat(s, ORDER_FALLBACK);
+ if (unlikely(!page))
+ goto out;
+ stat(s, ORDER_FALLBACK);
}

- if (kmemcheck_enabled && page
- && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+ if (kmemcheck_enabled &&
+ !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);

kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1393,9 @@ static struct page *allocate_slab(struct
kmemcheck_mark_unallocated_pages(page, pages);
}

- if (flags & __GFP_WAIT)
- local_irq_disable();
- if (!page)
- return NULL;
-
page->objects = oo_objects(oo);
- mod_zone_page_state(page_zone(page),
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
- 1 << oo_order(oo));
-
- return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
- void *object)
-{
- setup_object_debug(s, page, object);
- if (unlikely(s->ctor)) {
- kasan_unpoison_object_data(s, object);
- s->ctor(object);
- kasan_poison_object_data(s, object);
- }
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
- struct page *page;
- void *start;
- void *p;
- int order;
- int idx;
-
- if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
- pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
- BUG();
- }
-
- page = allocate_slab(s,
- flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
- if (!page)
- goto out;

order = compound_order(page);
- inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
@@ -1448,10 +1419,34 @@ static struct page *new_slab(struct kmem
page->freelist = start;
page->inuse = page->objects;
page->frozen = 1;
+
out:
+ if (flags & __GFP_WAIT)
+ local_irq_disable();
+ if (!page)
+ return NULL;
+
+ mod_zone_page_state(page_zone(page),
+ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+ 1 << oo_order(oo));
+
+ inc_slabs_node(s, page_to_nid(page), page->objects);
+
return page;
}

+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+ if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+ pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+ BUG();
+ }
+
+ return allocate_slab(s,
+ flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
static void __free_slab(struct kmem_cache *s, struct page *page)
{
int order = compound_order(page);

2015-07-15 23:43:10

by David Rientjes

Subject: Re: [patch V2] mm/slub: Move slab initialization into irq enabled region

On Fri, 10 Jul 2015, Thomas Gleixner wrote:

> Initializing a new slab can introduce rather large latencies because
> most of the initialization always runs with interrupts disabled.
>
> There is no point in doing so. The newly allocated slab is not visible
> yet, so there is no reason to protect it against concurrent alloc/free.
>
> Move the expensive parts of the initialization into allocate_slab(),
> so that for all allocations with __GFP_WAIT set, interrupts are enabled.
>
> Signed-off-by: Thomas Gleixner <[email protected]>
> Cc: Christoph Lameter <[email protected]>
> Cc: Pekka Enberg <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Joonsoo Kim <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: [email protected]
> Cc: Sebastian Andrzej Siewior <[email protected]>
> Cc: Steven Rostedt <[email protected]>
> Cc: Peter Zijlstra <[email protected]>

Acked-by: David Rientjes <[email protected]>