2007-09-19 03:41:40

by Christoph Lameter

Subject: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

SLAB_VFALLBACK can be specified for selected slab caches. If fallback is
available then the conservative settings for higher order allocations are
overridden. We then request an order that can accommodate at minimum
100 objects. The size of an individual slab allocation is allowed to reach
up to 256k (order 6 on i386, order 4 on IA64).
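
As a back-of-envelope check, those per-arch orders follow directly from
the 256k cap and each architecture's PAGE_SHIFT:

/*
 * 256k = 1 << 18 bytes, so the maximum order is 18 - PAGE_SHIFT:
 *   i386 (4k pages,  PAGE_SHIFT = 12): 18 - 12 = order 6
 *   IA64 (16k pages, PAGE_SHIFT = 14): 18 - 14 = order 4
 * This matches the "18 - PAGE_SHIFT" bound passed to calculate_order()
 * for SLAB_VFALLBACK caches in the patch below.
 */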

Implementing fallback requires special handling of virtual mappings in
the free path. However, the impact is minimal since we already check
whether the address is NULL or ZERO_SIZE_PTR. No additional cachelines are
touched if we do not fall back. If we do need to handle a virtual
compound page, then we walk the kernel page tables in the free path to
determine the page struct.
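
For illustration, that lookup is conceptually what vmalloc_to_page() does;
a simplified sketch with an invented helper name (the real function adds
error checking):

static struct page *vaddr_to_page(unsigned long addr)
{
        /* Walk the kernel page tables down to the pte mapping addr */
        pgd_t *pgd = pgd_offset_k(addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);
        pte_t *pte = pte_offset_kernel(pmd, addr);

        return pte_page(*pte);  /* the struct page backing addr */
}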

We also need special handling in the allocation paths since the virtual
addresses cannot be obtained via page_address(). SLUB exploits the fact that
page->private is set to the vmalloc address, which avoids a costly
vmalloc_address() call.

However, for diagnostics we still need to determine the
vmalloc address from the page struct. There we must fall back to the costly
vmalloc_address().
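
One plausible shape for that inverse lookup, assuming a linear scan of the
vmalloc area list (hypothetical sketch; the actual helper is presumably
defined earlier in this series):

/* Find the vmalloc address of a page by scanning the vmalloc areas. */
static void *vmalloc_address_sketch(struct page *page)
{
        struct vm_struct *vm;
        unsigned int i;

        for (vm = vmlist; vm; vm = vm->next)
                for (i = 0; i < vm->nr_pages; i++)
                        if (vm->pages[i] == page)
                                return vm->addr + i * PAGE_SIZE;
        return NULL;
}

The linear scan over every vmalloc area is what makes this costly, and why
the allocation and free paths avoid it.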

Signed-off-by: Christoph Lameter <[email protected]>

---
include/linux/slab.h | 1
include/linux/slub_def.h | 1
mm/slub.c | 83 ++++++++++++++++++++++++++++++++---------------
3 files changed, 60 insertions(+), 25 deletions(-)

Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h 2007-09-18 17:03:30.000000000 -0700
+++ linux-2.6/include/linux/slab.h 2007-09-18 17:07:39.000000000 -0700
@@ -19,6 +19,7 @@
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
+#define SLAB_VFALLBACK 0x00000200UL /* May fall back to vmalloc */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2007-09-18 17:03:30.000000000 -0700
+++ linux-2.6/mm/slub.c 2007-09-18 18:13:38.000000000 -0700
@@ -20,6 +20,7 @@
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>

/*
* Lock order:
@@ -277,6 +278,26 @@ static inline struct kmem_cache_node *ge
#endif
}

+static inline void *slab_address(struct page *page)
+{
+ if (unlikely(PageVcompound(page)))
+ return vmalloc_address(page);
+ else
+ return page_address(page);
+}
+
+static inline struct page *virt_to_slab(const void *addr)
+{
+ struct page *page;
+
+ if (unlikely(is_vmalloc_addr(addr)))
+ page = vmalloc_to_page(addr);
+ else
+ page = virt_to_page(addr);
+
+ return compound_head(page);
+}
+
static inline int check_valid_pointer(struct kmem_cache *s,
struct page *page, const void *object)
{
@@ -285,7 +306,7 @@ static inline int check_valid_pointer(st
if (!object)
return 1;

- base = page_address(page);
+ base = slab_address(page);
if (object < base || object >= base + s->objects * s->size ||
(object - base) % s->size) {
return 0;
@@ -470,7 +491,7 @@ static void slab_fix(struct kmem_cache *
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned int off; /* Offset of last byte */
- u8 *addr = page_address(page);
+ u8 *addr = slab_address(page);

print_tracking(s, p);

@@ -648,7 +669,7 @@ static int slab_pad_check(struct kmem_ca
if (!(s->flags & SLAB_POISON))
return 1;

- start = page_address(page);
+ start = slab_address(page);
end = start + (PAGE_SIZE << s->order);
length = s->objects * s->size;
remainder = end - (start + length);
@@ -1040,11 +1061,7 @@ static struct page *allocate_slab(struct
struct page * page;
int pages = 1 << s->order;

- if (s->order)
- flags |= __GFP_COMP;
-
- if (s->flags & SLAB_CACHE_DMA)
- flags |= SLUB_DMA;
+ flags |= s->gfpflags;

if (node == -1)
page = alloc_pages(flags, s->order);
@@ -1098,7 +1115,11 @@ static struct page *new_slab(struct kmem
SLAB_STORE_USER | SLAB_TRACE))
SetSlabDebug(page);

- start = page_address(page);
+ if (!PageVcompound(page))
+ start = slab_address(page);
+ else
+ start = (void *)page->private;
+
end = start + s->objects * s->size;

if (unlikely(s->flags & SLAB_POISON))
@@ -1130,7 +1151,7 @@ static void __free_slab(struct kmem_cach
void *p;

slab_pad_check(s, page);
- for_each_object(p, s, page_address(page))
+ for_each_object(p, s, slab_address(page))
check_object(s, page, p, 0);
ClearSlabDebug(page);
}
@@ -1672,7 +1693,7 @@ void kmem_cache_free(struct kmem_cache *
{
struct page *page;

- page = virt_to_head_page(x);
+ page = virt_to_slab(x);

slab_free(s, page, x, __builtin_return_address(0));
}
@@ -1681,7 +1702,7 @@ EXPORT_SYMBOL(kmem_cache_free);
/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
- struct page *page = virt_to_head_page(x);
+ struct page *page = virt_to_slab(x);

if (!PageSlab(page))
return NULL;
@@ -1780,10 +1801,9 @@ static inline int slab_order(int size, i
return order;
}

-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int min_objects, int max_order)
{
int order;
- int min_objects;
int fraction;

/*
@@ -1794,13 +1814,12 @@ static inline int calculate_order(int si
* First we reduce the acceptable waste in a slab. Then
* we reduce the minimum objects required in a slab.
*/
- min_objects = slub_min_objects;
while (min_objects > 1) {
fraction = 8;
while (fraction >= 4) {
order = slab_order(size, min_objects,
- slub_max_order, fraction);
- if (order <= slub_max_order)
+ max_order, fraction);
+ if (order <= max_order)
return order;
fraction /= 2;
}
@@ -1811,8 +1830,8 @@ static inline int calculate_order(int si
* We were unable to place multiple objects in a slab. Now
* lets see if we can place a single object there.
*/
- order = slab_order(size, 1, slub_max_order, 1);
- if (order <= slub_max_order)
+ order = slab_order(size, 1, max_order, 1);
+ if (order <= max_order)
return order;

/*
@@ -2059,10 +2078,24 @@ static int calculate_sizes(struct kmem_c
size = ALIGN(size, align);
s->size = size;

- s->order = calculate_order(size);
+ if (s->flags & SLAB_VFALLBACK)
+ s->order = calculate_order(size, 100, 18 - PAGE_SHIFT);
+ else
+ s->order = calculate_order(size, slub_min_objects,
+ slub_max_order);
+
if (s->order < 0)
return 0;

+ if (s->order)
+ s->gfpflags |= __GFP_COMP;
+
+ if (s->flags & SLAB_VFALLBACK)
+ s->gfpflags |= __GFP_VFALLBACK;
+
+ if (s->flags & SLAB_CACHE_DMA)
+ s->flags |= SLUB_DMA;
+
/*
* Determine the number of objects per slab
*/
@@ -2477,7 +2510,7 @@ void kfree(const void *x)
if (ZERO_OR_NULL_PTR(x))
return;

- page = virt_to_head_page(x);
+ page = virt_to_slab(x);
s = page->slab;

slab_free(s, page, (void *)x, __builtin_return_address(0));
@@ -2806,7 +2839,7 @@ static int validate_slab(struct kmem_cac
unsigned long *map)
{
void *p;
- void *addr = page_address(page);
+ void *addr = slab_address(page);

if (!check_slab(s, page) ||
!on_freelist(s, page, NULL))
@@ -3048,7 +3081,7 @@ static int add_location(struct loc_track

cpu_set(track->cpu, l->cpus);
}
- node_set(page_to_nid(virt_to_page(track)), l->nodes);
+ node_set(page_to_nid(virt_to_slab(track)), l->nodes);
return 1;
}

@@ -3079,14 +3112,14 @@ static int add_location(struct loc_track
cpus_clear(l->cpus);
cpu_set(track->cpu, l->cpus);
nodes_clear(l->nodes);
- node_set(page_to_nid(virt_to_page(track)), l->nodes);
+ node_set(page_to_nid(virt_to_slab(track)), l->nodes);
return 1;
}

static void process_slab(struct loc_track *t, struct kmem_cache *s,
struct page *page, enum track_item alloc)
{
- void *addr = page_address(page);
+ void *addr = slab_address(page);
DECLARE_BITMAP(map, s->objects);
void *p;

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h 2007-09-18 17:03:30.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h 2007-09-18 17:07:39.000000000 -0700
@@ -31,6 +31,7 @@ struct kmem_cache {
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
int order;
+ int gfpflags; /* Allocation flags */

/*
* Avoid an extra cache line for UP, SMP and for the node local to

--


2007-09-28 14:14:17

by Nick Piggin

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Wednesday 19 September 2007 13:36, Christoph Lameter wrote:
> SLAB_VFALLBACK can be specified for selected slab caches. If fallback is
> available then the conservative settings for higher order allocations are
> overridden. We then request an order that can accommodate at minimum
> 100 objects. The size of an individual slab allocation is allowed to reach
> up to 256k (order 6 on i386, order 4 on IA64).

How come SLUB wants such a large number of objects? I thought the
unqueued nature of it made it better than slab because it minimised
the amount of cache hot memory lying around in slabs...

vmalloc is incredibly slow and unscalable at the moment. I'm still working
on making it more scalable and faster -- hopefully to a point where it would
actually be usable for this... but you still get moved off large TLBs, and
also have to inevitably do tlb flushing.

Or do you have SLUB at a point where performance is comparable to SLAB,
and this is just a possible idea for more performance?

2007-09-28 17:33:38

by Christoph Lameter

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Fri, 28 Sep 2007, Nick Piggin wrote:

> On Wednesday 19 September 2007 13:36, Christoph Lameter wrote:
> > SLAB_VFALLBACK can be specified for selected slab caches. If fallback is
> > available then the conservative settings for higher order allocations are
> > overridden. We then request an order that can accommodate at minimum
> > 100 objects. The size of an individual slab allocation is allowed to reach
> > up to 256k (order 6 on i386, order 4 on IA64).
>
> How come SLUB wants such a large number of objects? I thought the
> unqueued nature of it made it better than slab because it minimised
> the amount of cache hot memory lying around in slabs...

The more objects in a page, the more the fast path runs. The more the fast
path runs, the lower the cache footprint and the faster the overall
allocations etc.

SLAB can be configured for large queues holding lots of objects.
SLUB can only reach the same through large pages because it does not
have queues. One could add the ability to manage pools of cpu slabs but
that would be adding yet another layer to compensate for the problem of
the small pages. Reliable large page allocations mean that we can get rid
of these layers and the many workarounds that we have in place right now.

The unqueued nature of SLUB reduces memory requirements and in general the
more efficient code paths of SLUB offset the advantage that SLAB can reach
by being able to put more objects onto its queues. SLAB necessarily
introduces complexity and cache line use through the need to manage those
queues.

> vmalloc is incredibly slow and unscalable at the moment. I'm still working
> on making it more scalable and faster -- hopefully to a point where it would
> actually be usable for this... but you still get moved off large TLBs, and
> also have to inevitably do tlb flushing.

Again I have not seen any fallbacks to vmalloc in my testing. What we are
doing here is mainly to address your theoretical cases that we so far have
never seen to be a problem and increase the reliability of allocations of
page orders larger than 3 to a usable level. So far I have not dared
to enable orders larger than 3 by default.

AFAICT the performance of vmalloc is not really relevant. If this
becomes an issue then it would be possible to reduce the orders used to
avoid fallbacks.

> Or do you have SLUB at a point where performance is comparable to SLAB,
> and this is just a possible idea for more performance?

AFAICT SLUB's performance is superior to SLAB's in most cases, and it was like
that from the beginning. I am still concerned about several corner cases,
though (I think most of them are going to be addressed by the per-cpu
patches in mm). Having a comparable or larger number of per-cpu objects than
SLAB is something that could also address some of these concerns and could
increase performance much further.

2007-09-28 17:59:30

by Peter Zijlstra

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK


On Fri, 2007-09-28 at 10:33 -0700, Christoph Lameter wrote:

> Again I have not seen any fallbacks to vmalloc in my testing. What we are
> doing here is mainly to address your theoretical cases that we so far have
> never seen to be a problem and increase the reliability of allocations of
> page orders larger than 3 to a usable level. So far I have not dared
> to enable orders larger than 3 by default.

take a recent -mm kernel, boot with mem=128M.

start 2 processes that each mmap a separate 64M file and do
sequential writes on them. start a 3rd process that does the same with
64M anonymous.

wait for a while, and you'll see order=1 failures.
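
A minimal sketch of one such writer (hypothetical reproducer; file layout,
flags and the one-byte-per-page stride are assumptions):

/* writer.c -- mmap a 64M region and write it sequentially, forever.
 * Run two instances against separate 64M files, plus one instance
 * changed to MAP_ANONYMOUS, on a mem=128M boot. */
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

#define LEN (64UL << 20)        /* 64M */

int main(int argc, char **argv)
{
        int fd = open(argv[1], O_RDWR | O_CREAT, 0644);
        char *p;
        size_t off;

        ftruncate(fd, LEN);
        p = mmap(NULL, LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        for (;;)                        /* sequential writes, forever */
                for (off = 0; off < LEN; off += 4096)
                        p[off] = 1;     /* dirty one byte per page */
}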



2007-09-28 18:20:36

by Christoph Lameter

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Fri, 28 Sep 2007, Peter Zijlstra wrote:

>
> On Fri, 2007-09-28 at 10:33 -0700, Christoph Lameter wrote:
>
> > Again I have not seen any fallbacks to vmalloc in my testing. What we are
> > doing here is mainly to address your theoretical cases that we so far have
> > never seen to be a problem and increase the reliability of allocations of
> > page orders larger than 3 to a usable level. So far I have not dared
> > to enable orders larger than 3 by default.
>
> take a recent -mm kernel, boot with mem=128M.

Ok so only 32k pages to play with? I have tried parallel kernel compiles
with mem=256m and they seemed to be fine.

> start 2 processes that each mmap a separate 64M file and do
> sequential writes on them. start a 3rd process that does the same with
> 64M anonymous.
>
> wait for a while, and you'll see order=1 failures.

Really? That means we can no longer even allocate stacks for forking.

It's surprising that neither lumpy reclaim nor the mobility patches can
deal with it? Lumpy reclaim should be able to free neighboring pages to
avoid the order 1 failure unless there are lots of pinned pages.

I guess then that lots of pages are pinned through I/O?

2007-09-28 18:30:30

by Peter Zijlstra

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK


On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:

> > start 2 processes that each mmap a separate 64M file and do
> > sequential writes on them. start a 3rd process that does the same with
> > 64M anonymous.
> >
> > wait for a while, and you'll see order=1 failures.
>
> Really? That means we can no longer even allocate stacks for forking.
>
> > It's surprising that neither lumpy reclaim nor the mobility patches can
> deal with it? Lumpy reclaim should be able to free neighboring pages to
> avoid the order 1 failure unless there are lots of pinned pages.
>
> I guess then that lots of pages are pinned through I/O?

memory got massively fragmented, as anti-frag gets easily defeated.
setting min_free_kbytes to 12M does seem to solve it - it forces 2 max
order blocks to stay available, so we don't mix types. however 12M on
128M is rather a lot.

it's still on my todo list to look at it further..

2007-09-28 18:41:32

by Christoph Lameter

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Fri, 28 Sep 2007, Peter Zijlstra wrote:

> memory got massively fragmented, as anti-frag gets easily defeated.
> setting min_free_kbytes to 12M does seem to solve it - it forces 2 max
> order blocks to stay available, so we don't mix types. however 12M on
> 128M is rather a lot.

Yes, strict ordering would be much better. On NUMA it may be possible to
completely forbid merging. We can fall back to other nodes if necessary.
12M is not much on a NUMA system.

But this shows that (unsurprisingly) we may have issues on systems with
small amounts of memory and we may not want to use higher orders on such
systems.

The case you got may be good to use as a testcase for the virtual
fallback. Hmmmm... Maybe it is possible to allocate the stack as a virtual
compound page. Got some script/code to produce that problem?

2007-09-28 21:00:09

by mel

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On (28/09/07 20:25), Peter Zijlstra didst pronounce:
>
> On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:
>
> > > start 2 processes that each mmap a separate 64M file and do
> > > sequential writes on them. start a 3rd process that does the same with
> > > 64M anonymous.
> > >
> > > wait for a while, and you'll see order=1 failures.
> >
> > Really? That means we can no longer even allocate stacks for forking.
> >
> > It's surprising that neither lumpy reclaim nor the mobility patches can
> > deal with it? Lumpy reclaim should be able to free neighboring pages to
> > avoid the order 1 failure unless there are lots of pinned pages.
> >
> > I guess then that lots of pages are pinned through I/O?
>
> memory got massively fragmented, as anti-frag gets easily defeated.
> setting min_free_kbytes to 12M does seem to solve it - it forces 2 max

The 12MB is related to the size of pageblock_order. I strongly suspect
that if you forced pageblock_order to be something like 4 or 5, the
min_free_kbytes would not need to be raised. The current values are
selected based on the hugepage size.
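
As a rough back-of-envelope, assuming i386 with 4MB huge pages (so
pageblock_order = MAX_ORDER - 1 = 10):

/*
 * pageblock size = (1 << 10) pages * 4k = 4MB
 * two MAX_ORDER blocks kept free       = 2 * 4MB = 8MB
 * plus normal watermark headroom      ~= the 12MB Peter needed
 * With pageblock_order forced down to 4 or 5 (64k-128k blocks), the
 * reserve needed to keep blocks unmixed shrinks accordingly.
 */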

> order blocks to stay available, so we don't mix types. however 12M on
> 128M is rather a lot.
>
> it's still on my todo list to look at it further..
>

--
Mel Gorman
Part-time PhD Student                 Linux Technology Center
University of Limerick                IBM Dublin Software Lab

2007-09-28 21:05:39

by mel

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On (28/09/07 10:33), Christoph Lameter didst pronounce:
> On Fri, 28 Sep 2007, Nick Piggin wrote:
>
> > On Wednesday 19 September 2007 13:36, Christoph Lameter wrote:
> > > SLAB_VFALLBACK can be specified for selected slab caches. If fallback is
> > > available then the conservative settings for higher order allocations are
> > > overridden. We then request an order that can accommodate at minimum
> > > 100 objects. The size of an individual slab allocation is allowed to reach
> > > up to 256k (order 6 on i386, order 4 on IA64).
> >
> > How come SLUB wants such a large number of objects? I thought the
> > unqueued nature of it made it better than slab because it minimised
> > the amount of cache hot memory lying around in slabs...
>
> The more objects in a page, the more the fast path runs. The more the fast
> path runs, the lower the cache footprint and the faster the overall
> allocations etc.
>
> SLAB can be configured for large queues holding lots of objects.
> SLUB can only reach the same through large pages because it does not
> have queues.

Large pages, flood gates etc. Be wary.

SLUB has to run 100% reliably or things go whoops. SLUB regularly depends on
atomic allocations and cannot take the necessary steps to get the contiguous
pages if it gets into trouble. This means that something like lumpy reclaim
cannot help you in its current state.

We currently do not take the pre-emptive steps with kswapd to ensure the
high-order pages are free. We also don't do something like have users that
can sleep keep the watermarks high. I had considered the possibility but
didn't have the justification for the complexity.

Minimally, SLUB by default should continue to use order-0 pages. Peter has
managed to bust order-1 pages with mem=128MB. Admittedly, it was a really
hostile workload but the point remains. It was artificially worked around
with min_free_kbytes (value set based on pageblock_order, could also have
been artificially worked around by dropping pageblock_order) and he eventually
caused order-0 failures, so the workload is pretty damn hostile to everything.

> One could add the ability to manage pools of cpu slabs but
> that would be adding yet another layer to compensate for the problem of
> the small pages.

A compromise may be to have per-cpu lists for higher-order pages in the page
allocator itself as they can be easily drained unlike the SLAB queues. The
thing to watch for would be excessive IPI calls which would offset any
performance gained by SLUB using larger pages.
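
Something like the following invented shape (purely illustrative; nothing
like this exists in the patch set):

/*
 * Hypothetical per-cpu pool of higher-order pages inside the page
 * allocator. Unlike SLAB's queues, these could be drained from the
 * outside, at the cost of an IPI per cpu.
 */
struct hiorder_pcp {
        struct list_head list;  /* free pages of one order */
        int count;
        int high;               /* drain back to the buddy above this */
};

static DEFINE_PER_CPU(struct hiorder_pcp, hiorder_pools[MAX_ORDER]);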

> Reliable large page allocations mean that we can get rid
> of these layers and the many workarounds that we have in place right now.
>

They are not reliable yet, particularly for atomic allocs.

> The unqueued nature of SLUB reduces memory requirements and in general the
> more efficient code paths of SLUB offset the advantage that SLAB can reach
> by being able to put more objects onto its queues. SLAB necessarily
> introduces complexity and cache line use through the need to manage those
> queues.
>
> > vmalloc is incredibly slow and unscalable at the moment. I'm still working
> > on making it more scalable and faster -- hopefully to a point where it would
> > actually be usable for this... but you still get moved off large TLBs, and
> > also have to inevitably do tlb flushing.
>
> Again I have not seen any fallbacks to vmalloc in my testing. What we are
> doing here is mainly to address your theoretical cases that we so far have
> never seen to be a problem and increase the reliability of allocations of
> page orders larger than 3 to a usable level. So far I have not dared
> to enable orders larger than 3 by default.
>
> AFAICT the performance of vmalloc is not really relevant. If this
> becomes an issue then it would be possible to reduce the orders used to
> avoid fallbacks.
>

If we're ever falling back to vmalloc, there is a danger that the
problem is merely postponed until vmalloc space is consumed. This is more
of an issue on 32-bit.

> > Or do you have SLUB at a point where performance is comparable to SLAB,
> > and this is just a possible idea for more performance?
>
> AFAICT SLUB's performance is superior to SLAB's in most cases, and it was like
> that from the beginning. I am still concerned about several corner cases,
> though (I think most of them are going to be addressed by the per-cpu
> patches in mm). Having a comparable or larger number of per-cpu objects than
> SLAB is something that could also address some of these concerns and could
> increase performance much further.
>

--
Mel Gorman
Part-time PhD Student                 Linux Technology Center
University of Limerick                IBM Dublin Software Lab

2007-09-28 21:14:57

by mel

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On (28/09/07 11:41), Christoph Lameter didst pronounce:
> On Fri, 28 Sep 2007, Peter Zijlstra wrote:
>
> > memory got massively fragmented, as anti-frag gets easily defeated.
> > setting min_free_kbytes to 12M does seem to solve it - it forces 2 max
> > order blocks to stay available, so we don't mix types. however 12M on
> > 128M is rather a lot.
>
> Yes, strict ordering would be much better. On NUMA it may be possible to
> completely forbid merging.

The forbidding of merging is trivial and the code is isolated to one function
__rmqueue_fallback(). We don't do it because the decision at development
time was that it was better to allow fragmentation than to take a reclaim
step, for example[1], and slow things up. This is based on my initial assumption
of anti-frag being mainly of interest to hugepages which are happy to wait
long periods during startup or fail.

> We can fall back to other nodes if necessary.
> 12M is not much on a NUMA system.
>
> But this shows that (unsurprisingly) we may have issues on systems with
> small amounts of memory and we may not want to use higher orders on such
> systems.
>

This is another option if you want to use a higher order for SLUB by
default. Use order-0 unless you are sure there is enough memory. At boot
if there is loads of memory, set the higher order and up min_free_kbytes on
each node to reduce mixing[2]. We can test with Peter's uber-hostile
case to see if it works[3].

> The case you got may be good to use as a testcase for the virtual
> fallback. Hmmmm...

For sure.

> Maybe it is possible to allocate the stack as a virtual
> compound page. Got some script/code to produce that problem?
>

[1] It might be tunnel vision but I still keep hugepages in mind as the
principal user of anti-frag. Andy used to have patches that force-evicted
pages of the "foreign" type when mixing occurred so the end result was
no mixing. We never fully completed them because it was too costly
for hugepages.

[2] This would require the identification of mixed blocks to be a
statistic available in mainline. Right now, it's only available in -mm
when PAGE_OWNER is set.

[3] The definition of working in this case being that order-0
allocations fail, which he has produced.

--
Mel Gorman
Part-time PhD Student                 Linux Technology Center
University of Limerick                IBM Dublin Software Lab

2007-09-28 21:46:32

by Nick Piggin

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Saturday 29 September 2007 03:33, Christoph Lameter wrote:
> On Fri, 28 Sep 2007, Nick Piggin wrote:
> > On Wednesday 19 September 2007 13:36, Christoph Lameter wrote:
> > > SLAB_VFALLBACK can be specified for selected slab caches. If fallback
> > > is available then the conservative settings for higher order
> > > allocations are overridden. We then request an order that can
> > > accommodate at minimum 100 objects. The size of an individual slab
> > > allocation is allowed to reach up to 256k (order 6 on i386, order 4 on
> > > IA64).
> >
> > How come SLUB wants such a large number of objects? I thought the
> > unqueued nature of it made it better than slab because it minimised
> > the amount of cache hot memory lying around in slabs...
>
> The more objects in a page, the more the fast path runs. The more the fast
> path runs, the lower the cache footprint and the faster the overall
> allocations etc.
>
> SLAB can be configured for large queues holding lots of objects.
> SLUB can only reach the same through large pages because it does not
> have queues. One could add the ability to manage pools of cpu slabs but
> that would be adding yet another layer to compensate for the problem of
> the small pages. Reliable large page allocations means that we can get rid
> of these layers and the many workarounds that we have in place right now.

That doesn't sound very nice because you don't actually want to use up
higher order allocations if you can avoid it, and you definitely don't want
to be increasing your slab page size unit if you can help it, because it
compounds the problem of slab fragmentation.


> The unqueued nature of SLUB reduces memory requirements and in general the
> more efficient code paths of SLUB offset the advantage that SLAB can reach
> by being able to put more objects onto its queues. SLAB necessarily
> introduces complexity and cache line use through the need to manage those
> queues.

I thought it was slower. Have you fixed the performance regression?
(OK, I read further down that you are still working on it but not confirmed
yet...)


> > vmalloc is incredibly slow and unscalable at the moment. I'm still
> > working on making it more scalable and faster -- hopefully to a point
> > where it would actually be usable for this... but you still get moved off
> > large TLBs, and also have to inevitably do tlb flushing.
>
> Again I have not seen any fallbacks to vmalloc in my testing. What we are
> doing here is mainly to address your theoretical cases that we so far have
> never seen to be a problem and increase the reliability of allocations of
> page orders larger than 3 to a usable level. So far I have not dared
> to enable orders larger than 3 by default.

Basically, all that shows is that your testing isn't very thorough. 128MB
is an order of magnitude *more* memory than some users have. They
probably wouldn't be happy with a regression in slab allocator performance
either.


> > Or do you have SLUB at a point where performance is comparable to SLAB,
> > and this is just a possible idea for more performance?
>
> AFAICT SLUB's performance is superior to SLAB's in most cases, and it was like
> that from the beginning. I am still concerned about several corner cases,
> though (I think most of them are going to be addressed by the per-cpu
> patches in mm). Having a comparable or larger number of per-cpu objects than
> SLAB is something that could also address some of these concerns and could
> increase performance much further.

OK, so long as it isn't going to depend on using higher order pages, that's
fine. (if they help even further as an optional thing, that's fine too. You
can turn them on for your huge systems and not even bother about adding
this vmap fallback -- you won't have me to nag you about these
purely theoretical issues).

2007-09-29 08:14:23

by Andrew Morton

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Fri, 28 Sep 2007 20:25:50 +0200 Peter Zijlstra <[email protected]> wrote:

>
> On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:
>
> > > start 2 processes that each mmap a separate 64M file and do
> > > sequential writes on them. start a 3rd process that does the same with
> > > 64M anonymous.
> > >
> > > wait for a while, and you'll see order=1 failures.
> >
> > Really? That means we can no longer even allocate stacks for forking.
> >
> > It's surprising that neither lumpy reclaim nor the mobility patches can
> > deal with it? Lumpy reclaim should be able to free neighboring pages to
> > avoid the order 1 failure unless there are lots of pinned pages.
> >
> > I guess then that lots of pages are pinned through I/O?
>
> memory got massively fragmented, as anti-frag gets easily defeated.
> setting min_free_kbytes to 12M does seem to solve it - it forces 2 max
> order blocks to stay available, so we don't mix types. however 12M on
> 128M is rather a lot.
>
> it's still on my todo list to look at it further..
>

That would be really really bad (as in: patch-dropping time) if those
order-1 allocations are not atomic.

What's the callsite?

2007-09-29 08:50:13

by Peter Zijlstra

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK


On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:

> Really? That means we can no longer even allocate stacks for forking.

I think I'm running with 4k stacks...

2007-09-29 08:51:32

by Peter Zijlstra

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK


On Sat, 2007-09-29 at 01:13 -0700, Andrew Morton wrote:
> On Fri, 28 Sep 2007 20:25:50 +0200 Peter Zijlstra <[email protected]> wrote:
>
> >
> > On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:
> >
> > > > start 2 processes that each mmap a separate 64M file and do
> > > > sequential writes on them. start a 3rd process that does the same with
> > > > 64M anonymous.
> > > >
> > > > wait for a while, and you'll see order=1 failures.
> > >
> > > Really? That means we can no longer even allocate stacks for forking.
> > >
> > > It's surprising that neither lumpy reclaim nor the mobility patches can
> > > deal with it? Lumpy reclaim should be able to free neighboring pages to
> > > avoid the order 1 failure unless there are lots of pinned pages.
> > >
> > > I guess then that lots of pages are pinned through I/O?
> >
> > memory got massively fragmented, as anti-frag gets easily defeated.
> > setting min_free_kbytes to 12M does seem to solve it - it forces 2 max
> > order blocks to stay available, so we don't mix types. however 12M on
> > 128M is rather a lot.
> >
> > it's still on my todo list to look at it further..
> >
>
> That would be really really bad (as in: patch-dropping time) if those
> order-1 allocations are not atomic.
>
> What's the callsite?

Ah, right, that was the detail... all this lumpy reclaim is useless for
atomic allocations. And with SLUB using higher order pages, atomic !0
order allocations will be very very common.

One I can remember was:

add_to_page_cache()
radix_tree_insert()
radix_tree_node_alloc()
kmem_cache_alloc()

which is an atomic callsite.

Which leaves us in a situation where we can load pages, because there is
free memory, but can't manage to allocate memory to track them..
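
Roughly, that is because the radix-tree node is allocated while the
mapping's tree_lock is held (a simplified sketch of the 2.6.23-era path):

write_lock_irq(&mapping->tree_lock);
error = radix_tree_insert(&mapping->page_tree, offset, page);
        /*
         * -> radix_tree_node_alloc()
         *    -> kmem_cache_alloc(radix_tree_node_cachep, gfp_mask)
         * Sleeping is not allowed under the lock, so if SLUB must pull
         * in a fresh (higher-order) slab here it becomes an atomic
         * order > 0 page allocation, which lumpy reclaim cannot help.
         */
write_unlock_irq(&mapping->tree_lock);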

2007-09-29 08:58:28

by Peter Zijlstra

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK


On Sat, 2007-09-29 at 10:47 +0200, Peter Zijlstra wrote:

> Ah, right, that was the detail... all this lumpy reclaim is useless for
> atomic allocations. And with SLUB using higher order pages, atomic !0
> order allocations will be very very common.
>
> One I can remember was:
>
> add_to_page_cache()
> radix_tree_insert()
> radix_tree_node_alloc()
> kmem_cache_alloc()
>
> which is an atomic callsite.
>
> Which leaves us in a situation where we can load pages, because there is
> free memory, but can't manage to allocate memory to track them..

Ah, I found a boot log of one of these sessions, it's also full of
order-2 OOMs.. :-/

2007-09-29 09:01:56

by Andrew Morton

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sat, 29 Sep 2007 10:47:12 +0200 Peter Zijlstra <[email protected]> wrote:

>
> On Sat, 2007-09-29 at 01:13 -0700, Andrew Morton wrote:
> > On Fri, 28 Sep 2007 20:25:50 +0200 Peter Zijlstra <[email protected]> wrote:
> >
> > >
> > > On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:
> > >
> > > > > start 2 processes that each mmap a separate 64M file and do
> > > > > sequential writes on them. start a 3rd process that does the same with
> > > > > 64M anonymous.
> > > > >
> > > > > wait for a while, and you'll see order=1 failures.
> > > >
> > > > Really? That means we can no longer even allocate stacks for forking.
> > > >
> > > > It's surprising that neither lumpy reclaim nor the mobility patches can
> > > > deal with it? Lumpy reclaim should be able to free neighboring pages to
> > > > avoid the order 1 failure unless there are lots of pinned pages.
> > > >
> > > > I guess then that lots of pages are pinned through I/O?
> > >
> > > memory got massively fragmented, as anti-frag gets easily defeated.
> > > setting min_free_kbytes to 12M does seem to solve it - it forces 2 max
> > > order blocks to stay available, so we don't mix types. however 12M on
> > > 128M is rather a lot.
> > >
> > > it's still on my todo list to look at it further..
> > >
> >
> > That would be really really bad (as in: patch-dropping time) if those
> > order-1 allocations are not atomic.
> >
> > What's the callsite?
>
> Ah, right, that was the detail... all this lumpy reclaim is useless for
> atomic allocations. And with SLUB using higher order pages, atomic !0
> order allocations will be very very common.

Oh OK.

I thought we'd already fixed slub so that it didn't do that. Maybe that
fix is in -mm but I don't think so.

Trying to do atomic order-1 allocations on behalf of arbitrary slab caches
just won't fly - this is a significant degradation in kernel reliability,
as you've very easily demonstrated.

> One I can remember was:
>
> add_to_page_cache()
> radix_tree_insert()
> radix_tree_node_alloc()
> kmem_cache_alloc()
>
> which is an atomic callsite.
>
> Which leaves us in a situation where we can load pages, because there is
> free memory, but can't manage to allocate memory to track them..

Right. Leading to application failure which for many is equivalent to a
complete system outage.

2007-09-29 09:02:20

by Andrew Morton

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sat, 29 Sep 2007 10:53:41 +0200 Peter Zijlstra <[email protected]> wrote:

>
> On Sat, 2007-09-29 at 10:47 +0200, Peter Zijlstra wrote:
>
> > Ah, right, that was the detail... all this lumpy reclaim is useless for
> > atomic allocations. And with SLUB using higher order pages, atomic !0
> > order allocations will be very very common.
> >
> > One I can remember was:
> >
> > add_to_page_cache()
> > radix_tree_insert()
> > radix_tree_node_alloc()
> > kmem_cache_alloc()
> >
> > which is an atomic callsite.
> >
> > Which leaves us in a situation where we can load pages, because there is
> > free memory, but can't manage to allocate memory to track them..
>
> Ah, I found a boot log of one of these sessions, it's also full of
> order-2 OOMs.. :-/

oom-killings, or page allocation failures? The latter, one hopes.

2007-09-29 09:18:54

by Peter Zijlstra

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK


On Sat, 2007-09-29 at 02:01 -0700, Andrew Morton wrote:
> On Sat, 29 Sep 2007 10:53:41 +0200 Peter Zijlstra <[email protected]> wrote:
>
> >
> > On Sat, 2007-09-29 at 10:47 +0200, Peter Zijlstra wrote:
> >
> > > Ah, right, that was the detail... all this lumpy reclaim is useless for
> > > atomic allocations. And with SLUB using higher order pages, atomic !0
> > > order allocations will be very very common.
> > >
> > > One I can remember was:
> > >
> > > add_to_page_cache()
> > > radix_tree_insert()
> > > radix_tree_node_alloc()
> > > kmem_cache_alloc()
> > >
> > > which is an atomic callsite.
> > >
> > > Which leaves us in a situation where we can load pages, because there is
> > > free memory, but can't manage to allocate memory to track them..
> >
> > Ah, I found a boot log of one of these sessions, it's also full of
> > order-2 OOMs.. :-/
>
> oom-killings, or page allocation failures? The latter, one hopes.


Linux version 2.6.23-rc4-mm1-dirty (root@dyad) (gcc version 4.1.2 (Ubuntu 4.1.2-0ubuntu4)) #27 Tue Sep 18 15:40:35 CEST 2007

...


mm_tester invoked oom-killer: gfp_mask=0x40d0, order=2, oomkilladj=0
Call Trace:
611b3878: [<6002dd28>] printk_ratelimit+0x15/0x17
611b3888: [<60052ed4>] out_of_memory+0x80/0x100
611b38c8: [<60054b0c>] __alloc_pages+0x1ed/0x280
611b3948: [<6006c608>] allocate_slab+0x5b/0xb0
611b3968: [<6006c705>] new_slab+0x7e/0x183
611b39a8: [<6006cbae>] __slab_alloc+0xc9/0x14b
611b39b0: [<6011f89f>] radix_tree_preload+0x70/0xbf
611b39b8: [<600980f2>] do_mpage_readpage+0x3b3/0x472
611b39e0: [<6011f89f>] radix_tree_preload+0x70/0xbf
611b39f8: [<6006cc81>] kmem_cache_alloc+0x51/0x98
611b3a38: [<6011f89f>] radix_tree_preload+0x70/0xbf
611b3a58: [<6004f8e2>] add_to_page_cache+0x22/0xf7
611b3a98: [<6004f9c6>] add_to_page_cache_lru+0xf/0x24
611b3ab8: [<6009821e>] mpage_readpages+0x6d/0x109
611b3ac0: [<600d59f0>] ext3_get_block+0x0/0xf2
611b3b08: [<6005483d>] get_page_from_freelist+0x8d/0xc1
611b3b88: [<600d6937>] ext3_readpages+0x18/0x1a
611b3b98: [<60056f00>] read_pages+0x37/0x9b
611b3bd8: [<60057064>] __do_page_cache_readahead+0x100/0x157
611b3c48: [<60057196>] do_page_cache_readahead+0x52/0x5f
611b3c78: [<60050ab4>] filemap_fault+0x145/0x278
611b3ca8: [<60022b61>] run_syscall_stub+0xd1/0xdd
611b3ce8: [<6005eae3>] __do_fault+0x7e/0x3ca
611b3d68: [<6005ee60>] do_linear_fault+0x31/0x33
611b3d88: [<6005f149>] handle_mm_fault+0x14e/0x246
611b3da8: [<60120a7b>] __up_read+0x73/0x7b
611b3de8: [<60013177>] handle_page_fault+0x11f/0x23b
611b3e48: [<60013419>] segv+0xac/0x297
611b3f28: [<60013367>] segv_handler+0x68/0x6e
611b3f48: [<600232ad>] get_skas_faultinfo+0x9c/0xa1
611b3f68: [<60023853>] userspace+0x13a/0x19d
611b3fc8: [<60010d58>] fork_handler+0x86/0x8d

Mem-info:
Normal per-cpu:
CPU 0: Hot: hi: 42, btch: 7 usd: 0 Cold: hi: 14, btch: 3 usd: 0
Active:11 inactive:9 dirty:0 writeback:1 unstable:0
free:19533 slab:10587 mapped:0 pagetables:260 bounce:0
Normal free:78132kB min:4096kB low:5120kB high:6144kB active:44kB inactive:36kB present:129280kB pages_scanned:0 all_unreclaimable? no
lowmem_reserve[]: 0 0
Normal: 7503*4kB 5977*8kB 19*16kB 0*32kB 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 78132kB
Swap cache: add 1192822, delete 1192790, find 491441/626861, race 0+1
Free swap = 455300kB
Total swap = 524280kB
Free swap: 455300kB
32768 pages of RAM
0 pages of HIGHMEM
1948 reserved pages
11 pages shared
32 pages swap cached
Out of memory: kill process 2647 (portmap) score 2233 or a child
Killed process 2647 (portmap)


2007-09-29 09:33:34

by Andrew Morton

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sat, 29 Sep 2007 11:14:02 +0200 Peter Zijlstra <[email protected]> wrote:

> > oom-killings, or page allocation failures? The latter, one hopes.
>
>
> Linux version 2.6.23-rc4-mm1-dirty (root@dyad) (gcc version 4.1.2 (Ubuntu 4.1.2-0ubuntu4)) #27 Tue Sep 18 15:40:35 CEST 2007
>
> ...
>
>
> mm_tester invoked oom-killer: gfp_mask=0x40d0, order=2, oomkilladj=0
> Call Trace:
> 611b3878: [<6002dd28>] printk_ratelimit+0x15/0x17
> 611b3888: [<60052ed4>] out_of_memory+0x80/0x100
> 611b38c8: [<60054b0c>] __alloc_pages+0x1ed/0x280
> 611b3948: [<6006c608>] allocate_slab+0x5b/0xb0
> 611b3968: [<6006c705>] new_slab+0x7e/0x183
> 611b39a8: [<6006cbae>] __slab_alloc+0xc9/0x14b
> 611b39b0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> 611b39b8: [<600980f2>] do_mpage_readpage+0x3b3/0x472
> 611b39e0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> 611b39f8: [<6006cc81>] kmem_cache_alloc+0x51/0x98
> 611b3a38: [<6011f89f>] radix_tree_preload+0x70/0xbf
> 611b3a58: [<6004f8e2>] add_to_page_cache+0x22/0xf7
> 611b3a98: [<6004f9c6>] add_to_page_cache_lru+0xf/0x24
> 611b3ab8: [<6009821e>] mpage_readpages+0x6d/0x109
> 611b3ac0: [<600d59f0>] ext3_get_block+0x0/0xf2
> 611b3b08: [<6005483d>] get_page_from_freelist+0x8d/0xc1
> 611b3b88: [<600d6937>] ext3_readpages+0x18/0x1a
> 611b3b98: [<60056f00>] read_pages+0x37/0x9b
> 611b3bd8: [<60057064>] __do_page_cache_readahead+0x100/0x157
> 611b3c48: [<60057196>] do_page_cache_readahead+0x52/0x5f
> 611b3c78: [<60050ab4>] filemap_fault+0x145/0x278
> 611b3ca8: [<60022b61>] run_syscall_stub+0xd1/0xdd
> 611b3ce8: [<6005eae3>] __do_fault+0x7e/0x3ca
> 611b3d68: [<6005ee60>] do_linear_fault+0x31/0x33
> 611b3d88: [<6005f149>] handle_mm_fault+0x14e/0x246
> 611b3da8: [<60120a7b>] __up_read+0x73/0x7b
> 611b3de8: [<60013177>] handle_page_fault+0x11f/0x23b
> 611b3e48: [<60013419>] segv+0xac/0x297
> 611b3f28: [<60013367>] segv_handler+0x68/0x6e
> 611b3f48: [<600232ad>] get_skas_faultinfo+0x9c/0xa1
> 611b3f68: [<60023853>] userspace+0x13a/0x19d
> 611b3fc8: [<60010d58>] fork_handler+0x86/0x8d

OK, that's different. Someone broke the vm - order-2 GFP_KERNEL
allocations aren't supposed to fail.

I'm suspecting that did_some_progress thing.

2007-09-29 12:51:01

by Nick Piggin

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Saturday 29 September 2007 19:27, Andrew Morton wrote:
> On Sat, 29 Sep 2007 11:14:02 +0200 Peter Zijlstra <[email protected]>
wrote:
> > > oom-killings, or page allocation failures? The latter, one hopes.
> >
> > Linux version 2.6.23-rc4-mm1-dirty (root@dyad) (gcc version 4.1.2 (Ubuntu
> > 4.1.2-0ubuntu4)) #27 Tue Sep 18 15:40:35 CEST 2007
> >
> > ...
> >
> >
> > mm_tester invoked oom-killer: gfp_mask=0x40d0, order=2, oomkilladj=0
> > Call Trace:
> > 611b3878: [<6002dd28>] printk_ratelimit+0x15/0x17
> > 611b3888: [<60052ed4>] out_of_memory+0x80/0x100
> > 611b38c8: [<60054b0c>] __alloc_pages+0x1ed/0x280
> > 611b3948: [<6006c608>] allocate_slab+0x5b/0xb0
> > 611b3968: [<6006c705>] new_slab+0x7e/0x183
> > 611b39a8: [<6006cbae>] __slab_alloc+0xc9/0x14b
> > 611b39b0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > 611b39b8: [<600980f2>] do_mpage_readpage+0x3b3/0x472
> > 611b39e0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > 611b39f8: [<6006cc81>] kmem_cache_alloc+0x51/0x98
> > 611b3a38: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > 611b3a58: [<6004f8e2>] add_to_page_cache+0x22/0xf7
> > 611b3a98: [<6004f9c6>] add_to_page_cache_lru+0xf/0x24
> > 611b3ab8: [<6009821e>] mpage_readpages+0x6d/0x109
> > 611b3ac0: [<600d59f0>] ext3_get_block+0x0/0xf2
> > 611b3b08: [<6005483d>] get_page_from_freelist+0x8d/0xc1
> > 611b3b88: [<600d6937>] ext3_readpages+0x18/0x1a
> > 611b3b98: [<60056f00>] read_pages+0x37/0x9b
> > 611b3bd8: [<60057064>] __do_page_cache_readahead+0x100/0x157
> > 611b3c48: [<60057196>] do_page_cache_readahead+0x52/0x5f
> > 611b3c78: [<60050ab4>] filemap_fault+0x145/0x278
> > 611b3ca8: [<60022b61>] run_syscall_stub+0xd1/0xdd
> > 611b3ce8: [<6005eae3>] __do_fault+0x7e/0x3ca
> > 611b3d68: [<6005ee60>] do_linear_fault+0x31/0x33
> > 611b3d88: [<6005f149>] handle_mm_fault+0x14e/0x246
> > 611b3da8: [<60120a7b>] __up_read+0x73/0x7b
> > 611b3de8: [<60013177>] handle_page_fault+0x11f/0x23b
> > 611b3e48: [<60013419>] segv+0xac/0x297
> > 611b3f28: [<60013367>] segv_handler+0x68/0x6e
> > 611b3f48: [<600232ad>] get_skas_faultinfo+0x9c/0xa1
> > 611b3f68: [<60023853>] userspace+0x13a/0x19d
> > 611b3fc8: [<60010d58>] fork_handler+0x86/0x8d
>
> OK, that's different. Someone broke the vm - order-2 GFP_KERNEL
> allocations aren't supposed to fail.
>
> I'm suspecting that did_some_progress thing.

The allocation didn't fail -- it invoked the OOM killer because the kernel
ran out of unfragmented memory. Probably because higher order
allocations are the new vogue in -mm at the moment ;)

2007-09-29 12:53:40

by Nick Piggin

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Saturday 29 September 2007 04:41, Christoph Lameter wrote:
> On Fri, 28 Sep 2007, Peter Zijlstra wrote:
> > memory got massively fragmented, as anti-frag gets easily defeated.
> > setting min_free_kbytes to 12M does seem to solve it - it forces 2 max
> > order blocks to stay available, so we don't mix types. however 12M on
> > 128M is rather a lot.
>
> Yes, strict ordering would be much better. On NUMA it may be possible to
> completely forbid merging. We can fall back to other nodes if necessary.
> 12M is not much on a NUMA system.
>
> But this shows that (unsurprisingly) we may have issues on systems with
> small amounts of memory and we may not want to use higher orders on such
> systems.
>
> The case you got may be good to use as a testcase for the virtual
> fallback. Hmmmm... Maybe it is possible to allocate the stack as a virtual
> compound page. Got some script/code to produce that problem?

Yeah, you could do that, but we generally don't have big problems allocating
stacks in mainline, because we have very few users of higher order pages,
and the few that are there don't seem to be a problem.

2007-09-29 19:21:16

by Andrew Morton

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sat, 29 Sep 2007 06:19:33 +1000 Nick Piggin <[email protected]> wrote:

> On Saturday 29 September 2007 19:27, Andrew Morton wrote:
> > On Sat, 29 Sep 2007 11:14:02 +0200 Peter Zijlstra <[email protected]>
> wrote:
> > > > oom-killings, or page allocation failures? The latter, one hopes.
> > >
> > > Linux version 2.6.23-rc4-mm1-dirty (root@dyad) (gcc version 4.1.2 (Ubuntu
> > > 4.1.2-0ubuntu4)) #27 Tue Sep 18 15:40:35 CEST 2007
> > >
> > > ...
> > >
> > >
> > > mm_tester invoked oom-killer: gfp_mask=0x40d0, order=2, oomkilladj=0
> > > Call Trace:
> > > 611b3878: [<6002dd28>] printk_ratelimit+0x15/0x17
> > > 611b3888: [<60052ed4>] out_of_memory+0x80/0x100
> > > 611b38c8: [<60054b0c>] __alloc_pages+0x1ed/0x280
> > > 611b3948: [<6006c608>] allocate_slab+0x5b/0xb0
> > > 611b3968: [<6006c705>] new_slab+0x7e/0x183
> > > 611b39a8: [<6006cbae>] __slab_alloc+0xc9/0x14b
> > > 611b39b0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > 611b39b8: [<600980f2>] do_mpage_readpage+0x3b3/0x472
> > > 611b39e0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > 611b39f8: [<6006cc81>] kmem_cache_alloc+0x51/0x98
> > > 611b3a38: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > 611b3a58: [<6004f8e2>] add_to_page_cache+0x22/0xf7
> > > 611b3a98: [<6004f9c6>] add_to_page_cache_lru+0xf/0x24
> > > 611b3ab8: [<6009821e>] mpage_readpages+0x6d/0x109
> > > 611b3ac0: [<600d59f0>] ext3_get_block+0x0/0xf2
> > > 611b3b08: [<6005483d>] get_page_from_freelist+0x8d/0xc1
> > > 611b3b88: [<600d6937>] ext3_readpages+0x18/0x1a
> > > 611b3b98: [<60056f00>] read_pages+0x37/0x9b
> > > 611b3bd8: [<60057064>] __do_page_cache_readahead+0x100/0x157
> > > 611b3c48: [<60057196>] do_page_cache_readahead+0x52/0x5f
> > > 611b3c78: [<60050ab4>] filemap_fault+0x145/0x278
> > > 611b3ca8: [<60022b61>] run_syscall_stub+0xd1/0xdd
> > > 611b3ce8: [<6005eae3>] __do_fault+0x7e/0x3ca
> > > 611b3d68: [<6005ee60>] do_linear_fault+0x31/0x33
> > > 611b3d88: [<6005f149>] handle_mm_fault+0x14e/0x246
> > > 611b3da8: [<60120a7b>] __up_read+0x73/0x7b
> > > 611b3de8: [<60013177>] handle_page_fault+0x11f/0x23b
> > > 611b3e48: [<60013419>] segv+0xac/0x297
> > > 611b3f28: [<60013367>] segv_handler+0x68/0x6e
> > > 611b3f48: [<600232ad>] get_skas_faultinfo+0x9c/0xa1
> > > 611b3f68: [<60023853>] userspace+0x13a/0x19d
> > > 611b3fc8: [<60010d58>] fork_handler+0x86/0x8d
> >
> > OK, that's different. Someone broke the vm - order-2 GFP_KERNEL
> > allocations aren't supposed to fail.
> >
> > I'm suspecting that did_some_progress thing.
>
> The allocation didn't fail -- it invoked the OOM killer because the kernel
> ran out of unfragmented memory.

We can't "run out of unfragmented memory" for an order-2 GFP_KERNEL
allocation in this workload. We go and synchronously free stuff up to make
it work.

How did this get broken?

> Probably because higher order
> allocations are the new vogue in -mm at the moment ;)

That's a different bug.

bug 1: We shouldn't be doing higher-order allocations in slub because of
the considerable damage this does to atomic allocations.

bug 2: order-2 GFP_KERNEL allocations shouldn't fail like this.


2007-09-30 11:41:04

by Nick Piggin

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sunday 30 September 2007 05:20, Andrew Morton wrote:
> On Sat, 29 Sep 2007 06:19:33 +1000 Nick Piggin <[email protected]>
wrote:
> > On Saturday 29 September 2007 19:27, Andrew Morton wrote:
> > > On Sat, 29 Sep 2007 11:14:02 +0200 Peter Zijlstra
> > > <[email protected]>
> >
> > wrote:
> > > > > oom-killings, or page allocation failures? The latter, one hopes.
> > > >
> > > > Linux version 2.6.23-rc4-mm1-dirty (root@dyad) (gcc version 4.1.2
> > > > (Ubuntu 4.1.2-0ubuntu4)) #27 Tue Sep 18 15:40:35 CEST 2007
> > > >
> > > > ...
> > > >
> > > >
> > > > mm_tester invoked oom-killer: gfp_mask=0x40d0, order=2, oomkilladj=0
> > > > Call Trace:
> > > > 611b3878: [<6002dd28>] printk_ratelimit+0x15/0x17
> > > > 611b3888: [<60052ed4>] out_of_memory+0x80/0x100
> > > > 611b38c8: [<60054b0c>] __alloc_pages+0x1ed/0x280
> > > > 611b3948: [<6006c608>] allocate_slab+0x5b/0xb0
> > > > 611b3968: [<6006c705>] new_slab+0x7e/0x183
> > > > 611b39a8: [<6006cbae>] __slab_alloc+0xc9/0x14b
> > > > 611b39b0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > > 611b39b8: [<600980f2>] do_mpage_readpage+0x3b3/0x472
> > > > 611b39e0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > > 611b39f8: [<6006cc81>] kmem_cache_alloc+0x51/0x98
> > > > 611b3a38: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > > 611b3a58: [<6004f8e2>] add_to_page_cache+0x22/0xf7
> > > > 611b3a98: [<6004f9c6>] add_to_page_cache_lru+0xf/0x24
> > > > 611b3ab8: [<6009821e>] mpage_readpages+0x6d/0x109
> > > > 611b3ac0: [<600d59f0>] ext3_get_block+0x0/0xf2
> > > > 611b3b08: [<6005483d>] get_page_from_freelist+0x8d/0xc1
> > > > 611b3b88: [<600d6937>] ext3_readpages+0x18/0x1a
> > > > 611b3b98: [<60056f00>] read_pages+0x37/0x9b
> > > > 611b3bd8: [<60057064>] __do_page_cache_readahead+0x100/0x157
> > > > 611b3c48: [<60057196>] do_page_cache_readahead+0x52/0x5f
> > > > 611b3c78: [<60050ab4>] filemap_fault+0x145/0x278
> > > > 611b3ca8: [<60022b61>] run_syscall_stub+0xd1/0xdd
> > > > 611b3ce8: [<6005eae3>] __do_fault+0x7e/0x3ca
> > > > 611b3d68: [<6005ee60>] do_linear_fault+0x31/0x33
> > > > 611b3d88: [<6005f149>] handle_mm_fault+0x14e/0x246
> > > > 611b3da8: [<60120a7b>] __up_read+0x73/0x7b
> > > > 611b3de8: [<60013177>] handle_page_fault+0x11f/0x23b
> > > > 611b3e48: [<60013419>] segv+0xac/0x297
> > > > 611b3f28: [<60013367>] segv_handler+0x68/0x6e
> > > > 611b3f48: [<600232ad>] get_skas_faultinfo+0x9c/0xa1
> > > > 611b3f68: [<60023853>] userspace+0x13a/0x19d
> > > > 611b3fc8: [<60010d58>] fork_handler+0x86/0x8d
> > >
> > > OK, that's different. Someone broke the vm - order-2 GFP_KERNEL
> > > allocations aren't supposed to fail.
> > >
> > > I'm suspecting that did_some_progress thing.
> >
> > The allocation didn't fail -- it invoked the OOM killer because the
> > kernel ran out of unfragmented memory.
>
> We can't "run out of unfragmented memory" for an order-2 GFP_KERNEL
> allocation in this workload. We go and synchronously free stuff up to make
> it work.
>
> How did this get broken?

Either no more order-2 pages could be freed, or the ones that were being
freed were being used by something else (eg. other order-2 slab allocations).


> > Probably because higher order
> > allocations are the new vogue in -mm at the moment ;)
>
> That's a different bug.
>
> bug 1: We shouldn't be doing higher-order allocations in slub because of
> the considerable damage this does to atomic allocations.
>
> bug 2: order-2 GFP_KERNEL allocations shouldn't fail like this.

I think one causes 2 as well -- it isn't just considerable damage to atomic
allocations but to GFP_KERNEL allocations too.

2007-09-30 20:13:20

by Andrew Morton

Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sun, 30 Sep 2007 05:09:28 +1000 Nick Piggin <[email protected]> wrote:

> On Sunday 30 September 2007 05:20, Andrew Morton wrote:
> > On Sat, 29 Sep 2007 06:19:33 +1000 Nick Piggin <[email protected]>
> wrote:
> > > On Saturday 29 September 2007 19:27, Andrew Morton wrote:
> > > > On Sat, 29 Sep 2007 11:14:02 +0200 Peter Zijlstra
> > > > <[email protected]>
> > >
> > > wrote:
> > > > > > oom-killings, or page allocation failures? The latter, one hopes.
> > > > >
> > > > > Linux version 2.6.23-rc4-mm1-dirty (root@dyad) (gcc version 4.1.2
> > > > > (Ubuntu 4.1.2-0ubuntu4)) #27 Tue Sep 18 15:40:35 CEST 2007
> > > > >
> > > > > ...
> > > > >
> > > > >
> > > > > mm_tester invoked oom-killer: gfp_mask=0x40d0, order=2, oomkilladj=0
> > > > > Call Trace:
> > > > > 611b3878: [<6002dd28>] printk_ratelimit+0x15/0x17
> > > > > 611b3888: [<60052ed4>] out_of_memory+0x80/0x100
> > > > > 611b38c8: [<60054b0c>] __alloc_pages+0x1ed/0x280
> > > > > 611b3948: [<6006c608>] allocate_slab+0x5b/0xb0
> > > > > 611b3968: [<6006c705>] new_slab+0x7e/0x183
> > > > > 611b39a8: [<6006cbae>] __slab_alloc+0xc9/0x14b
> > > > > 611b39b0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > > > 611b39b8: [<600980f2>] do_mpage_readpage+0x3b3/0x472
> > > > > 611b39e0: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > > > 611b39f8: [<6006cc81>] kmem_cache_alloc+0x51/0x98
> > > > > 611b3a38: [<6011f89f>] radix_tree_preload+0x70/0xbf
> > > > > 611b3a58: [<6004f8e2>] add_to_page_cache+0x22/0xf7
> > > > > 611b3a98: [<6004f9c6>] add_to_page_cache_lru+0xf/0x24
> > > > > 611b3ab8: [<6009821e>] mpage_readpages+0x6d/0x109
> > > > > 611b3ac0: [<600d59f0>] ext3_get_block+0x0/0xf2
> > > > > 611b3b08: [<6005483d>] get_page_from_freelist+0x8d/0xc1
> > > > > 611b3b88: [<600d6937>] ext3_readpages+0x18/0x1a
> > > > > 611b3b98: [<60056f00>] read_pages+0x37/0x9b
> > > > > 611b3bd8: [<60057064>] __do_page_cache_readahead+0x100/0x157
> > > > > 611b3c48: [<60057196>] do_page_cache_readahead+0x52/0x5f
> > > > > 611b3c78: [<60050ab4>] filemap_fault+0x145/0x278
> > > > > 611b3ca8: [<60022b61>] run_syscall_stub+0xd1/0xdd
> > > > > 611b3ce8: [<6005eae3>] __do_fault+0x7e/0x3ca
> > > > > 611b3d68: [<6005ee60>] do_linear_fault+0x31/0x33
> > > > > 611b3d88: [<6005f149>] handle_mm_fault+0x14e/0x246
> > > > > 611b3da8: [<60120a7b>] __up_read+0x73/0x7b
> > > > > 611b3de8: [<60013177>] handle_page_fault+0x11f/0x23b
> > > > > 611b3e48: [<60013419>] segv+0xac/0x297
> > > > > 611b3f28: [<60013367>] segv_handler+0x68/0x6e
> > > > > 611b3f48: [<600232ad>] get_skas_faultinfo+0x9c/0xa1
> > > > > 611b3f68: [<60023853>] userspace+0x13a/0x19d
> > > > > 611b3fc8: [<60010d58>] fork_handler+0x86/0x8d
> > > >
> > > > OK, that's different. Someone broke the vm - order-2 GFP_KERNEL
> > > > allocations aren't supposed to fail.
> > > >
> > > > I'm suspecting that did_some_progress thing.
> > >
> > > The allocation didn't fail -- it invoked the OOM killer because the
> > > kernel ran out of unfragmented memory.
> >
> > We can't "run out of unfragmented memory" for an order-2 GFP_KERNEL
> > allocation in this workload. We go and synchronously free stuff up to make
> > it work.
> >
> > How did this get broken?
>
> Either no more order-2 pages could be freed, or the ones that were being
> freed were being used by something else (eg. other order-2 slab allocations).

No. The current design of reclaim (for better or for worse) is that for
order 0,1,2 and 3 allocations we just keep on trying until it works. That
got broken and I think it got broken at a design level when that
did_some_progress logic went in. Perhaps something else we did later
worsened things.

>
> > > Probably because higher order
> > > allocations are the new vogue in -mm at the moment ;)
> >
> > That's a different bug.
> >
> > bug 1: We shouldn't be doing higher-order allocations in slub because of
> > the considerable damage this does to atomic allocations.
> >
> > bug 2: order-2 GFP_KERNEL allocations shouldn't fail like this.
>
> I think bug 1 causes bug 2 as well -- it isn't just considerable damage to atomic
> allocations but to GFP_KERNEL allocations too.

Well sure, because we already broke GFP_KERNEL allocations.

2007-09-30 20:47:46

by Nick Piggin

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Monday 01 October 2007 06:12, Andrew Morton wrote:
> On Sun, 30 Sep 2007 05:09:28 +1000 Nick Piggin <[email protected]>
wrote:
> > On Sunday 30 September 2007 05:20, Andrew Morton wrote:

> > > We can't "run out of unfragmented memory" for an order-2 GFP_KERNEL
> > > allocation in this workload. We go and synchronously free stuff up to
> > > make it work.
> > >
> > > How did this get broken?
> >
> > Either no more order-2 pages could be freed, or the ones that were being
> > freed were being used by something else (eg. other order-2 slab
> > allocations).
>
> No. The current design of reclaim (for better or for worse) is that for
> order 0,1,2 and 3 allocations we just keep on trying until it works. That
> got broken and I think it got broken at a design level when that
> did_some_progress logic went in. Perhaps something else we did later
> worsened things.

It will keep trying until it works. It won't have stopped trying (unless
I'm very mistaken?), it's just OOM-killing things merrily along the way.

2007-10-01 20:50:54

by Christoph Lameter

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Fri, 28 Sep 2007, Nick Piggin wrote:

> I thought it was slower. Have you fixed the performance regression?
> (OK, I read further down that you are still working on it but not confirmed
> yet...)

The problem is with the weird way of Intel testing and communication.
Every 3-6 months or so they will tell you the system is X% up or down on
arch Y (and they won't give you details because it's somehow secret). And
then there are conflicting statements by the two or so performance test
departments. One of them repeatedly assured me that they do not see any
regressions.

> OK, so long as it isn't going to depend on using higher order pages, that's
> fine. (if they help even further as an optional thing, that's fine too. You
> can turn them on your huge systems and not even bother about adding
> this vmap fallback -- you won't have me to nag you about these
> purely theoretical issues).

Well the vmap fallback is generally useful AFAICT. Higher order
allocations are common on some of our platforms. Order 1 failures even
affect essential things like stacks that have nothing to do with SLUB and
the LBS patchset.


2007-10-01 20:55:40

by Christoph Lameter

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sat, 29 Sep 2007, Andrew Morton wrote:

> > atomic allocations. And with SLUB using higher order pages, atomic !0
> > order allocations will be very very common.
>
> Oh OK.
>
> I thought we'd already fixed slub so that it didn't do that. Maybe that
> fix is in -mm but I don't think so.
>
> Trying to do atomic order-1 allocations on behalf of arbitrary slab caches
> just won't fly - this is a significant degradation in kernel reliability,
> as you've very easily demonstrated.

Ummm... SLAB also does order 1 allocations. We have always done them.

See mm/slab.c

/*
* Do not go above this order unless 0 objects fit into the slab.
*/
#define BREAK_GFP_ORDER_HI 1
#define BREAK_GFP_ORDER_LO 0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

2007-10-01 21:01:18

by Christoph Lameter

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Sat, 29 Sep 2007, Peter Zijlstra wrote:

>
> On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:
>
> > Really? That means we can no longer even allocate stacks for forking.
>
> I think I'm running with 4k stacks...

4k stacks will never fly on an SGI x86_64 NUMA configuration given the
additional data that may be kept on the stack. We are currently
considering going from 8k to 16k (or even 32k) to make things work. So
having the ability to put the stacks in vmalloc space may be something to
look at.

2007-10-01 21:10:22

by Christoph Lameter

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Fri, 28 Sep 2007, Mel Gorman wrote:

> Minimally, SLUB by default should continue to use order-0 pages. Peter has
> managed to bust order-1 pages with mem=128MB. Admittedly, it was a really
> hostile workload but the point remains. It was artificially worked around
> with min_free_kbytes (value set based on pageblock_order, could also have
> been artificially worked around by dropping pageblock_order) and he eventually
> caused order-0 failures so the workload is pretty damn hostile to everything.

SLAB's default is order 1, as is SLUB's default upstream.

SLAB does runtime detection of the amount of memory and configures the max
order correspondingly:

from mm/slab.c:

/*
* Fragmentation resistance on low memory - only use bigger
* page orders on machines with more than 32MB of memory.
*/
if (num_physpages > (32 << 20) >> PAGE_SHIFT)
slab_break_gfp_order = BREAK_GFP_ORDER_HI;


We could duplicate something like that for SLUB.
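
(As an illustration, a minimal sketch of what such a duplicated check might
look like on the SLUB side, reusing the same num_physpages heuristic as the
mm/slab.c snippet above; the variable and function names here are
hypothetical, not from any posted patch:)

static int slub_break_gfp_order;	/* hypothetical: default to order 0 */

static void __init slub_set_break_gfp_order(void)
{
	/*
	 * Mirror the mm/slab.c heuristic quoted above: only permit
	 * bigger page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slub_break_gfp_order = 1;
}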


2007-10-01 21:32:18

by Andrew Morton

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Mon, 1 Oct 2007 13:55:29 -0700 (PDT)
Christoph Lameter <[email protected]> wrote:

> On Sat, 29 Sep 2007, Andrew Morton wrote:
>
> > > atomic allocations. And with SLUB using higher order pages, atomic !0
> > > order allocations will be very very common.
> >
> > Oh OK.
> >
> > I thought we'd already fixed slub so that it didn't do that. Maybe that
> > fix is in -mm but I don't think so.
> >
> > Trying to do atomic order-1 allocations on behalf of arbitrary slab caches
> > just won't fly - this is a significant degradation in kernel reliability,
> > as you've very easily demonstrated.
>
> Ummm... SLAB also does order 1 allocations. We have always done them.
>
> See mm/slab.c
>
> /*
> * Do not go above this order unless 0 objects fit into the slab.
> */
> #define BREAK_GFP_ORDER_HI 1
> #define BREAK_GFP_ORDER_LO 0
> static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

Do slab and slub use the same underlying page size for each slab?

Single data point: the CONFIG_SLAB boxes which I have access to here are
using order-0 for radix_tree_node, so they won't be failing in the way in
which Peter's machine is.

I've never ever before seen reports of page allocation failures in the
radix-tree node allocation code, and that's the bottom line. This is just
a drop-dead must-fix show-stopping bug. We cannot rely upon atomic order-1
allocations succeeding so we cannot use them for radix-tree nodes. Nor for
lots of other things which we have no chance of identifying.

Peter, is this bug -mm only, or is 2.6.23 similarly failing?

2007-10-01 21:39:13

by Christoph Lameter

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Mon, 1 Oct 2007, Andrew Morton wrote:

> Do slab and slub use the same underlying page size for each slab?

SLAB cannot pack objects as densely as SLUB and they have different
algorithms to make the choice of order. Thus the number of objects per slab
may vary between SLAB and SLUB and therefore also the choice of order to
store these objects.

> Single data point: the CONFIG_SLAB boxes which I have access to here are
> using order-0 for radix_tree_node, so they won't be failing in the way in
> which Peter's machine is.

Upstream SLUB uses order 0 allocations for the radix tree. MM varies
because higher order allocations are used more liberally if the mobility
algorithms are found to be active:

2.6.23-rc8:

Name Objects Objsize Space Slabs/Part/Cpu O/S O %Fr %Ef Flg
radix_tree_node 14281 552 9.9M 2432/948/1 7 0 38 79

2007-10-01 21:46:24

by Andrew Morton

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Mon, 1 Oct 2007 14:38:55 -0700 (PDT)
Christoph Lameter <[email protected]> wrote:

> On Mon, 1 Oct 2007, Andrew Morton wrote:
>
> > Do slab and slub use the same underlying page size for each slab?
>
> SLAB cannot pack objects as densely as SLUB and they have different
> algorithms to make the choice of order. Thus the number of objects per slab
> may vary between SLAB and SLUB and therefore also the choice of order to
> store these objects.
>
> > Single data point: the CONFIG_SLAB boxes which I have access to here are
> > using order-0 for radix_tree_node, so they won't be failing in the way in
> > which Peter's machine is.
>
> Upstream SLUB uses order 0 allocations for the radix tree.

OK, that's a relief.

> MM varies
> because higher order allocations are used more liberally if the mobility
> algorithms are found to be active:
>
> 2.6.23-rc8:
>
> Name Objects Objsize Space Slabs/Part/Cpu O/S O %Fr %Ef Flg
> radix_tree_node 14281 552 9.9M 2432/948/1 7 0 38 79

Ah. So the already-dropped
slub-exploit-page-mobility-to-increase-allocation-order.patch was the
culprit?

2007-10-01 21:52:46

by Christoph Lameter

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Mon, 1 Oct 2007, Andrew Morton wrote:

> Ah. So the already-dropped
> slub-exploit-page-mobility-to-increase-allocation-order.patch was the
> culprit?

Yes. Without that patch SLUB will no longer take special action if antifrag
is around.

2007-10-02 09:19:47

by Peter Zijlstra

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Mon, 2007-10-01 at 14:30 -0700, Andrew Morton wrote:
> On Mon, 1 Oct 2007 13:55:29 -0700 (PDT)
> Christoph Lameter <[email protected]> wrote:
>
> > On Sat, 29 Sep 2007, Andrew Morton wrote:
> >
> > > > atomic allocations. And with SLUB using higher order pages, atomic !0
> > > > order allocations will be very very common.
> > >
> > > Oh OK.
> > >
> > > I thought we'd already fixed slub so that it didn't do that. Maybe that
> > > fix is in -mm but I don't think so.
> > >
> > > Trying to do atomic order-1 allocations on behalf of arbitrary slab caches
> > > just won't fly - this is a significant degradation in kernel reliability,
> > > as you've very easily demonstrated.
> >
> > Ummm... SLAB also does order 1 allocations. We have always done them.
> >
> > See mm/slab.c
> >
> > /*
> > * Do not go above this order unless 0 objects fit into the slab.
> > */
> > #define BREAK_GFP_ORDER_HI 1
> > #define BREAK_GFP_ORDER_LO 0
> > static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
>
> Do slab and slub use the same underlying page size for each slab?
>
> Single data point: the CONFIG_SLAB boxes which I have access to here are
> using order-0 for radix_tree_node, so they won't be failing in the way in
> which Peter's machine is.
>
> I've never ever before seen reports of page allocation failures in the
> radix-tree node allocation code, and that's the bottom line. This is just
> a drop-dead must-fix show-stopping bug. We cannot rely upon atomic order-1
> allocations succeeding so we cannot use them for radix-tree nodes. Nor for
> lots of other things which we have no chance of identifying.
>
> Peter, is this bug -mm only, or is 2.6.23 similarly failing?

I'm mainly using -mm (so you have at least one tester :-). I think the
-mm-specific SLUB patch that ups slub_min_order makes the problem -mm
specific; I would have to test .23.



2007-10-03 01:09:18

by Nick Piggin

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Tuesday 02 October 2007 07:01, Christoph Lameter wrote:
> On Sat, 29 Sep 2007, Peter Zijlstra wrote:
> > On Fri, 2007-09-28 at 11:20 -0700, Christoph Lameter wrote:
> > > Really? That means we can no longer even allocate stacks for forking.
> >
> > I think I'm running with 4k stacks...
>
> 4k stacks will never fly on an SGI x86_64 NUMA configuration given the
> additional data that may be kept on the stack. We are currently
> considering going from 8k to 16k (or even 32k) to make things work. So
> having the ability to put the stacks in vmalloc space may be something to
> look at.

i386 and x86-64 have already used 8K stacks for years and they have never
really been much of a problem before.

They only started failing when contiguous memory started getting used up
by other things, _even with_ those anti-frag patches in there.

Bottom line is that you do not use higher order allocations when you do
not need them.

2007-10-03 01:14:56

by Nick Piggin

[permalink] [raw]
Subject: Re: [15/17] SLUB: Support virtual fallback via SLAB_VFALLBACK

On Tuesday 02 October 2007 06:50, Christoph Lameter wrote:
> On Fri, 28 Sep 2007, Nick Piggin wrote:
> > I thought it was slower. Have you fixed the performance regression?
> > (OK, I read further down that you are still working on it but not
> > confirmed yet...)
>
> The problem is with the weird way of Intel testing and communication.
> Every 3-6 months or so they will tell you the system is X% up or down on
> arch Y (and they won't give you details because it's somehow secret). And
> then there are conflicting statements by the two or so performance test
> departments. One of them repeatedly assured me that they do not see any
> regressions.

Just so long as there aren't known regressions that would require higher
order allocations to fix them.


> > OK, so long as it isn't going to depend on using higher order pages,
> > that's fine. (if they help even further as an optional thing, that's fine
> > too. You can turn them on your huge systems and not even bother about
> > adding this vmap fallback -- you won't have me to nag you about these
> > purely theoretical issues).
>
> Well the vmap fallback is generally useful AFAICT. Higher order
> allocations are common on some of our platforms. Order 1 failures even
> affect essential things like stacks that have nothing to do with SLUB and
> the LBS patchset.

I don't know if it is worth the trouble, though. The best thing to do is to
ensure that contiguous memory is not wasted on frivolous things... a few
order-1 or 2 allocations aren't too much of a problem.

The only high order allocation failures I've seen from fragmentation for a
long time, IIRC, are the order-3 failures coming from e1000. And obviously
they cannot use vmap.

2007-10-04 16:16:51

by Matthew Wilcox

[permalink] [raw]
Subject: SLUB performance regression vs SLAB

On Mon, Oct 01, 2007 at 01:50:44PM -0700, Christoph Lameter wrote:
> The problem is with the weird way of Intel testing and communication.
> Every 3-6 months or so they will tell you the system is X% up or down on
> arch Y (and they won't give you details because it's somehow secret). And
> then there are conflicting statements by the two or so performance test
> departments. One of them repeatedly assured me that they do not see any
> regressions.

Could you cut out the snarky remarks? It takes a long time to run a
test, and testing every one of the patches you send really isn't high
on anyone's priority list. The performance team have also been having
problems getting stable results with recent kernels, adding to the delay.
The good news is that we do now have commitment to testing upstream
kernels, so you should see results more frequently than you have been.

I'm taking over from Suresh as liaison for the performance team, so
if you hear *anything* from *anyone* else at Intel about performance,
I want you to cc me about it. OK? And I don't want to hear any more
whining about hearing different things from different people.

So, on "a well-known OLTP benchmark which prohibits publishing absolute
numbers" and on an x86-64 system (I don't think exactly which model
is important), we're seeing *6.51%* performance loss on slub vs slab.
This is with a 2.6.23-rc3 kernel. Tuning the boot parameters, as you've
asked for before (slub_min_order=2, slub_max_order=4, slub_min_objects=8)
gets back 0.38% of that. It's still down 6.13% over slab.

For what it's worth, 2.6.23-rc3 already has a 1.19% regression versus
RHEL 4.5, so the performance guys are really unhappy about going up to
almost 8% regression.

In the detailed profiles, __slab_free is the third most expensive
function, behind only spin locks. get_partial_node is right behind it
in fourth place, and kmem_cache_alloc is sixth. __slab_alloc is eighth
and kmem_cache_free is tenth. These positions don't change with the
slub boot parameters.

Now, where do we go next? I suspect that 2.6.23-rc9 has significant
changes since -rc3, but I'd like to confirm that before kicking off
another (expensive) run. Please tell me which kernels would be useful to test.

--
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours. We can't possibly take such
a retrograde step."

2007-10-04 17:38:27

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, 4 Oct 2007, Matthew Wilcox wrote:

> So, on "a well-known OLTP benchmark which prohibits publishing absolute
> numbers" and on an x86-64 system (I don't think exactly which model
> is important), we're seeing *6.51%* performance loss on slub vs slab.
> This is with a 2.6.23-rc3 kernel. Tuning the boot parameters, as you've
> asked for before (slub_min_order=2, slub_max_order=4, slub_min_objects=8)
> gets back 0.38% of that. It's still down 6.13% over slab.

Yeah the fastpath vs. slow path is not the issue as Siddha and I concluded
earlier. Seems that we are mainly seeing cacheline bouncing due to two
cpus accessing meta data in the same page struct. The patches in
MM that are scheduled to be merged for .24 address that issue. I
have repeatedly asked that these patches be tested. The patches were
posted months ago.

> Now, where do we go next? I suspect that 2.6.23-rc9 has significant
> changes since -rc3, but I'd like to confirm that before kicking off
> another (expensive) run. Please tell me which kernels would be useful to test.

I thought Siddha has a test in the works with the per cpu structure
patchset from MM? Could you sync up with Siddha?

2007-10-04 17:46:04

by Matthew Wilcox

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, Oct 04, 2007 at 10:38:15AM -0700, Christoph Lameter wrote:
> On Thu, 4 Oct 2007, Matthew Wilcox wrote:
>
> > So, on "a well-known OLTP benchmark which prohibits publishing absolute
> > numbers" and on an x86-64 system (I don't think exactly which model
> > is important), we're seeing *6.51%* performance loss on slub vs slab.
> > This is with a 2.6.23-rc3 kernel. Tuning the boot parameters, as you've
> > asked for before (slub_min_order=2, slub_max_order=4, slub_min_objects=8)
> > gets back 0.38% of that. It's still down 6.13% over slab.
>
> Yeah the fastpath vs. slow path is not the issue as Siddha and I concluded
> earlier. Seems that we are mainly seeing cacheline bouncing due to two
> cpus accessing meta data in the same page struct. The patches in
> MM that are scheduled to be merged for .24 address that issue. I
> have repeatedly asked that these patches be tested. The patches were
> posted months ago.

I just checked with the guys who did the test. When I said -rc3, I
mis-spoke; this is 2.6.23-rc3 *plus* the patches which Suresh agreed to
test for you.

2007-10-04 17:50:07

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, 4 Oct 2007, Matthew Wilcox wrote:

> > Yeah the fastpath vs. slow path is not the issue as Siddha and I concluded
> > earlier. Seems that we are mainly seeing cacheline bouncing due to two
> > cpus accessing meta data in the same page struct. The patches in
> > MM that are scheduled to be merged for .24 address that issue. I
> > have repeatedly asked that these patches be tested. The patches were
> > posted months ago.
>
> I just checked with the guys who did the test. When I said -rc3, I
> mis-spoke; this is 2.6.23-rc3 *plus* the patches which Suresh agreed to
> test for you.

I was not aware of that. Would it be possible for you to summarize all the
test data that you have right now about SLUB vs. SLAB with the patches
listed? Exactly what kernel version and what version of the per cpu
patches were tested? Was the page allocator pass through patchset
separately applied as I requested?

Finally: Is there some way that I can reproduce the tests on my machines?

2007-10-04 17:53:08

by Arjan van de Ven

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, 4 Oct 2007 10:38:15 -0700 (PDT)
Christoph Lameter <[email protected]> wrote:


> Yeah the fastpath vs. slow path is not the issue as Siddha and I
> concluded earlier. Seems that we are mainly seeing cacheline bouncing
> due to two cpus accessing meta data in the same page struct. The
> patches in MM that are scheduled to be merged for .24 address


Ok, every time someone says anything not 100% positive about SLUB you
come back with "but it's fixed in the next patch set"... *every time*.

To be honest, to me that sounds like SLUB isn't ready for prime time
yet, or at least not ready to be the only one in town...

The day that the answer is "the kernel.org slub is fixing all the
issues" is when it's ready..

2007-10-04 17:58:58

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, 4 Oct 2007, Arjan van de Ven wrote:

> Ok, every time someone says anything not 100% positive about SLUB you
> come back with "but it's fixed in the next patch set"... *every time*.

All I ask is that people test the fixes that have been out there for the
known issues. If there are remaining performance issues then let's figure
them out and address them.

2007-10-04 18:32:20

by Peter Zijlstra

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB


On Thu, 2007-10-04 at 10:50 -0700, Arjan van de Ven wrote:
> On Thu, 4 Oct 2007 10:38:15 -0700 (PDT)
> Christoph Lameter <[email protected]> wrote:
>
>
> > Yeah the fastpath vs. slow path is not the issue as Siddha and I
> > concluded earlier. Seems that we are mainly seeing cacheline bouncing
> > due to two cpus accessing meta data in the same page struct. The
> > patches in MM that are scheduled to be merged for .24 address
>
>
> Ok, every time someone says anything not 100% positive about SLUB you
> come back with "but it's fixed in the next patch set"... *every time*.
>
> To be honest, to me that sounds like SLUB isn't ready for prime time
> yet, or at least not ready to be the only one in town...
>
> The day that the answer is "the kernel.org slub is fixing all the
> issues" is when it's ready..

Arjan, to be honest, there has been some confusion on _what_ code has
been tested with what results. And with Christoph not able to reproduce
these results locally, it is very hard for him to fix it properly.



2007-10-04 18:40:42

by Matthew Wilcox

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, Oct 04, 2007 at 10:49:52AM -0700, Christoph Lameter wrote:
> I was not aware of that. Would it be possible for you to summarize all the
> test data that you have right now about SLUB vs. SLAB with the patches
> listed? Exactly what kernel version and what version of the per cpu
> patches were tested?

We have three runs, all with 2.6.23-rc3 plus the patches that Suresh
applied from 20070922. The first run is with slab. The second run is
with SLUB and the third run is SLUB plus the tuning parameters you
recommended.

I have a spreadsheet with Vtune data in it that was collected during
each of these test runs, so we can see which functions are the hottest.
I can grab that data and send it to you, if that's interesting.

> Was the page allocator pass through patchset
> separately applied as I requested?

I don't believe so. Suresh?

I think for future tests, it would be easiest if you send me a git
reference. That way we will all know precisely what is being tested.

> Finally: Is there some way that I can reproduce the tests on my machines?

As usual for these kinds of setups ... take a two-CPU machine, 64GB
of memory, half a dozen fibre channel adapters, about 3000 discs,
a commercial database, a team of experts for three months worth of
tuning ...

I don't know if anyone's tried to replicate a benchmark like this using
Postgres. Would be nice if they have ...

2007-10-04 19:05:47

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, 4 Oct 2007, Matthew Wilcox wrote:

> We have three runs, all with 2.6.23-rc3 plus the patches that Suresh
> applied from 20070922. The first run is with slab. The second run is
> with SLUB and the third run is SLUB plus the tuning parameters you
> recommended.

There was quite a bit of communication on tuning parameters. I guess we got
more confusion there, and multiple configuration settings that I wanted
tested separately were merged. Setting slub_min_order to more than zero
can certainly be detrimental to performance since higher order page
allocations can cause cacheline bouncing on zone locks.

Which patches? 20070922 refers to a pull on the slab git tree on the
performance branch?

> I have a spreadsheet with Vtune data in it that was collected during
> each of these test runs, so we can see which functions are the hottest.
> I can grab that data and send it to you, if that's interesting.

Please do. Add the kernel .configs please. Is there any slab queue tuning
going on at boot with the SLAB configuration?

Include any tuning that was done to the kernel please.

> > Was the page allocator pass through patchset
> > separately applied as I requested?
>
> I don't believe so. Suresh?

If it was a git pull then the pass through was included and never taken
out.

> I think for future tests, it would be easiest if you send me a git
> reference. That way we will all know precisely what is being tested.

Sure we can do that.

> > Finally: Is there some way that I can reproduce the tests on my machines?
>
> As usual for these kinds of setups ... take a two-CPU machine, 64GB
> of memory, half a dozen fibre channel adapters, about 3000 discs,
> a commercial database, a team of experts for three months worth of
> tuning ...
>
> I don't know if anyone's tried to replicate a benchmark like this using
> Postgres. Would be nice if they have ...

Well, we have our own performance test department here at SGI. If we get
them involved then we can add another 3 months until we get the test
results confirmed ;-). This seems to be a small configuration. Why
does it take that long? And the experts knew SLAB and not SLUB, right?

Let's look at all the data that you have and then see if it is enough to
figure out what is wrong.

2007-10-04 19:47:04

by Suresh Siddha

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, Oct 04, 2007 at 12:05:35PM -0700, Christoph Lameter wrote:
> > > Was the page allocator pass through patchset
> > > separately applied as I requested?
> >
> > I don't believe so. Suresh?
>
> If it was a git pull then the pass through was included and never taken
> out.

It was a git pull from the performance branch that you pointed out earlier
http://git.kernel.org/?p=linux/kernel/git/christoph/slab.git;a=log;h=performance

and the config is based on EL5 config with just the SLUB turned on.

2007-10-04 20:48:46

by David Miller

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

From: Arjan van de Ven <[email protected]>
Date: Thu, 4 Oct 2007 10:50:46 -0700

> Ok, every time someone says anything not 100% positive about SLUB you
> come back with "but it's fixed in the next patch set"... *every time*.

I think this is partly Christoph subconsciously venting his
frustration that he's never given a reproducible test case he can use
to fix the problem.

There comes a point where it is the reporter's responsibility to help
the developer come up with a publishable test case the developer can
use to work on fixing the problem and help ensure it stays fixed.

Using an unpublishable benchmark, whose results cannot even be
published, really stretches the limits of "reasonable", don't you
think?

This "SLUB isn't ready yet" bullshit is just a shamans dance which
distracts attention away from the real problem, which is that a
reproducable, publishable test case, is not being provided to the
developer so he can work on fixing the problem.

I can tell you this thing would be fixed overnight if a proper test
case had been provided by now.

2007-10-04 20:55:48

by David Miller

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

From: [email protected] (Matthew Wilcox)
Date: Thu, 4 Oct 2007 12:28:25 -0700

> On Thu, Oct 04, 2007 at 10:49:52AM -0700, Christoph Lameter wrote:
> > Finally: Is there some way that I can reproduce the tests on my machines?
>
> As usual for these kinds of setups ... take a two-CPU machine, 64GB
> of memory, half a dozen fibre channel adapters, about 3000 discs,
> a commercial database, a team of experts for three months worth of
> tuning ...

Anything, I do mean anything, can be simulated using small test
programs. Pointing at a big fancy machine with lots of storage
and disk is a passive aggressive way to avoid the real issues,
in that nobody is putting forth the effort to try and come up
with an at least publishable test case that Christoph can use to
help you guys.

If coming up with a reproducible and publishable test case is
the difference between this getting fixed and it not getting
fixed, are you going to invest the time to do that?

2007-10-04 20:58:24

by Matthew Wilcox

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, Oct 04, 2007 at 01:48:34PM -0700, David Miller wrote:
> There comes a point where it is the reporter's responsibility to help
> the developer come up with a publishable test case the developer can
> use to work on fixing the problem and help ensure it stays fixed.

That's a lot of effort. Is it more effort than doing some remote
debugging with Christoph? I don't know.

> Using an unpublishable benchmark, whose results cannot even be
> published, really stretches the limits of "reasonable", don't you
> think?

Yet here we stand. Christoph is aggressively trying to get slab removed
from the tree. There is a testcase which shows slub performing worse
than slab. It's not my fault I can't publish it. And just because I
can't publish it doesn't mean it doesn't exist.

Slab needs to not get removed until slub is as good a performer on this
benchmark.

--
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours. We can't possibly take such
a retrograde step."

2007-10-04 21:05:31

by Matthew Wilcox

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, Oct 04, 2007 at 01:55:37PM -0700, David Miller wrote:
> Anything, I do mean anything, can be simulated using small test
> programs. Pointing at a big fancy machine with lots of storage
> and disk is a passive aggressive way to avoid the real issues,
> in that nobody is putting forth the effort to try and come up
> with an at least publishable test case that Christoph can use to
> help you guys.
>
> If coming up with a reproducible and publishable test case is
> the difference between this getting fixed and it not getting
> fixed, are you going to invest the time to do that?

If that's what it takes, then yes. But I'm far from convinced that
it's as easy to come up with a TPC benchmark simulator as you think.
There have been efforts in the past (orasim, for example), but
presumably Christoph has already tried these benchmarks.

--
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours. We can't possibly take such
a retrograde step."

2007-10-04 21:06:13

by David Miller

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

From: Matthew Wilcox <[email protected]>
Date: Thu, 4 Oct 2007 14:58:12 -0600

> On Thu, Oct 04, 2007 at 01:48:34PM -0700, David Miller wrote:
> > There comes a point where it is the reporter's responsibility to help
> > the developer come up with a publishable test case the developer can
> > use to work on fixing the problem and help ensure it stays fixed.
>
> That's a lot of effort. Is it more effort than doing some remote
> debugging with Christoph? I don't know.

That's a good question and an excellent point. I'm sure that,
either way, Christoph will be more than willing to engage and
assist.

2007-10-04 21:07:41

by Chuck Ebbert

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On 10/04/2007 04:55 PM, David Miller wrote:
>
> Anything, I do mean anything, can be simulated using small test
> programs.

How do you simulate reading 100TB of data spread across 3000 disks,
selecting 10% of it using some criterion, then sorting and summarizing
the result?

2007-10-04 21:11:26

by David Miller

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

From: Chuck Ebbert <[email protected]>
Date: Thu, 04 Oct 2007 17:02:17 -0400

> How do you simulate reading 100TB of data spread across 3000 disks,
> selecting 10% of it using some criterion, then sorting and
> summarizing the result?

You repeatedly read zeros from a smaller disk into the same amount of
memory, and sort that as if it were real data instead.

You're not thinking outside of the box, and you need to do that to
write good test cases and fix kernel bugs effectively.

2007-10-04 21:12:16

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, 4 Oct 2007, Matthew Wilcox wrote:

> Yet here we stand. Christoph is aggressively trying to get slab removed
> from the tree. There is a testcase which shows slub performing worse
> than slab. It's not my fault I can't publish it. And just because I
> can't publish it doesn't mean it doesn't exist.
>
> Slab needs to not get removed until slub is as good a performer on this
> benchmark.

I agree with this .... SLAB will stay until we have worked through all the
performance issues.

2007-10-04 21:48:41

by Chuck Ebbert

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On 10/04/2007 05:11 PM, David Miller wrote:
> From: Chuck Ebbert <[email protected]>
> Date: Thu, 04 Oct 2007 17:02:17 -0400
>
>> How do you simulate reading 100TB of data spread across 3000 disks,
>> selecting 10% of it using some criterion, then sorting and
>> summarizing the result?
>
> You repeatedly read zeros from a smaller disk into the same amount of
> memory, and sort that as if it were real data instead.

You've just replaced 3000 concurrent streams of data with a single
stream. That won't test the memory allocator's ability to allocate
memory to many concurrent users very well.

2007-10-04 22:07:30

by David Miller

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

From: Chuck Ebbert <[email protected]>
Date: Thu, 04 Oct 2007 17:47:48 -0400

> On 10/04/2007 05:11 PM, David Miller wrote:
> > From: Chuck Ebbert <[email protected]>
> > Date: Thu, 04 Oct 2007 17:02:17 -0400
> >
> >> How do you simulate reading 100TB of data spread across 3000 disks,
> >> selecting 10% of it using some criterion, then sorting and
> >> summarizing the result?
> >
> > You repeatedly read zeros from a smaller disk into the same amount of
> > memory, and sort that as if it were real data instead.
>
> You've just replaced 3000 concurrent streams of data with a single
> stream. That won't test the memory allocator's ability to allocate
> memory to many concurrent users very well.

You've kindly removed my "thinking outside of the box" comment.

The point was not that my specific suggestion would be
perfect, but that if you used your creativity and thought
in similar directions you might find a way to do it.

People are too narrow minded when it comes to these things, and
that's the problem I want to address.

2007-10-04 22:24:42

by David Chinner

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, Oct 04, 2007 at 03:07:18PM -0700, David Miller wrote:
> From: Chuck Ebbert <[email protected]> Date: Thu, 04 Oct 2007 17:47:48
> -0400
>
> > On 10/04/2007 05:11 PM, David Miller wrote:
> > > From: Chuck Ebbert <[email protected]> Date: Thu, 04 Oct 2007 17:02:17
> > > -0400
> > >
> > >> How do you simulate reading 100TB of data spread across 3000 disks,
> > >> selecting 10% of it using some criterion, then sorting and summarizing
> > >> the result?
> > >
> > > You repeatedly read zeros from a smaller disk into the same amount of
> > > memory, and sort that as if it were real data instead.
> >
> > You've just replaced 3000 concurrent streams of data with a single stream.
> > That won't test the memory allocator's ability to allocate memory to many
> > concurrent users very well.
>
> You've kindly removed my "thinking outside of the box" comment.
>
> The point was not that my specific suggestion would be perfect, but that
> if you used your creativity and thought in similar directions you might find
> a way to do it.
>
> People are too narrow minded when it comes to these things, and that's the
> problem I want to address.

And it's a good point, too, because often problems for one person are a
no-brainer for someone else.

Creating lots of "fake" disks is trivial to do, IMO. Use loopback on sparse
files, use ramdisks containing sparse files, or write a
sparse dm target for sparse block device mapping, etc. I'm sure there's more than the
few I just threw out...

Cheers,

Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group

2007-10-04 23:40:23

by David Schwartz

[permalink] [raw]
Subject: RE: SLUB performance regression vs SLAB


David Miller wrote:

> Using an unpublishable benchmark, whose results cannot even be
> published, really stretches the limits of "reasonable", don't you
> think?
>
> This "SLUB isn't ready yet" bullshit is just a shamans dance which
> distracts attention away from the real problem, which is that a
> reproducable, publishable test case, is not being provided to the
> developer so he can work on fixing the problem.
>
> I can tell you this thing would be fixed overnight if a proper test
> case had been provided by now.

I would just like to echo what you said, just a bit more angrily. This is the same
as someone asking him to fix a bug that they can only see with a binary-only
kernel module. I think he's perfectly justified in simply responding "the
bug is as likely to be in your code as mine".

Now, just because he's justified in doing that doesn't mean he should. I
presume he has an honest desire to improve his own code and if they've found
a real problem, I'm sure he'd love to fix it.

But this is just a preposterous position to put him in. If there's no
reproducible test case, then why should he care that one program he can't
even see works badly? If you care, you fix it.

Matthew Wilcox wrote:

> Yet here we stand. Christoph is aggressively trying to get slab removed
> from the tree. There is a testcase which shows slub performing worse
> than slab. It's not my fault I can't publish it. And just because I
> can't publish it doesn't mean it doesn't exist.

It means it may or may not exist. All we have is your word that slub is the
problem. If I said I found a bug in the Linux kernel that caused it to panic
but I could only reproduce it with the nVidia driver, I'd be laughed at.

It may even be that slub is better and your benchmark simply interprets this as
worse. Without the details of your benchmark, we can't know. For example,
I've seen benchmarks that (usually unintentionally) actually do a *variable*
amount of work, where details of the implementation may result in the benchmark
actually doing *more* work, so taking longer does not mean it ran slower.

DS


2007-10-04 23:49:45

by Chuck Ebbert

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On 10/04/2007 07:39 PM, David Schwartz wrote:
> But this is just a preposterous position to put him in. If there's no
> reproducible test case, then why should he care that one program he can't
> even see works badly? If you care, you fix it.
>

People have been trying for years to make reproducible test cases
for huge and complex workloads. It doesn't work. The tests that do
work take weeks to run and need to be carefully validated before
they can be officially released. The open source community can and
should be working on similar tests, but they will never be simple.

2007-10-05 02:44:18

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

I just spent some time looking at the functions that you see high in the
list. The trouble is that I have to speculate and that I have nothing to
verify my thoughts against. If you could give me the hitlist for each of the
3 runs then this would help me check my thinking. I could be totally off
here.

It seems that we frequently miss the per cpu slab on slab_free(), which
leads to calling __slab_free(), which in turn needs to take a
lock on the page (in the page struct). Typically the page lock is
uncontended, which seems not to be the case here; otherwise it would not be
that high up.

The per cpu patch in mm should reduce the contention on the page struct by
not touching the page struct on alloc and on free. It does not seem to work
all the way, though. slab_free() still has to touch the page struct if the
free is not to the currently active cpu slab.

So there could still be page struct contention left if multiple processors
frequently and simultaneously free to the same slab and that slab is not
the per cpu slab of a cpu. That could be addressed by optimizing the
object free handling further to not touch the page struct even if we miss
the per cpu slab.

That get_partial* being far up indicates contention on the list lock that
should be addressable by either increasing the slab size or by changing
the object free handling to batch in some form.

This is an SMP system right? 2 cores with 4 cpus each? The main loop is
always hitting on the same slabs? Which slabs would this be? Am I right in
thinking that one process allocates objects and then lets multiple other
processors do work and then the allocated object is freed from a cpu that
did not allocate the object? If neighboring objects in one slab are
allocated on one cpu and then are almost simultaneously freed from a set
of different cpus then this may explain the situation.
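
(For reference, a condensed sketch of the two free paths being described,
mirroring the slab_free()/__slab_free() split that the patch later in this
thread also shows; simplified, with debug handling elided:)

static void __always_inline slab_free(struct kmem_cache *s,
			struct page *page, void *x, void *addr)
{
	void **object = (void *)x;
	struct kmem_cache_cpu *c;
	unsigned long flags;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	if (likely(page == c->page && c->node >= 0)) {
		/*
		 * Fast path: the object belongs to the current cpu slab,
		 * so only the per cpu freelist is touched -- no page
		 * struct access, hence no cacheline bouncing.
		 */
		object[c->offset] = c->freelist;
		c->freelist = object;
	} else
		/*
		 * Slow path: __slab_free() must take the slab lock in the
		 * page struct, which is where the contention that pushes
		 * __slab_free up the profile comes from.
		 */
		__slab_free(s, page, x, addr, c->offset);
	local_irq_restore(flags);
}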

2007-10-05 02:53:58

by Arjan van de Ven

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Thu, 4 Oct 2007 19:43:58 -0700 (PDT)
Christoph Lameter <[email protected]> wrote:

> So there could still be page struct contention left if multiple
> processors frequently and simultaneously free to the same slab and
> that slab is not the per cpu slab of a cpu. That could be addressed
> by optimizing the object free handling further to not touch the page
> struct even if we miss the per cpu slab.
>
> That get_partial* being far up indicates contention on the list lock
> that should be addressable by either increasing the slab size or by
> changing the object free handling to batch in some form.
>
> This is an SMP system right? 2 cores with 4 cpus each? The main loop
> is always hitting on the same slabs? Which slabs would this be? Am I
> right in thinking that one process allocates objects and then lets
> multiple other processors do work and then the allocated object is
> freed from a cpu that did not allocate the object? If neighboring
> objects in one slab are allocated on one cpu and then are almost
> simultaneously freed from a set of different cpus then this may
> explain the situation.

One of the characteristics of the application in use is the following:
all cores submit IO (which means they allocate various scsi and block
structures on all cpus)... but only one will free it (the one the IRQ is
bound to). So it's allocate-on-one-free-on-another at a high rate.

That is assuming this is the IO slab; that's a bit of an assumption
obviously (it's one of the slab things that are hot, but it's a complex
workload, there could be others).

2007-10-05 04:18:39

by David Schwartz

[permalink] [raw]
Subject: RE: SLUB performance regression vs SLAB


> On 10/04/2007 07:39 PM, David Schwartz wrote:

> > But this is just a preposterous position to put him in. If there's no
> > reproducible test case, then why should he care that one
> > program he can't
> > even see works badly? If you care, you fix it.

> People have been trying for years to make reproducible test cases
> for huge and complex workloads. It doesn't work. The tests that do
> work take weeks to run and need to be carefully validated before
> they can be officially released. The open source community can and
> should be working on similar tests, but they will never be simple.

That's true, but irrelevant. Either the test can identify a problem that
applies generally, or it's doing nothing but measuring how good the system
is at doing the test. If the former, it should be possible to create a
simple test case once you know from the complex test where the problem is.
If the latter, who cares about a supposed regression?

It should be possible to identify exactly what portion of the test shows the
regression the most and exactly what the system is doing during that moment.
The test may be great at finding regressions, but once it finds them, they
should be forever *found*.

Did you follow the recent incident when iperf found what seemed to be a
significant CFS networking regression? The only way to identify that it was
a quirk in what iperf was doing was by looking at exactly what iperf was
doing. The only efficient way was to look at iperf's source and see that
iperf's weird yielding meant it didn't replicate typical use cases like it
was supposed to.

DS


2007-10-05 06:47:39

by Jens Axboe

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Fri, Oct 05 2007, David Chinner wrote:
> On Thu, Oct 04, 2007 at 03:07:18PM -0700, David Miller wrote:
> > From: Chuck Ebbert <[email protected]> Date: Thu, 04 Oct 2007 17:47:48
> > -0400
> >
> > > On 10/04/2007 05:11 PM, David Miller wrote:
> > > > From: Chuck Ebbert <[email protected]> Date: Thu, 04 Oct 2007 17:02:17
> > > > -0400
> > > >
> > > >> How do you simulate reading 100TB of data spread across 3000 disks,
> > > >> selecting 10% of it using some criterion, then sorting and summarizing
> > > >> the result?
> > > >
> > > > You repeatedly read zeros from a smaller disk into the same amount of
> > > > memory, and sort that as if it were real data instead.
> > >
> > > You've just replaced 3000 concurrent streams of data with a single stream.
> > > That won't test the memory allocator's ability to allocate memory to many
> > > concurrent users very well.
> >
> > You've kindly removed my "thinking outside of the box" comment.
> >
> > The point was not that my specific suggestion would be perfect, but that
> > if you used your creativity and thought in similar directions you might find
> > a way to do it.
> >
> > People are too narrow minded when it comes to these things, and that's the
> > problem I want to address.
>
> And it's a good point, too, because often problems for one person are a
> no-brainer for someone else.
>
> Creating lots of "fake" disks is trivial to do, IMO. Use loopback on
> sparse files, use ramdisks containing sparse
> files, or write a sparse dm target for sparse block device mapping,
> etc. I'm sure there's more than the few I just threw out...

Or use scsi_debug to fake drives/controllers; it works wonderfully as well
for some things and involves the full IO stack.

I'd like to second David's emails here, this is a serious problem. Having
a reproducible test case lowers the barrier for getting the problem
fixed by orders of magnitude. It's the difference between the problem
getting fixed in a day or two and it potentially lingering for months,
because email ping-pong takes forever and "the test team has moved on to
other tests, we'll let you know the results of test foo in 3 weeks time
when we have a new slot on the box" just removes any developer
motivation to work on the issue.

--
Jens Axboe

2007-10-05 09:19:39

by Pekka Enberg

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

Hi,

On 10/5/07, Jens Axboe <[email protected]> wrote:
> I'd like to second David's emails here, this is a serious problem. Having
> a reproducible test case lowers the barrier for getting the problem
> fixed by orders of magnitude. It's the difference between the problem
> getting fixed in a day or two and it potentially lingering for months,
> because email ping-pong takes forever and "the test team has moved on to
> other tests, we'll let you know the results of test foo in 3 weeks time
> when we have a new slot on the box" just removes any developer
> motivation to work on the issue.

What I don't understand is this: why don't the people who _have_ access
to the test case fix the problem? Unlike slab, slub is not a pile of
crap that only Christoph can hack on...

Pekka

2007-10-05 09:27:15

by Jens Axboe

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Fri, Oct 05 2007, Pekka Enberg wrote:
> Hi,
>
> On 10/5/07, Jens Axboe <[email protected]> wrote:
> > I'd like to second David's emails here, this is a serious problem. Having
> > a reproducible test case lowers the barrier for getting the problem
> > fixed by orders of magnitude. It's the difference between the problem
> > getting fixed in a day or two and it potentially lingering for months,
> > because email ping-pong takes forever and "the test team has moved on to
> > other tests, we'll let you know the results of test foo in 3 weeks time
> > when we have a new slot on the box" just removes any developer
> > motivation to work on the issue.
>
> What I don't understand is this: why don't the people who _have_ access
> to the test case fix the problem? Unlike slab, slub is not a pile of
> crap that only Christoph can hack on...

Often the people testing are only doing just that, testing. So they
kindly offer to test any patches and so on, which usually takes forever
because of the above limitations in response time, machine availability,
etc.

Writing a small test module to exercise slub/slab in various ways
(allocating from all cpus freeing from one, as described) should not be
too hard. Perhaps that would be enough to find this performance
discrepancy between slab and slub?

--
Jens Axboe
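
(A minimal sketch of such a test module, exercising the
allocate-on-all-cpus, free-on-one pattern described above; the cache name,
object size, throttle threshold and thread structure are illustrative
assumptions, not code from this thread:)

/* alloc_bounce.c - toy module: allocate on every CPU, free on CPU 0. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/err.h>

struct obj {
	struct list_head list;
	char payload[256];		/* arbitrary object size */
};

static struct kmem_cache *bounce_cache;
static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);
static atomic_t pending_count = ATOMIC_INIT(0);
static struct task_struct *workers[NR_CPUS];

/* Runs on every CPU except 0: allocate objects and queue them. */
static int alloc_thread(void *unused)
{
	while (!kthread_should_stop()) {
		struct obj *o;

		/* Crude throttle so the freeing CPU can keep up. */
		if (atomic_read(&pending_count) > 10000) {
			cond_resched();
			continue;
		}
		o = kmem_cache_alloc(bounce_cache, GFP_KERNEL);
		if (o) {
			spin_lock(&pending_lock);
			list_add_tail(&o->list, &pending);
			spin_unlock(&pending_lock);
			atomic_inc(&pending_count);
		}
		cond_resched();
	}
	return 0;
}

/* Runs on CPU 0 only: free everything the other CPUs allocated. */
static int free_thread(void *unused)
{
	while (!kthread_should_stop()) {
		struct obj *o = NULL;

		spin_lock(&pending_lock);
		if (!list_empty(&pending)) {
			o = list_first_entry(&pending, struct obj, list);
			list_del(&o->list);
		}
		spin_unlock(&pending_lock);
		if (o) {
			kmem_cache_free(bounce_cache, o);
			atomic_dec(&pending_count);
		} else
			cond_resched();
	}
	return 0;
}

static int __init bounce_init(void)
{
	int cpu;

	bounce_cache = kmem_cache_create("bounce_test", sizeof(struct obj),
					 0, 0, NULL);
	if (!bounce_cache)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		workers[cpu] = kthread_create(cpu ? alloc_thread : free_thread,
					      NULL, "bounce/%d", cpu);
		if (IS_ERR(workers[cpu])) {
			workers[cpu] = NULL;
			continue;
		}
		kthread_bind(workers[cpu], cpu);
		wake_up_process(workers[cpu]);
	}
	return 0;
}

static void __exit bounce_exit(void)
{
	struct obj *o, *tmp;
	int cpu;

	for_each_online_cpu(cpu)
		if (workers[cpu])
			kthread_stop(workers[cpu]);
	list_for_each_entry_safe(o, tmp, &pending, list)
		kmem_cache_free(bounce_cache, o);
	kmem_cache_destroy(bounce_cache);
}

module_init(bounce_init);
module_exit(bounce_exit);
MODULE_LICENSE("GPL");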

2007-10-05 11:12:46

by Andi Kleen

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

Jens Axboe <[email protected]> writes:
>
> Writing a small test module to exercise slub/slab in various ways
> (allocating from all cpus freeing from one, as described) should not be
> too hard. Perhaps that would be enough to find this performance
> discrepancy between slab and slub?

You could simulate that by just sending packets using unix sockets
between threads bound to different CPUs. Sending a packet allocates; receiving
deallocates.

But it's not clear that will really simulate the cache bounce environment
of the database test. I don't think all passing of data between CPUs
using slub objects is slow.

-Andi
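
(A userspace sketch of that simulation; the buffer size and CPU numbers are
arbitrary, and it assumes Linux with _GNU_SOURCE for the affinity call:)

/* sock_bounce.c - pass packets over a unix socketpair between two
 * CPU-bound threads, so the kernel allocates buffers on one CPU and
 * frees them on the other. Build with: cc -pthread sock_bounce.c */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int sv[2];

static void pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}

static void *sender(void *arg)
{
	char buf[1024];

	memset(buf, 0, sizeof(buf));
	pin_to_cpu(0);
	for (;;)	/* each write allocates buffers in-kernel */
		if (write(sv[0], buf, sizeof(buf)) < 0)
			break;
	return NULL;
}

static void *receiver(void *arg)
{
	char buf[1024];

	pin_to_cpu(1);
	for (;;)	/* each read frees those buffers on another CPU */
		if (read(sv[1], buf, sizeof(buf)) < 0)
			break;
	return NULL;
}

int main(void)
{
	pthread_t s, r;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0) {
		perror("socketpair");
		return 1;
	}
	pthread_create(&s, NULL, sender, NULL);
	pthread_create(&r, NULL, receiver, NULL);
	pause();	/* run until interrupted */
	return 0;
}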

2007-10-05 11:56:46

by Matthew Wilcox

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Fri, Oct 05, 2007 at 08:48:53AM +0200, Jens Axboe wrote:
> I'd like to second David's emails here, this is a serious problem. Having
> a reproducible test case lowers the barrier for getting the problem
> fixed by orders of magnitude. It's the difference between the problem
> getting fixed in a day or two and it potentially lingering for months,
> because email ping-pong takes forever and "the test team has moved on to
> other tests, we'll let you know the results of test foo in 3 weeks time
> when we have a new slot on the box" just removes any developer
> motivation to work on the issue.

I vaguely remembered something called orasim, so I went looking for it.
I found http://oss.oracle.com/~wcoekaer/orasim/ which is dated from
2004, and I found http://oss.oracle.com/projects/orasimjobfiles/ which
seems to be a stillborn project. Is there anything else I should know
about orasim? ;-)

--
Intel are signing my paycheques ... these opinions are still mine
"Bill, look, we understand that you're interested in selling us this
operating system, but compare it to ours. We can't possibly take such
a retrograde step."

2007-10-05 12:36:00

by Jens Axboe

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Fri, Oct 05 2007, Matthew Wilcox wrote:
> On Fri, Oct 05, 2007 at 08:48:53AM +0200, Jens Axboe wrote:
> > I'd like to second David's emails here, this is a serious problem. Having
> > a reproducible test case lowers the barrier for getting the problem
> > fixed by orders of magnitude. It's the difference between the problem
> > getting fixed in a day or two and it potentially lingering for months,
> > because email ping-pong takes forever and "the test team has moved on to
> > other tests, we'll let you know the results of test foo in 3 weeks time
> > when we have a new slot on the box" just removes any developer
> > motivation to work on the issue.
>
> I vaguely remembered something called orasim, so I went looking for it.
> I found http://oss.oracle.com/~wcoekaer/orasim/ which is dated from
> 2004, and I found http://oss.oracle.com/projects/orasimjobfiles/ which
> seems to be a stillborn project. Is there anything else I should know
> about orasim? ;-)

I don't know much about orasim, except that internally we're trying to
use fio for that instead. As far as I know, it was a project that was
never feature complete (or completed altogether, for that matter).

--
Jens Axboe

2007-10-05 12:38:40

by Jens Axboe

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Fri, Oct 05 2007, Andi Kleen wrote:
> Jens Axboe <[email protected]> writes:
> >
> > Writing a small test module to exercise slub/slab in various ways
> > (allocating from all cpus freeing from one, as described) should not be
> > too hard. Perhaps that would be enough to find this performance
> > discrepancy between slab and slub?
>
> You could simulate that by just sending packets using unix sockets
> between threads bound to different CPUs. Sending a packet allocates;
> receiving deallocates.

Sure, there are a host of ways to accomplish the same thing.

> But it's not clear that will really simulate the cache bounce
> environment of the database test. I don't think all passing of data
> between CPUs using slub objects is slow.

It might not, it might. The point is trying to isolate the problem and
making a simple test case that could be used to reproduce it, so that
Christoph (or someone else) can easily fix it.

--
Jens Axboe

2007-10-05 19:28:17

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Fri, 5 Oct 2007, Matthew Wilcox wrote:

> I vaguely remembered something called orasim, so I went looking for it.
> I found http://oss.oracle.com/~wcoekaer/orasim/ which is dated from
> 2004, and I found http://oss.oracle.com/projects/orasimjobfiles/ which
> seems to be a stillborn project. Is there anything else I should know
> about orasim? ;-)

Too bad. If this worked then I would have a load to work against. I
have a patch here that may address the issue for SMP (no NUMA for now) by
batching all frees on the per cpu freelist and then dumping them in
groups. But it is likely not too wise to have you run your weeklong
tests on this one. It needs some more care first.



2007-10-05 19:31:34

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

On Fri, 5 Oct 2007, Jens Axboe wrote:

> It might not, or it might. The point is trying to isolate the problem and
> making a simple test case that could be used to reproduce it, so that
> Christoph (or someone else) can easily fix it.

In case there is someone who wants to hack on it: here is what I have so
far for batching the frees. I will try to come up with a test next week
if nothing else happens before then:

Patch 1/2 on top of mm:

SLUB: Keep counter of remaining objects on the per cpu list

Add a counter to keep track of how many objects are on the per cpu list.

Signed-off-by: Christoph Lameter <[email protected]>

---
include/linux/slub_def.h | 1 +
mm/slub.c | 8 ++++++--
2 files changed, 7 insertions(+), 2 deletions(-)

Index: linux-2.6.23-rc8-mm2/include/linux/slub_def.h
===================================================================
--- linux-2.6.23-rc8-mm2.orig/include/linux/slub_def.h 2007-10-04 22:41:58.000000000 -0700
+++ linux-2.6.23-rc8-mm2/include/linux/slub_def.h 2007-10-04 22:42:08.000000000 -0700
@@ -15,6 +15,7 @@ struct kmem_cache_cpu {
void **freelist;
struct page *page;
int node;
+ int remaining;
unsigned int offset;
unsigned int objsize;
};
Index: linux-2.6.23-rc8-mm2/mm/slub.c
===================================================================
--- linux-2.6.23-rc8-mm2.orig/mm/slub.c 2007-10-04 22:41:58.000000000 -0700
+++ linux-2.6.23-rc8-mm2/mm/slub.c 2007-10-04 22:42:08.000000000 -0700
@@ -1386,12 +1386,13 @@ static void deactivate_slab(struct kmem_
* because both freelists are empty. So this is unlikely
* to occur.
*/
- while (unlikely(c->freelist)) {
+ while (unlikely(c->remaining)) {
void **object;

/* Retrieve object from cpu_freelist */
object = c->freelist;
c->freelist = c->freelist[c->offset];
+ c->remaining--;

/* And put onto the regular freelist */
object[c->offset] = page->freelist;
@@ -1491,6 +1492,7 @@ load_freelist:

object = c->page->freelist;
c->freelist = object[c->offset];
+ c->remaining = s->objects - c->page->inuse - 1;
c->page->inuse = s->objects;
c->page->freelist = NULL;
c->node = page_to_nid(c->page);
@@ -1574,13 +1576,14 @@ static void __always_inline *slab_alloc(

local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
- if (unlikely(!c->freelist || !node_match(c, node)))
+ if (unlikely(!c->remaining || !node_match(c, node)))

object = __slab_alloc(s, gfpflags, node, addr, c);

else {
object = c->freelist;
c->freelist = object[c->offset];
+ c->remaining--;
}
local_irq_restore(flags);

@@ -1686,6 +1689,7 @@ static void __always_inline slab_free(st
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
+ c->remaining++;
} else
__slab_free(s, page, x, addr, c->offset);



2007-10-05 19:33:16

by Christoph Lameter

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

Patch 2/2


SLUB: Allow foreign objects on the per cpu object lists.

In order to free an object we need to touch the page struct of the page that
the object belongs to. If this occurs too frequently then we can end up with
a bouncing cacheline.

We already avoid touching the page struct for per cpu objects. Now we extend
that to allow a limited number of objects that are not part of the cpu slab:
up to DRAIN_FACTOR (4) times the number of objects that fit into a slab may
accumulate on the per cpu list (for example, with 16 objects per slab the
list may grow to 64 objects before it is drained).

If the objects are reallocated before we would otherwise have had to drain
them, then we have saved touching a page struct twice. The objects are
presumably cache hot, so it is good for performance to recycle them locally.

Foreign objects are drained before deactivating cpu slabs and whenever too
many objects accumulate.

For kmem_cache_free() this also has the beneficial effect of getting
virt_to_page() operations eliminated or grouped together, which may help
reduce the cache footprint and increase the speed of virt_to_page() lookups
(they hopefully all come from the same pages).

For kfree() we may have to do virt_to_page() twice in the worst case, but
the second lookup at least occurs grouped together with others during the
drain.

Signed-off-by: Christoph Lameter <[email protected]>

---
include/linux/slub_def.h | 1
mm/slub.c | 82 ++++++++++++++++++++++++++++++++++++++---------
2 files changed, 68 insertions(+), 15 deletions(-)

Index: linux-2.6.23-rc8-mm2/include/linux/slub_def.h
===================================================================
--- linux-2.6.23-rc8-mm2.orig/include/linux/slub_def.h 2007-10-04 22:42:08.000000000 -0700
+++ linux-2.6.23-rc8-mm2/include/linux/slub_def.h 2007-10-04 22:43:19.000000000 -0700
@@ -16,6 +16,7 @@ struct kmem_cache_cpu {
struct page *page;
int node;
int remaining;
+ int drain_limit;
unsigned int offset;
unsigned int objsize;
};
Index: linux-2.6.23-rc8-mm2/mm/slub.c
===================================================================
--- linux-2.6.23-rc8-mm2.orig/mm/slub.c 2007-10-04 22:42:08.000000000 -0700
+++ linux-2.6.23-rc8-mm2/mm/slub.c 2007-10-04 22:56:49.000000000 -0700
@@ -187,6 +187,12 @@ static inline void ClearSlabDebug(struct
*/
#define MAX_PARTIAL 10

+/*
+ * How many times the number of objects per slab can accumulate on the
+ * per cpu objects list before we drain it.
+ */
+#define DRAIN_FACTOR 4
+
#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_STORE_USER)

@@ -1375,6 +1381,54 @@ static void unfreeze_slab(struct kmem_ca
}
}

+static void __slab_free(struct kmem_cache *s, struct page *page,
+ void *x, void *addr, unsigned int offset);
+
+/*
+ * Drain freelist of objects foreign to the slab. Interrupts must be off.
+ *
+ * This is called
+ *
+ * 1. Before taking the slub lock when a cpu slab is to be deactivated.
+ * Deactivation can only deal with native objects on the freelist.
+ *
+ * 2. If the number of objects in the per cpu structures grows beyond
+ * DRAIN_FACTOR times the objects that fit in a slab. In that case we
+ * need to throw some objects away. Stripping the foreign objects does
+ * the job and localizes any new allocations.
+ */
+static void drain_foreign(struct kmem_cache *s, struct kmem_cache_cpu *c, void *addr)
+{
+ void **freelist = c->freelist;
+
+ if (unlikely(c->node < 0)) {
+ /* Slow path user */
+ __slab_free(s, virt_to_head_page(freelist), freelist, addr, c->offset);
+ freelist = NULL;
+ c->remaining--;
+ }
+
+ if (!freelist)
+ return;
+
+ c->freelist = NULL;
+ c->remaining = 0;
+
+ while (freelist) {
+ void **object = freelist;
+ struct page *page = virt_to_head_page(freelist);
+
+ freelist = freelist[c->offset];
+ if (page == c->page) {
+ /* Local object. Keep for future allocations */
+ object[c->offset] = c->freelist;
+ c->freelist = object;
+ c->remaining++;
+ } else
+ __slab_free(s, page, object, NULL, c->offset);
+ }
+}
+
/*
* Remove the cpu slab
*/
@@ -1405,6 +1459,7 @@ static void deactivate_slab(struct kmem_

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
+ drain_foreign(s, c, NULL);
slab_lock(c->page);
deactivate_slab(s, c);
}
@@ -1480,6 +1535,7 @@ static void *__slab_alloc(struct kmem_ca
if (!c->page)
goto new_slab;

+ drain_foreign(s, c, NULL);
slab_lock(c->page);
if (unlikely(!node_match(c, node)))
goto another_slab;
@@ -1553,6 +1609,7 @@ debug:
c->page->inuse++;
c->page->freelist = object[c->offset];
c->node = -1;
+ c->remaining = s->objects * 64;
slab_unlock(c->page);
return object;
}
@@ -1676,8 +1733,8 @@ debug:
* If fastpath is not possible then fall back to __slab_free where we deal
* with all sorts of special processing.
*/
-static void __always_inline slab_free(struct kmem_cache *s,
- struct page *page, void *x, void *addr)
+static void __always_inline slab_free(struct kmem_cache *s, void *x,
+ void *addr)
{
void **object = (void *)x;
unsigned long flags;
@@ -1686,23 +1743,17 @@ static void __always_inline slab_free(st
local_irq_save(flags);
debug_check_no_locks_freed(object, s->objsize);
c = get_cpu_slab(s, smp_processor_id());
- if (likely(page == c->page && c->node >= 0)) {
- object[c->offset] = c->freelist;
- c->freelist = object;
- c->remaining++;
- } else
- __slab_free(s, page, x, addr, c->offset);
-
+ object[c->offset] = c->freelist;
+ c->freelist = object;
+ c->remaining++;
+ if (unlikely(c->remaining >= c->drain_limit))
+ drain_foreign(s, c, addr);
local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
- struct page *page;
-
- page = virt_to_head_page(x);
-
- slab_free(s, page, x, __builtin_return_address(0));
+ slab_free(s, x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_free);

@@ -1879,6 +1930,7 @@ static void init_kmem_cache_cpu(struct k
c->node = 0;
c->offset = s->offset / sizeof(void *);
c->objsize = s->objsize;
+ c->drain_limit = DRAIN_FACTOR * s->objects;
}

static void init_kmem_cache_node(struct kmem_cache_node *n)
@@ -2626,7 +2678,7 @@ void kfree(const void *x)
put_page(page);
return;
}
- slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
+ slab_free(page->slab, (void *)x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kfree);


2007-10-05 20:32:31

by Peter Zijlstra

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB


On Thu, 2007-10-04 at 17:02 -0400, Chuck Ebbert wrote:
> On 10/04/2007 04:55 PM, David Miller wrote:
> >
> > Anything, I do mean anything, can be simulated using small test
> > programs.
>
> How do you simulate reading 100TB of data spread across 3000 disks,
> selecting 10% of it using some criterion, then sorting and summarizing
> the result?

Focus on the slab allocator usage, instrument it, record a trace,
generate a statistical model that matches, and write a small
program/kernel module that has the same allocation pattern. Then verify
that this statistical workload still shows the same performance difference.

Easy: no
Doable: yes
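
To illustrate the last step, a toy replay module might look like the
following (my sketch, not something Peter posted; the pattern table is
invented, and a real run would generate it from the recorded trace):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

struct replay_op {
	int alloc;	/* 1 = kmalloc into slot, 0 = kfree slot */
	size_t size;	/* allocation size, unused for frees */
	int slot;	/* index into the table of live objects */
};

/* Invented pattern; replace with ops reduced from the trace. */
static struct replay_op pattern[] = {
	{ 1, 256, 0 }, { 1, 256, 1 }, { 0, 0, 0 },
	{ 1, 512, 2 }, { 0, 0, 1 }, { 0, 0, 2 },
};

static void *live[16];

static int __init replay_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pattern); i++) {
		struct replay_op *op = &pattern[i];

		if (op->alloc)
			live[op->slot] = kmalloc(op->size, GFP_KERNEL);
		else {
			kfree(live[op->slot]);
			live[op->slot] = NULL;
		}
	}
	return 0;
}

static void __exit replay_exit(void)
{
	int i;

	/* kfree(NULL) is a no-op, so freeing every slot is safe */
	for (i = 0; i < ARRAY_SIZE(live); i++)
		kfree(live[i]);
}

module_init(replay_init);
module_exit(replay_exit);
MODULE_LICENSE("GPL");

Spreading the replay across CPUs (e.g. via kernel threads bound with
kthread_bind()) would then be needed to mimic the alloc-on-one-cpu,
free-on-another pattern under discussion.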



2007-10-05 21:31:35

by David Miller

[permalink] [raw]
Subject: Re: SLUB performance regression vs SLAB

From: Peter Zijlstra <[email protected]>
Date: Fri, 05 Oct 2007 22:32:00 +0200

> Focus on the slab allocator usage, instrument it, record a trace,
> generate a statistical model that matches, and write a small
> program/kernel module that has the same allocation pattern. Then verify
> that this statistical workload still shows the same performance difference.
>
> Easy: no
> Doable: yes

The other important bit is likely generating a lot of DMA traffic, such
that L2 cache bandwidth is consumed on the bus side by the PCI
controller doing invalidations of both dirty and clean L2 cache lines
as devices DMA to/from them.

This will also exercise the memory controller, further contending
with the cpu when SLAB touches cold data structures.
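
One crude userspace way to approximate that is to run O_DIRECT reads
alongside the allocator test, so the disk controller is DMAing into
memory the whole time. A sketch (my assumption-laden illustration:
/dev/sda is an arbitrary device choice, the run needs read access to
it, and the kernel must support O_DIRECT there):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define CHUNK	(1 << 20)	/* 1MB per read */

int main(void)
{
	void *buf;
	int fd, i;

	fd = open("/dev/sda", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;
	/* O_DIRECT buffers must be suitably aligned */
	if (posix_memalign(&buf, 4096, CHUNK))
		return 1;
	for (i = 0; i < 4096; i++)	/* ~4GB of DMAed reads */
		if (read(fd, buf, CHUNK) <= 0)
			break;
	close(fd);
	return 0;
}

Running a few instances of this against different disks while the slab
test runs should keep the bus busy with DMA and the associated cache
line invalidations.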