Message-Id: <20150710120259.836414367@linutronix.de>
User-Agent: quilt/0.63-1
Date: Fri, 10 Jul 2015 12:07:13 -0000
From: Thomas Gleixner
To: LKML
Cc: Christoph Lameter, Pekka Enberg, David Rientjes, Joonsoo Kim,
    Andrew Morton, linux-mm@kvack.org, Sebastian Andrzej Siewior,
    Steven Rostedt, Peter Zijlstra
Subject: [patch] mm/slub: Move slab initialization into irq enabled region
MIME-Version: 1.0
Content-Type: text/plain; charset=ISO-8859-15
Content-Disposition: inline; filename=slub-move-slab-init-into-irq-enabled-region.patch

Initializing a new slab can introduce rather large latencies because most
of the initialization always runs with interrupts disabled. There is no
point in doing so. The newly allocated slab is not visible yet, so there
is no reason to protect it against concurrent alloc/free.

Move the expensive parts of the initialization into allocate_slab(), so
that for all allocations with __GFP_WAIT set, interrupts are enabled
while the slab is initialized. (A stand-alone sketch of the resulting
flow is appended after the patch.)

Signed-off-by: Thomas Gleixner

---
 mm/slub.c |   85 +++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 43 insertions(+), 42 deletions(-)

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct
         kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+                                void *object)
+{
+        setup_object_debug(s, page, object);
+        if (unlikely(s->ctor)) {
+                kasan_unpoison_object_data(s, object);
+                s->ctor(object);
+                kasan_poison_object_data(s, object);
+        }
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
         struct page *page;
         struct kmem_cache_order_objects oo = s->oo;
         gfp_t alloc_gfp;
+        void *start, *p;
+        int idx, order;
 
         flags &= gfp_allowed_mask;
 
@@ -1364,8 +1377,11 @@ static struct page *allocate_slab(struct
                 stat(s, ORDER_FALLBACK);
         }
 
-        if (kmemcheck_enabled && page
-                && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+        if (!page)
+                goto out;
+
+        if (kmemcheck_enabled &&
+            !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
                 int pages = 1 << oo_order(oo);
 
                 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1396,12 @@ static struct page *allocate_slab(struct
                         kmemcheck_mark_unallocated_pages(page, pages);
         }
 
-        if (flags & __GFP_WAIT)
-                local_irq_disable();
         if (!page)
-                return NULL;
+                goto out;
 
         page->objects = oo_objects(oo);
-        mod_zone_page_state(page_zone(page),
-                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-                1 << oo_order(oo));
-
-        return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-                                void *object)
-{
-        setup_object_debug(s, page, object);
-        if (unlikely(s->ctor)) {
-                kasan_unpoison_object_data(s, object);
-                s->ctor(object);
-                kasan_poison_object_data(s, object);
-        }
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-        struct page *page;
-        void *start;
-        void *p;
-        int order;
-        int idx;
-
-        if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-                pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-                BUG();
-        }
-
-        page = allocate_slab(s,
-                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-        if (!page)
-                goto out;
 
         order = compound_order(page);
-        inc_slabs_node(s, page_to_nid(page), page->objects);
         page->slab_cache = s;
         __SetPageSlab(page);
         if (page->pfmemalloc)
@@ -1448,10 +1425,34 @@ static struct page *new_slab(struct kmem
         page->freelist = start;
         page->inuse = page->objects;
         page->frozen = 1;
+
 out:
+        if (flags & __GFP_WAIT)
+                local_irq_disable();
+        if (!page)
+                return NULL;
+
+        mod_zone_page_state(page_zone(page),
+                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+                1 << oo_order(oo));
+
+        inc_slabs_node(s, page_to_nid(page), page->objects);
+
         return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+        if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+                pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+                BUG();
+        }
+
+        return allocate_slab(s,
+                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
         int order = compound_order(page);
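
The resulting allocate_slab() control flow can be condensed into the
stand-alone sketch below. All names in it (toy_allocate_slab,
fake_local_irq_enable/disable, TOY_GFP_WAIT, struct toy_slab) are made-up
stand-ins rather than mm/slub.c code; the sketch only illustrates the
ordering the patch establishes: the expensive initialization of the
not-yet-visible slab runs with interrupts enabled, and interrupts are
re-disabled only in the single exit path for __GFP_WAIT allocations.

/*
 * Illustrative sketch only -- none of these names exist in mm/slub.c.
 * fake_local_irq_{enable,disable}() stand in for the real
 * local_irq_enable()/local_irq_disable().
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_GFP_WAIT 0x1                        /* stand-in for __GFP_WAIT */

static void fake_local_irq_enable(void)  { puts("irqs enabled");  }
static void fake_local_irq_disable(void) { puts("irqs disabled"); }

struct toy_slab {
        void *objects;
        int nr_objects;
};

static struct toy_slab *toy_allocate_slab(unsigned int flags, int nr_objects)
{
        struct toy_slab *slab;

        if (flags & TOY_GFP_WAIT)
                fake_local_irq_enable();        /* the allocation may sleep */

        slab = malloc(sizeof(*slab));
        if (!slab)
                goto out;

        /*
         * Expensive initialization runs here, still with interrupts
         * enabled: the slab is not published anywhere yet, so nothing can
         * race with it. This is the work the patch moves out of the
         * irq-disabled region.
         */
        slab->nr_objects = nr_objects;
        slab->objects = calloc(nr_objects, 64);
        if (!slab->objects) {
                free(slab);
                slab = NULL;
        }
out:
        /* Single exit path: restore the irq state the caller expects. */
        if (flags & TOY_GFP_WAIT)
                fake_local_irq_disable();
        return slab;
}

int main(void)
{
        struct toy_slab *slab = toy_allocate_slab(TOY_GFP_WAIT, 16);

        printf("toy slab %s\n",
               slab ? "initialized with irqs enabled" : "allocation failed");
        if (slab) {
                free(slab->objects);
                free(slab);
        }
        return 0;
}

In the actual patch the counter updates (mod_zone_page_state(),
inc_slabs_node()) stay after the final local_irq_disable(); only the
object initialization (ctor, kasan poisoning, freelist setup) is moved in
front of it.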