From: Glauber Costa
To: 
Cc: Andrew Morton, Johannes Weiner, Tejun Heo, Michal Hocko,
	Christoph Lameter, Pekka Enberg, David Rientjes, Glauber Costa,
	Suleiman Souhlal
Subject: [PATCH v6 22/29] sl[au]b: Allocate objects from memcg cache
Date: Thu, 1 Nov 2012 16:07:38 +0400
Message-Id: <1351771665-11076-23-git-send-email-glommer@parallels.com>
X-Mailer: git-send-email 1.7.11.7
In-Reply-To: <1351771665-11076-1-git-send-email-glommer@parallels.com>
References: <1351771665-11076-1-git-send-email-glommer@parallels.com>

We are now able to match a cache allocation to a particular memcg.
Provided the task doesn't migrate between groups during the allocation
itself - a rare event - this gives us a good picture of which group is
the first to touch a cache page.

This patch uses that now-available infrastructure by calling
memcg_kmem_get_cache() before every cache allocation.

[ v6: simplified kmalloc relay code ]

Signed-off-by: Glauber Costa
CC: Christoph Lameter
CC: Pekka Enberg
CC: Michal Hocko
CC: Kamezawa Hiroyuki
CC: Johannes Weiner
CC: Suleiman Souhlal
CC: Tejun Heo
---
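Note for reviewers, illustration only and not part of the patch: the
sketch below is a minimal picture of the relay these hunks add to the
slab/slub fast paths. memcg_kmem_get_cache() is the helper introduced
earlier in this series; example_slab_alloc() and do_actual_alloc() are
hypothetical stand-ins for the real entry points (slab_alloc(),
__cache_alloc() and __cache_alloc_node()).

#include <linux/slab.h>		/* struct kmem_cache, gfp_t */
#include <linux/memcontrol.h>	/* memcg_kmem_get_cache(), from this series */

/* Hypothetical stand-in for the rest of the allocation path. */
static void *do_actual_alloc(struct kmem_cache *s, gfp_t gfpflags);

static void *example_slab_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	/*
	 * Relay the allocation to the memcg copy of the cache: the
	 * helper returns either @s itself (e.g. for the root cgroup,
	 * or when kmem accounting is not active) or the per-memcg
	 * clone of @s for the current task, so every object - and
	 * every backing page - is charged to the group that touched
	 * it first.
	 */
	s = memcg_kmem_get_cache(s, gfpflags);

	return do_actual_alloc(s, gfpflags);
}
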
 include/linux/slub_def.h | 5 ++++-
 mm/memcontrol.c          | 3 +++
 mm/slab.c                | 6 +++++-
 mm/slub.c                | 7 ++++---
 4 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 961e72e..364ba6c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -225,7 +225,10 @@ void *__kmalloc(size_t size, gfp_t flags);
 static __always_inline void *
 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
 	kmemleak_alloc(ret, size, 1, flags);
 	return ret;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ff42586..f7773ed 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3059,6 +3059,9 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
 	new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor);
 
+	if (new)
+		new->allocflags |= __GFP_KMEMCG;
+
 	kfree(name);
 	return new;
 }
diff --git a/mm/slab.c b/mm/slab.c
index de9cc0d..c5d6937 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1981,7 +1981,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	}
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	free_pages((unsigned long)addr, cachep->gfporder);
+	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -3547,6 +3547,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
@@ -3632,6 +3634,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
diff --git a/mm/slub.c b/mm/slub.c
index 6ff2bdb..8778370 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1405,7 +1405,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	reset_page_mapcount(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	__free_pages(page, order);
+	__free_memcg_kmem_pages(page, order);
 }
 
 #define need_reserve_slab_rcu						\
@@ -2321,6 +2321,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
+	s = memcg_kmem_get_cache(s, gfpflags);
 redo:
 
 	/*
@@ -3353,7 +3354,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	struct page *page;
 	void *ptr = NULL;
 
-	flags |= __GFP_COMP | __GFP_NOTRACK;
+	flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
 	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		ptr = page_address(page);
@@ -3459,7 +3460,7 @@ void kfree(const void *x)
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kmemleak_free(x);
-		__free_pages(page, compound_order(page));
+		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
 	slab_free(page->slab_cache, page, object, _RET_IP_);
-- 
1.7.11.7
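A closing note on the free side, again illustration only: pages
allocated with __GFP_KMEMCG (kmalloc_order() and kmalloc_large_node()
above) have been charged to a memcg, so they must be released through
the uncharging helpers this series introduces - free_memcg_kmem_pages()
and __free_memcg_kmem_pages() - instead of plain free_pages() and
__free_pages(); otherwise the charge would never be undone. The
example_* functions below are hypothetical names showing the pairing.

#include <linux/gfp.h>	/* __GFP_KMEMCG, free_memcg_kmem_pages() per this series */

static void *example_large_alloc(gfp_t flags, unsigned int order)
{
	/* Tag the allocation so its pages are charged to the memcg. */
	flags |= __GFP_COMP | __GFP_KMEMCG;
	return (void *)__get_free_pages(flags, order);
}

static void example_large_free(void *addr, unsigned int order)
{
	/*
	 * Uncharging counterpart of free_pages(); pairing it with a
	 * __GFP_KMEMCG allocation keeps the memcg accounting balanced.
	 */
	free_memcg_kmem_pages((unsigned long)addr, order);
}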