Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1752730AbbD0HVq (ORCPT ); Mon, 27 Apr 2015 03:21:46 -0400 Received: from lgeamrelo04.lge.com ([156.147.1.127]:64926 "EHLO lgeamrelo04.lge.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1751666AbbD0HVo (ORCPT ); Mon, 27 Apr 2015 03:21:44 -0400 X-Original-SENDERIP: 10.177.220.157 X-Original-MAILFROM: iamjoonsoo.kim@lge.com From: Joonsoo Kim To: Andrew Morton Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org, Vlastimil Babka , Mel Gorman , Johannes Weiner , Rik van Riel , Joonsoo Kim Subject: [PATCH 1/3] mm/page_alloc: don't break highest order freepage if steal Date: Mon, 27 Apr 2015 16:23:39 +0900 Message-Id: <1430119421-13536-1-git-send-email-iamjoonsoo.kim@lge.com> X-Mailer: git-send-email 1.9.1 Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 4028 Lines: 131 When we steal a whole pageblock, we don't need to break the highest-order freepage: there may already be a smaller-order freepage available that we can use instead. This also gives us some code size reduction, because expand(), which is called from __rmqueue_fallback() and inlined into it, is removed. 
text data bss dec hex filename 37413 1440 624 39477 9a35 mm/page_alloc.o 37249 1440 624 39313 9991 mm/page_alloc.o Signed-off-by: Joonsoo Kim --- mm/page_alloc.c | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ed0f1c6..044f16c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1239,14 +1239,14 @@ int find_suitable_fallback(struct free_area *area, unsigned int order, } /* Remove an element from the buddy allocator from the fallback list */ -static inline struct page * +static inline bool __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) { struct free_area *area; unsigned int current_order; struct page *page; int fallback_mt; - bool can_steal; + bool can_steal_pageblock; /* Find the largest possible block of pages in the other list */ for (current_order = MAX_ORDER-1; @@ -1254,26 +1254,24 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) --current_order) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, - start_migratetype, false, &can_steal); + start_migratetype, false, + &can_steal_pageblock); if (fallback_mt == -1) continue; page = list_entry(area->free_list[fallback_mt].next, struct page, lru); - if (can_steal) + BUG_ON(!page); + + if (can_steal_pageblock) steal_suitable_fallback(zone, page, start_migratetype); - /* Remove the page from the freelists */ - area->nr_free--; - list_del(&page->lru); - rmv_page_order(page); + list_move(&page->lru, &area->free_list[start_migratetype]); - expand(zone, page, order, current_order, area, - start_migratetype); /* * The freepage_migratetype may differ from pageblock's * migratetype depending on the decisions in - * try_to_steal_freepages(). This is OK as long as it + * find_suitable_fallback(). This is OK as long as it * does not differ for MIGRATE_CMA pageblocks. 
For CMA * we need to make sure unallocated pages flushed from * pcp lists are returned to the correct freelist. @@ -1283,10 +1281,10 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); - return page; + return true; } - return NULL; + return false; } /* @@ -1297,28 +1295,32 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order, int migratetype) { struct page *page; + bool steal_fallback; -retry_reserve: +retry: page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { if (migratetype == MIGRATE_MOVABLE) page = __rmqueue_cma_fallback(zone, order); - if (!page) - page = __rmqueue_fallback(zone, order, migratetype); + if (page) + goto out; + + steal_fallback = __rmqueue_fallback(zone, order, migratetype); /* * Use MIGRATE_RESERVE rather than fail an allocation. goto * is used because __rmqueue_smallest is an inline function * and we want just one call site */ - if (!page) { + if (!steal_fallback) migratetype = MIGRATE_RESERVE; - goto retry_reserve; - } + + goto retry; } +out: trace_mm_page_alloc_zone_locked(page, order, migratetype); return page; } -- 1.9.1 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/