From: js1304@gmail.com
X-Google-Original-From: iamjoonsoo.kim@lge.com
To: Andrew Morton
Cc: Rik van Riel, Johannes Weiner, Laura Abbott, Minchan Kim,
	Marek Szyprowski, Michal Nazarewicz, "Aneesh Kumar K.V",
	Vlastimil Babka, linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Joonsoo Kim
Subject: [PATCH v2 4/6] mm/cma: remove ALLOC_CMA
Date: Mon, 25 Apr 2016 14:21:08 +0900
Message-Id: <1461561670-28012-5-git-send-email-iamjoonsoo.kim@lge.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1461561670-28012-1-git-send-email-iamjoonsoo.kim@lge.com>
References: <1461561670-28012-1-git-send-email-iamjoonsoo.kim@lge.com>

From: Joonsoo Kim

Now, all the pages reserved for the CMA region belong to ZONE_CMA, and
that zone serves only GFP_HIGHUSER_MOVABLE allocations. Therefore, we no
longer need to consider ALLOC_CMA at all.

Signed-off-by: Joonsoo Kim
---
 mm/internal.h   |  3 +--
 mm/page_alloc.c | 18 ++----------------
 2 files changed, 3 insertions(+), 18 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 64e3131..a25d45b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -478,8 +478,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HARDER		0x10 /* try to alloc harder */
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
-#define ALLOC_FAIR		0x100 /* fair zone allocation */
+#define ALLOC_FAIR		0x80 /* fair zone allocation */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0a6a195..69546b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2582,12 +2582,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 	else
 		min -= min / 4;
 
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
 	/*
 	 * Check watermarks for an order-0 allocation request. If these
 	 * are not met, then a high-order request also cannot go ahead
@@ -2617,10 +2611,8 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
 		}
 
 #ifdef CONFIG_CMA
-		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		if (!list_empty(&area->free_list[MIGRATE_CMA]))
 			return true;
-		}
 #endif
 	}
 	return false;
@@ -3217,10 +3209,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 			unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
-#ifdef CONFIG_CMA
-	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
+
 	return alloc_flags;
 }
@@ -3573,9 +3562,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (unlikely(!zonelist->_zonerefs->zone))
 		return NULL;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
-- 
1.9.1