From: Mel Gorman <mgorman@techsingularity.net>
To: Linux-MM
Cc: Linux-RT-Users, LKML, Chuck Lever, Jesper Dangaard Brouer,
	Matthew Wilcox, Mel Gorman
Subject: [PATCH 5/6] mm/page_alloc: Batch the accounting updates in the bulk allocator
Date: Mon, 29 Mar 2021 13:06:47 +0100
Message-Id: <20210329120648.19040-6-mgorman@techsingularity.net>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20210329120648.19040-1-mgorman@techsingularity.net>
References: <20210329120648.19040-1-mgorman@techsingularity.net>

Now that the zone_statistics are simple counters that do not require
special protection, the accounting updates in the bulk allocator can be
batched without requiring IRQs to be disabled.
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 include/linux/vmstat.h |  8 ++++++++
 mm/page_alloc.c        | 30 +++++++++++++-----------------
 2 files changed, 21 insertions(+), 17 deletions(-)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index dde4dec4e7dd..8473b8fa9756 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -246,6 +246,14 @@ __count_numa_event(struct zone *zone, enum numa_stat_item item)
 	raw_cpu_inc(pzstats->vm_numa_event[item]);
 }
 
+static inline void
+__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
+{
+	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
+
+	raw_cpu_add(pzstats->vm_numa_event[item], delta);
+}
+
 extern void __count_numa_event(struct zone *zone, enum numa_stat_item item);
 extern unsigned long sum_zone_node_page_state(int node,
 					enum zone_stat_item item);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7eb48632bcac..32c64839c145 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3398,7 +3398,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
  *
  * Must be called with interrupts disabled.
  */
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
+				   long nr_account)
 {
 #ifdef CONFIG_NUMA
 	enum numa_stat_item local_stat = NUMA_LOCAL;
@@ -3411,12 +3412,12 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 		local_stat = NUMA_OTHER;
 
 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
-		__count_numa_event(z, NUMA_HIT);
+		__count_numa_events(z, NUMA_HIT, nr_account);
 	else {
-		__count_numa_event(z, NUMA_MISS);
-		__count_numa_event(preferred_zone, NUMA_FOREIGN);
+		__count_numa_events(z, NUMA_MISS, nr_account);
+		__count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
 	}
-	__count_numa_event(z, local_stat);
+	__count_numa_events(z, local_stat, nr_account);
 #endif
 }
 
@@ -3462,7 +3463,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
-		zone_statistics(preferred_zone, zone);
+		zone_statistics(preferred_zone, zone, 1);
 	}
 	local_unlock_irqrestore(&pagesets.lock, flags);
 	return page;
@@ -3523,7 +3524,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 				  get_pcppage_migratetype(page));
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-	zone_statistics(preferred_zone, zone);
+	zone_statistics(preferred_zone, zone, 1);
 	local_irq_restore(flags);
 
 out:
@@ -5006,7 +5007,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	struct alloc_context ac;
 	gfp_t alloc_gfp;
 	unsigned int alloc_flags;
-	int nr_populated = 0;
+	int nr_populated = 0, nr_account = 0;
 
 	if (unlikely(nr_pages <= 0))
 		return 0;
@@ -5079,15 +5080,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 				goto failed_irq;
 			break;
 		}
-
-		/*
-		 * Ideally this would be batched but the best way to do
-		 * that cheaply is to first convert zone_statistics to
-		 * be inaccurate per-cpu counter like vm_events to avoid
-		 * a RMW cycle then do the accounting with IRQs enabled.
-		 */
-		__count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
-		zone_statistics(ac.preferred_zoneref->zone, zone);
+		nr_account++;
 
 		prep_new_page(page, 0, gfp, 0);
 		if (page_list)
@@ -5097,6 +5090,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		nr_populated++;
 	}
 
+	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
+	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+
 	local_unlock_irqrestore(&pagesets.lock, flags);
 
 	return nr_populated;
-- 
2.26.2
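
The pattern the patch applies is independent of the kernel's statistics
machinery: count locally inside the allocation loop, then fold the total
into the shared counter with a single update once the batch is done. The
userspace sketch below illustrates that shape only; it is an analogy, not
kernel code, and every name in it (stat_add, alloc_one, alloc_bulk,
pgalloc_events) is hypothetical, with a relaxed C11 atomic standing in for
the raw_cpu_add() per-cpu update.

/*
 * Illustrative userspace sketch of the batching pattern, compiled as C11.
 * Not kernel code: a relaxed atomic plays the role of the per-cpu counter.
 */
#include <stdatomic.h>
#include <stddef.h>

static _Atomic long pgalloc_events;	/* stands in for the PGALLOC event */

/* One RMW per batch instead of one per allocation. */
static void stat_add(long delta)
{
	atomic_fetch_add_explicit(&pgalloc_events, delta,
				  memory_order_relaxed);
}

/* Toy allocator: hands out fixed-size chunks, nonzero on success. */
static int alloc_one(void **page)
{
	static char pool[4096][64];
	static size_t next;

	if (next >= 4096)
		return 0;
	*page = pool[next++];
	return 1;
}

/* Bulk allocate up to nr_pages, deferring the accounting. */
size_t alloc_bulk(void **pages, size_t nr_pages)
{
	size_t nr_populated = 0;
	long nr_account = 0;

	while (nr_populated < nr_pages) {
		if (!alloc_one(&pages[nr_populated]))
			break;
		nr_account++;		/* local increment, no shared RMW */
		nr_populated++;
	}

	stat_add(nr_account);		/* single batched update */
	return nr_populated;
}

The point of this shape is that the per-page cost inside the loop drops to
a plain local increment; the one remaining shared update is amortised over
the whole batch, which is why converting zone_statistics to vm_events-style
"inaccurate" per-cpu counters in the earlier patches is what makes the
batching safe to do with IRQs enabled.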