Date: Thu, 27 Oct 2016 13:08:03 +0200
From: Vitaly Wool
To: Linux-MM, linux-kernel@vger.kernel.org
Cc: Dan Streetman, Andrew Morton
Subject: [PATCHv3 1/3] z3fold: make counters atomic
Message-Id: <20161027130803.82fa7db8f649b5977190208b@gmail.com>
In-Reply-To: <20161027130647.782b8ab1f71555200ba15605@gmail.com>
References: <20161027130647.782b8ab1f71555200ba15605@gmail.com>

This patch converts the pages_nr per-pool counter to atomic64_t. It also
introduces a new atomic64_t counter, unbuddied_nr, to track the number of
unbuddied (compactable) z3fold pages in the pool.

Signed-off-by: Vitaly Wool
---
 mm/z3fold.c | 33 +++++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 8f9e89c..5ac325a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -69,6 +69,7 @@ struct z3fold_ops {
  * @lru:	list tracking the z3fold pages in LRU order by most recently
  *		added buddy.
  * @pages_nr:	number of z3fold pages in the pool.
+ * @unbuddied_nr:	number of unbuddied z3fold pages in the pool.
  * @ops:	pointer to a structure of user defined operations specified at
  *		pool creation time.
  *
@@ -80,7 +81,8 @@ struct z3fold_pool {
 	struct list_head unbuddied[NCHUNKS];
 	struct list_head buddied;
 	struct list_head lru;
-	u64 pages_nr;
+	atomic64_t pages_nr;
+	atomic64_t unbuddied_nr;
 	const struct z3fold_ops *ops;
 	struct zpool *zpool;
 	const struct zpool_ops *zpool_ops;
@@ -234,7 +236,8 @@ static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
 		INIT_LIST_HEAD(&pool->unbuddied[i]);
 	INIT_LIST_HEAD(&pool->buddied);
 	INIT_LIST_HEAD(&pool->lru);
-	pool->pages_nr = 0;
+	atomic64_set(&pool->pages_nr, 0);
+	atomic64_set(&pool->unbuddied_nr, 0);
 	pool->ops = ops;
 	return pool;
 }
@@ -334,6 +337,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 				continue;
 			}
 			list_del(&zhdr->buddy);
+			atomic64_dec(&pool->unbuddied_nr);
 			goto found;
 		}
 	}
@@ -346,7 +350,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 	if (!page)
 		return -ENOMEM;
 	spin_lock(&pool->lock);
-	pool->pages_nr++;
+	atomic64_inc(&pool->pages_nr);
 	zhdr = init_z3fold_page(page);
 
 	if (bud == HEADLESS) {
@@ -369,6 +373,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 		/* Add to unbuddied list */
 		freechunks = num_free_chunks(zhdr);
 		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		atomic64_inc(&pool->unbuddied_nr);
 	} else {
 		/* Add to buddied list */
 		list_add(&zhdr->buddy, &pool->buddied);
@@ -412,6 +417,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		/* HEADLESS page stored */
 		bud = HEADLESS;
 	} else {
+		bool is_unbuddied = zhdr->first_chunks == 0 ||
+				zhdr->middle_chunks == 0 ||
+				zhdr->last_chunks == 0;
+
 		bud = handle_to_buddy(handle);
 
 		switch (bud) {
@@ -431,6 +440,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 			spin_unlock(&pool->lock);
 			return;
 		}
+		if (is_unbuddied)
+			atomic64_dec(&pool->unbuddied_nr);
 	}
 
 	if (test_bit(UNDER_RECLAIM, &page->private)) {
@@ -451,12 +462,13 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		list_del(&page->lru);
 		clear_bit(PAGE_HEADLESS, &page->private);
 		free_z3fold_page(zhdr);
-		pool->pages_nr--;
+		atomic64_dec(&pool->pages_nr);
 	} else {
 		z3fold_compact_page(zhdr);
 		/* Add to the unbuddied list */
 		freechunks = num_free_chunks(zhdr);
 		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+		atomic64_inc(&pool->unbuddied_nr);
 	}
 
 	spin_unlock(&pool->lock);
@@ -520,6 +532,11 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 		zhdr = page_address(page);
 		if (!test_bit(PAGE_HEADLESS, &page->private)) {
 			list_del(&zhdr->buddy);
+			if (zhdr->first_chunks == 0 ||
+			    zhdr->middle_chunks == 0 ||
+			    zhdr->last_chunks == 0)
+				atomic64_dec(&pool->unbuddied_nr);
+
 			/*
 			 * We need encode the handles before unlocking, since
 			 * we can race with free that will set
@@ -569,7 +586,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 			 */
 			clear_bit(PAGE_HEADLESS, &page->private);
 			free_z3fold_page(zhdr);
-			pool->pages_nr--;
+			atomic64_dec(&pool->pages_nr);
 			spin_unlock(&pool->lock);
 			return 0;
 		} else if (!test_bit(PAGE_HEADLESS, &page->private)) {
@@ -584,6 +601,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
 				freechunks = num_free_chunks(zhdr);
 				list_add(&zhdr->buddy,
 					 &pool->unbuddied[freechunks]);
+				atomic64_inc(&pool->unbuddied_nr);
 			}
 		}
 
@@ -672,12 +690,11 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
  * z3fold_get_pool_size() - gets the z3fold pool size in pages
  * @pool:	pool whose size is being queried
  *
- * Returns: size in pages of the given pool. The pool lock need not be
- * taken to access pages_nr.
+ * Returns: size in pages of the given pool.
  */
 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
 {
-	return pool->pages_nr;
+	return atomic64_read(&pool->pages_nr);
 }
 
 /*****************
-- 
2.4.2
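
Editorial note, not part of the patch: for readers unfamiliar with the pattern, below is a
minimal userspace C sketch of the idea the patch applies, i.e. keeping per-pool statistics
in atomic counters so they can be bumped from concurrent allocation/free/reclaim paths and
read without taking the pool lock. The names here (pool_stats, page_added, page_freed,
pool_size) are hypothetical and only illustrate the scheme; the kernel code itself uses
atomic64_t with atomic64_set/inc/dec/read.

	#include <inttypes.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct pool_stats {
		atomic_int_fast64_t pages_nr;     /* total pages in the pool */
		atomic_int_fast64_t unbuddied_nr; /* pages with at least one free buddy slot */
	};

	static void stats_init(struct pool_stats *s)
	{
		atomic_init(&s->pages_nr, 0);
		atomic_init(&s->unbuddied_nr, 0);
	}

	/* Called from allocation paths, possibly concurrently. */
	static void page_added(struct pool_stats *s, int unbuddied)
	{
		atomic_fetch_add(&s->pages_nr, 1);
		if (unbuddied)
			atomic_fetch_add(&s->unbuddied_nr, 1);
	}

	/* Called from free/reclaim paths, possibly concurrently. */
	static void page_freed(struct pool_stats *s, int was_unbuddied)
	{
		atomic_fetch_sub(&s->pages_nr, 1);
		if (was_unbuddied)
			atomic_fetch_sub(&s->unbuddied_nr, 1);
	}

	/* Lock-free read, analogous to z3fold_get_pool_size() after this patch. */
	static int64_t pool_size(struct pool_stats *s)
	{
		return atomic_load(&s->pages_nr);
	}

	int main(void)
	{
		struct pool_stats s;

		stats_init(&s);
		page_added(&s, 1);  /* e.g. a page with only one buddy in use */
		page_added(&s, 0);  /* a fully buddied page */
		page_freed(&s, 1);
		printf("pages in pool: %" PRId64 "\n", (int64_t)pool_size(&s));
		return 0;
	}

As in the patch, the counters stay consistent as long as every path that adds or removes a
page (or changes its unbuddied state) performs the matching atomic update.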