Received: by 2002:a05:6a10:a841:0:0:0:0 with SMTP id d1csp18190pxy; Tue, 20 Apr 2021 19:03:18 -0700 (PDT) X-Google-Smtp-Source: ABdhPJzoETjNE0crOMKC0OxQdo3Lt9BdH2kexn80bzrG2wUWhrJk3sPdVjJotn1417WGZH+GhEyP X-Received: by 2002:a63:f657:: with SMTP id u23mr16211019pgj.304.1618970598333; Tue, 20 Apr 2021 19:03:18 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1618970598; cv=none; d=google.com; s=arc-20160816; b=AUl9qwqi93LXJb7qV4pugJ6jieD6y3Mph9k6vxECpTdybSn2ZGolxaWD3IwSYc9+aD 3TVbz5e8EyQRP0JHY2ACYyWjHAxVnpV5GpU/F+EG6BX1gWMFr8p9nfv/ieGJPMc4PkT6 RnBc6opY1igEhJa8FEGWzyrV4EkIn1RYJFqYpbIvk8uzl0OUwVCKeGYNk+3xPrzZettd rEfwxQYWHD16HcDVgnKd/Fh/c+w6h4/mLkSZk0VROn9El87iUkZMPLTgmSCL5M3zd0bX hkggEiwlm3D5j9fK9R60OozKJmGu73pHGfISfzdv/mvG3asgswulOGQp10CNSnBC/68U 67ag== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:content-language :in-reply-to:mime-version:user-agent:date:message-id:from:references :cc:to:subject; bh=qvoLRhGWKgBGiPo3DcsjycF67VgJukLAjDHFALopbkI=; b=tZ/W1c3NSQ/yCVKUuLAAjFsFbWByu0ZuGENh+zlFQW9nEvdFvFRVu++Na9OZMxcOfS v5/5mDtqN638PIC8RNbLoxx7CLMMXfPp8DBFEEh1yRBcorfnC43oM6WR9++Nh6gCPwPQ P8PMvMwnJEFB0VvgbA/Bpg3G8cV7cbZ9piV/iFq5xAy/20qAmVuXfLID/IDnvzbiracM jnNtoEV50hcYKu57z8KxQ7Y1CjFajLEaQowNO/jJ7keMkV8O3zCcMvHyt77Q9kzVvkwK 5AdxMCvvxSxFVdDBBam53ydK7aPpT9SUo61uamqT75rk1g1O+ylyHWcmM94AcnyMcvvR Tx8g== ARC-Authentication-Results: i=1; mx.google.com; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 23.128.96.18 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=fail (p=NONE sp=NONE dis=NONE) header.from=huawei.com Return-Path: Received: from vger.kernel.org (vger.kernel.org. 
[23.128.96.18]) by mx.google.com with ESMTP id c1si685220pfv.158.2021.04.20.19.03.05; Tue, 20 Apr 2021 19:03:18 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 23.128.96.18 as permitted sender) client-ip=23.128.96.18; Authentication-Results: mx.google.com; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 23.128.96.18 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=fail (p=NONE sp=NONE dis=NONE) header.from=huawei.com Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S234482AbhDUCAl (ORCPT + 99 others); Tue, 20 Apr 2021 22:00:41 -0400 Received: from szxga05-in.huawei.com ([45.249.212.191]:17018 "EHLO szxga05-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233824AbhDUCAj (ORCPT ); Tue, 20 Apr 2021 22:00:39 -0400 Received: from DGGEMS408-HUB.china.huawei.com (unknown [172.30.72.60]) by szxga05-in.huawei.com (SkyGuard) with ESMTP id 4FQ3Zk4pv2zPsXW; Wed, 21 Apr 2021 09:57:02 +0800 (CST) Received: from [10.174.178.5] (10.174.178.5) by DGGEMS408-HUB.china.huawei.com (10.3.19.208) with Microsoft SMTP Server id 14.3.498.0; Wed, 21 Apr 2021 09:59:59 +0800 Subject: Re: [PATCH v3 1/4] mm/swapfile: use percpu_ref to serialize against concurrent swapoff To: "Huang, Ying" CC: , , , , , , , , , , , , , References: <20210420133048.6773-1-linmiaohe@huawei.com> <20210420133048.6773-2-linmiaohe@huawei.com> <87wnsw1m7v.fsf@yhuang6-desk1.ccr.corp.intel.com> From: Miaohe Lin Message-ID: Date: Wed, 21 Apr 2021 09:59:58 +0800 User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Thunderbird/78.6.0 MIME-Version: 1.0 In-Reply-To: <87wnsw1m7v.fsf@yhuang6-desk1.ccr.corp.intel.com> Content-Type: text/plain; charset="windows-1252" Content-Language: en-US Content-Transfer-Encoding: 7bit X-Originating-IP: [10.174.178.5] X-CFilter-Loop: Reflected Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org 
On 2021/4/21 9:05, Huang, Ying wrote: > Miaohe Lin writes: > >> Using current get/put_swap_device() to guard against concurrent swapoff >> for some swap ops, e.g. swap_readpage(), looks terrible because they >> might take really long time. This patch adds the percpu_ref support to >> serialize against concurrent swapoff. Also we remove the SWP_VALID flag >> because it's used together with RCU solution. >> >> Signed-off-by: Miaohe Lin >> --- >> include/linux/swap.h | 5 +-- >> mm/swapfile.c | 79 +++++++++++++++++++++++++++----------------- >> 2 files changed, 52 insertions(+), 32 deletions(-) >> >> diff --git a/include/linux/swap.h b/include/linux/swap.h >> index 144727041e78..c9e7fea10b83 100644 >> --- a/include/linux/swap.h >> +++ b/include/linux/swap.h >> @@ -177,7 +177,6 @@ enum { >> SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ >> SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ >> SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ >> - SWP_VALID = (1 << 13), /* swap is valid to be operated on? */ >> /* add others here before... */ >> SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ >> }; >> @@ -240,6 +239,7 @@ struct swap_cluster_list { >> * The in-memory structure used to track swap areas. >> */ >> struct swap_info_struct { >> + struct percpu_ref users; /* indicate and keep swap device valid. 
*/ >> unsigned long flags; /* SWP_USED etc: see above */ >> signed short prio; /* swap priority of this type */ >> struct plist_node list; /* entry in swap_active_head */ >> @@ -260,6 +260,7 @@ struct swap_info_struct { >> struct block_device *bdev; /* swap device or bdev of swap file */ >> struct file *swap_file; /* seldom referenced */ >> unsigned int old_block_size; /* seldom referenced */ >> + struct completion comp; /* seldom referenced */ >> #ifdef CONFIG_FRONTSWAP >> unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ >> atomic_t frontswap_pages; /* frontswap pages in-use counter */ >> @@ -511,7 +512,7 @@ sector_t swap_page_sector(struct page *page); >> >> static inline void put_swap_device(struct swap_info_struct *si) >> { >> - rcu_read_unlock(); >> + percpu_ref_put(&si->users); >> } >> >> #else /* CONFIG_SWAP */ >> diff --git a/mm/swapfile.c b/mm/swapfile.c >> index 149e77454e3c..a640fc84be5b 100644 >> --- a/mm/swapfile.c >> +++ b/mm/swapfile.c >> @@ -39,6 +39,7 @@ >> #include >> #include >> #include >> +#include >> >> #include >> #include >> @@ -511,6 +512,14 @@ static void swap_discard_work(struct work_struct *work) >> spin_unlock(&si->lock); >> } >> >> +static void swap_users_ref_free(struct percpu_ref *ref) >> +{ >> + struct swap_info_struct *si; >> + >> + si = container_of(ref, struct swap_info_struct, users); >> + complete(&si->comp); >> +} >> + >> static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) >> { >> struct swap_cluster_info *ci = si->cluster_info; >> @@ -1270,18 +1279,12 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, >> * via preventing the swap device from being swapoff, until >> * put_swap_device() is called. Otherwise return NULL. >> * >> - * The entirety of the RCU read critical section must come before the >> - * return from or after the call to synchronize_rcu() in >> - * enable_swap_info() or swapoff(). 
So if "si->flags & SWP_VALID" is >> - * true, the si->map, si->cluster_info, etc. must be valid in the >> - * critical section. >> - * >> * Notice that swapoff or swapoff+swapon can still happen before the >> - * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock() >> - * in put_swap_device() if there isn't any other way to prevent >> - * swapoff, such as page lock, page table lock, etc. The caller must >> - * be prepared for that. For example, the following situation is >> - * possible. >> + * percpu_ref_tryget_live() in get_swap_device() or after the >> + * percpu_ref_put() in put_swap_device() if there isn't any other way >> + * to prevent swapoff, such as page lock, page table lock, etc. The >> + * caller must be prepared for that. For example, the following >> + * situation is possible. >> * >> * CPU1 CPU2 >> * do_swap_page() >> @@ -1309,21 +1312,27 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry) >> si = swp_swap_info(entry); >> if (!si) >> goto bad_nofile; >> - >> - rcu_read_lock(); >> - if (data_race(!(si->flags & SWP_VALID))) >> - goto unlock_out; >> + if (!percpu_ref_tryget_live(&si->users)) >> + goto out; >> + /* >> + * Guarantee the si->users are checked before accessing other >> + * fields of swap_info_struct. >> + * >> + * Paired with the spin_unlock() after setup_swap_info() in >> + * enable_swap_info(). 
>> + */ >> + smp_rmb(); >> offset = swp_offset(entry); >> if (offset >= si->max) >> - goto unlock_out; >> + goto put_out; >> >> return si; >> bad_nofile: >> pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); >> out: >> return NULL; >> -unlock_out: >> - rcu_read_unlock(); >> +put_out: >> + percpu_ref_put(&si->users); >> return NULL; >> } >> >> @@ -2466,7 +2475,7 @@ static void setup_swap_info(struct swap_info_struct *p, int prio, >> >> static void _enable_swap_info(struct swap_info_struct *p) >> { >> - p->flags |= SWP_WRITEOK | SWP_VALID; >> + p->flags |= SWP_WRITEOK; >> atomic_long_add(p->pages, &nr_swap_pages); >> total_swap_pages += p->pages; >> >> @@ -2497,10 +2506,9 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, >> spin_unlock(&p->lock); >> spin_unlock(&swap_lock); >> /* >> - * Guarantee swap_map, cluster_info, etc. fields are valid >> - * between get/put_swap_device() if SWP_VALID bit is set >> + * Finished initialized swap device, now it's safe to reference it. > > s/initialized/initializing/ > > Otherwise looks good to me! Thanks! > > Reviewed-by: "Huang, Ying" > Many thanks for your quick review and response again. All these comments look good. Will fix it in V4. Thank you very much! >> */ >> - synchronize_rcu(); >> + percpu_ref_resurrect(&p->users); >> spin_lock(&swap_lock); >> spin_lock(&p->lock); >> _enable_swap_info(p); >> @@ -2616,16 +2624,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) >> >> reenable_swap_slots_cache_unlock(); >> >> - spin_lock(&swap_lock); >> - spin_lock(&p->lock); >> - p->flags &= ~SWP_VALID; /* mark swap device as invalid */ >> - spin_unlock(&p->lock); >> - spin_unlock(&swap_lock); >> /* >> - * wait for swap operations protected by get/put_swap_device() >> - * to complete >> + * Wait for swap operations protected by get/put_swap_device() >> + * to complete. >> + * >> + * We need synchronize_rcu() here to protect the accessing to >> + * the swap cache data structure. 
>> */ >> + percpu_ref_kill(&p->users); >> synchronize_rcu(); >> + wait_for_completion(&p->comp); >> >> flush_work(&p->discard_work); >> >> @@ -2857,6 +2865,12 @@ static struct swap_info_struct *alloc_swap_info(void) >> if (!p) >> return ERR_PTR(-ENOMEM); >> >> + if (percpu_ref_init(&p->users, swap_users_ref_free, >> + PERCPU_REF_INIT_DEAD, GFP_KERNEL)) { >> + kvfree(p); >> + return ERR_PTR(-ENOMEM); >> + } >> + >> spin_lock(&swap_lock); >> for (type = 0; type < nr_swapfiles; type++) { >> if (!(swap_info[type]->flags & SWP_USED)) >> @@ -2864,6 +2878,7 @@ static struct swap_info_struct *alloc_swap_info(void) >> } >> if (type >= MAX_SWAPFILES) { >> spin_unlock(&swap_lock); >> + percpu_ref_exit(&p->users); >> kvfree(p); >> return ERR_PTR(-EPERM); >> } >> @@ -2891,9 +2906,13 @@ static struct swap_info_struct *alloc_swap_info(void) >> plist_node_init(&p->avail_lists[i], 0); >> p->flags = SWP_USED; >> spin_unlock(&swap_lock); >> - kvfree(defer); >> + if (defer) { >> + percpu_ref_exit(&defer->users); >> + kvfree(defer); >> + } >> spin_lock_init(&p->lock); >> spin_lock_init(&p->cont_lock); >> + init_completion(&p->comp); >> >> return p; >> } > . >