From: Hui Zhu <zhuhui@xiaomi.com>
Subject: [PATCH] zsmalloc: zs_page_migrate: do not check inuse if migrate_mode is not MIGRATE_ASYNC
Date: Fri, 14 Jul 2017 15:51:07 +0800
Message-ID: <1500018667-30175-1-git-send-email-zhuhui@xiaomi.com>
X-Mailer: git-send-email 1.9.1
X-Mailing-List: linux-kernel@vger.kernel.org

We got some -EBUSY returns from zs_page_migrate, which make migration
slow (the caller has to retry) or fail (zs_page_putback will schedule
free_work via schedule_work, but that cannot guarantee the zspage is
actually freed).

I did not find anything that prevents zs_page_migrate from working on a
ZS_EMPTY zspage.  So make this patch to not check inuse if migrate_mode
is not MIGRATE_ASYNC.

Signed-off-by: Hui Zhu <zhuhui@xiaomi.com>
---
 mm/zsmalloc.c | 66 +++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 37 insertions(+), 29 deletions(-)
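As a reading aid for the diff below, here is a compilable stand-alone
sketch of the control flow the patched zs_page_migrate() ends up with.
It is not kernel code: zs_page_migrate_model, struct zspage_model and
the stubbed-out "if (inuse)" branches are stand-ins invented for
illustration; only the "mode == MIGRATE_ASYNC && !inuse" test mirrors
the patch.

	/* sketch.c - toy model of the patched control flow, not kernel code */
	#include <errno.h>
	#include <stdio.h>

	enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

	struct zspage_model {
		int inuse;		/* models get_zspage_inuse(zspage) */
	};

	static int zs_page_migrate_model(struct zspage_model *zspage,
					 enum migrate_mode mode)
	{
		int inuse = zspage->inuse;	/* read once, under class->lock */

		/*
		 * Old behaviour: any empty zspage fails with -EBUSY.
		 * New behaviour: only an async caller bails out; a sync
		 * caller may migrate an empty zspage, it simply has no
		 * objects to pin or fix up.
		 */
		if (mode == MIGRATE_ASYNC && !inuse)
			return -EBUSY;

		if (inuse) {
			/* pin every allocated object found in the page */
		}

		/* memcpy() the page contents into the new page */

		if (inuse) {
			/* rewrite each pinned handle to point at the new page */
		}

		if (inuse) {
			/* unpin the objects pinned above */
		}

		return 0;		/* MIGRATEPAGE_SUCCESS */
	}

	int main(void)
	{
		struct zspage_model empty = { .inuse = 0 };

		/* async + empty: still refused, exactly as before the patch */
		printf("MIGRATE_ASYNC on empty zspage: %d\n",
		       zs_page_migrate_model(&empty, MIGRATE_ASYNC));
		/* sync + empty: now proceeds instead of returning -EBUSY */
		printf("MIGRATE_SYNC  on empty zspage: %d\n",
		       zs_page_migrate_model(&empty, MIGRATE_SYNC));
		return 0;
	}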
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d41edd2..c298e5c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1982,6 +1982,7 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	unsigned long old_obj, new_obj;
 	unsigned int obj_idx;
 	int ret = -EAGAIN;
+	int inuse;
 
 	VM_BUG_ON_PAGE(!PageMovable(page), page);
 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
@@ -1996,21 +1997,24 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	offset = get_first_obj_offset(page);
 
 	spin_lock(&class->lock);
-	if (!get_zspage_inuse(zspage)) {
+	inuse = get_zspage_inuse(zspage);
+	if (mode == MIGRATE_ASYNC && !inuse) {
 		ret = -EBUSY;
 		goto unlock_class;
 	}
 
 	pos = offset;
 	s_addr = kmap_atomic(page);
-	while (pos < PAGE_SIZE) {
-		head = obj_to_head(page, s_addr + pos);
-		if (head & OBJ_ALLOCATED_TAG) {
-			handle = head & ~OBJ_ALLOCATED_TAG;
-			if (!trypin_tag(handle))
-				goto unpin_objects;
+	if (inuse) {
+		while (pos < PAGE_SIZE) {
+			head = obj_to_head(page, s_addr + pos);
+			if (head & OBJ_ALLOCATED_TAG) {
+				handle = head & ~OBJ_ALLOCATED_TAG;
+				if (!trypin_tag(handle))
+					goto unpin_objects;
+			}
+			pos += class->size;
 		}
-		pos += class->size;
 	}
 
 	/*
@@ -2020,20 +2024,22 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	memcpy(d_addr, s_addr, PAGE_SIZE);
 	kunmap_atomic(d_addr);
 
-	for (addr = s_addr + offset; addr < s_addr + pos;
-		addr += class->size) {
-		head = obj_to_head(page, addr);
-		if (head & OBJ_ALLOCATED_TAG) {
-			handle = head & ~OBJ_ALLOCATED_TAG;
-			if (!testpin_tag(handle))
-				BUG();
-
-			old_obj = handle_to_obj(handle);
-			obj_to_location(old_obj, &dummy, &obj_idx);
-			new_obj = (unsigned long)location_to_obj(newpage,
-						obj_idx);
-			new_obj |= BIT(HANDLE_PIN_BIT);
-			record_obj(handle, new_obj);
+	if (inuse) {
+		for (addr = s_addr + offset; addr < s_addr + pos;
+			addr += class->size) {
+			head = obj_to_head(page, addr);
+			if (head & OBJ_ALLOCATED_TAG) {
+				handle = head & ~OBJ_ALLOCATED_TAG;
+				if (!testpin_tag(handle))
+					BUG();
+
+				old_obj = handle_to_obj(handle);
+				obj_to_location(old_obj, &dummy, &obj_idx);
+				new_obj = (unsigned long)
+					location_to_obj(newpage, obj_idx);
+				new_obj |= BIT(HANDLE_PIN_BIT);
+				record_obj(handle, new_obj);
+			}
 		}
 	}
 
@@ -2055,14 +2061,16 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	ret = MIGRATEPAGE_SUCCESS;
 
 unpin_objects:
-	for (addr = s_addr + offset; addr < s_addr + pos;
+	if (inuse) {
+		for (addr = s_addr + offset; addr < s_addr + pos;
 			addr += class->size) {
-		head = obj_to_head(page, addr);
-		if (head & OBJ_ALLOCATED_TAG) {
-			handle = head & ~OBJ_ALLOCATED_TAG;
-			if (!testpin_tag(handle))
-				BUG();
-			unpin_tag(handle);
+			head = obj_to_head(page, addr);
+			if (head & OBJ_ALLOCATED_TAG) {
+				handle = head & ~OBJ_ALLOCATED_TAG;
+				if (!testpin_tag(handle))
+					BUG();
+				unpin_tag(handle);
+			}
 		}
 	}
 	kunmap_atomic(s_addr);
-- 
1.9.1
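As background for the loops this patch wraps in "if (inuse)": the
stand-alone toy below shows the walk shape those loops share, stepping
through a page in class->size strides and treating a set
OBJ_ALLOCATED_TAG bit in the head word as "allocated object".  The flat
array layout and the 512-byte class size are assumptions made purely for
illustration; real zsmalloc pages are more involved.

	/* toy_walk.c - toy model of the object walk, not the real layout */
	#include <stdio.h>

	#define PAGE_SIZE	  4096UL
	#define OBJ_ALLOCATED_TAG 1UL

	int main(void)
	{
		unsigned long page[PAGE_SIZE / sizeof(unsigned long)] = { 0 };
		unsigned long class_size = 512;	/* object size, assumed */
		unsigned long offset = 0;	/* offset of the first object */
		unsigned long pos, head, handle;

		/* pretend objects 1 and 3 are allocated: store tagged handles */
		page[1 * class_size / sizeof(unsigned long)] = 0x1000UL | OBJ_ALLOCATED_TAG;
		page[3 * class_size / sizeof(unsigned long)] = 0x2000UL | OBJ_ALLOCATED_TAG;

		/* same shape as the (now inuse-guarded) pin loop above */
		for (pos = offset; pos < PAGE_SIZE; pos += class_size) {
			head = page[pos / sizeof(unsigned long)];
			if (head & OBJ_ALLOCATED_TAG) {
				handle = head & ~OBJ_ALLOCATED_TAG;
				printf("would pin handle 0x%lx at pos %lu\n",
				       handle, pos);
			}
		}
		return 0;
	}

With inuse == 0 such a walk has nothing to find, which is why the
empty-zspage path only needs the page copy: all three guarded loops can
be skipped entirely.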