Subject: Re: [PATCH v1 13/19] zsmalloc: factor page chain functionality out
To: Minchan Kim, Andrew Morton
CC: Vlastimil Babka, Joonsoo Kim, Mel Gorman, Hugh Dickins, Sergey Senozhatsky, Rik van Riel, Gioh Kim
From: xuyiping
Message-ID: <56E38870.5090408@hisilicon.com>
Date: Sat, 12 Mar 2016 11:09:36 +0800
In-Reply-To: <1457681423-26664-14-git-send-email-minchan@kernel.org>

On 2016/3/11 15:30, Minchan Kim wrote:
> For migration, we need to create the sub-page chain of a zspage
> dynamically, so this patch factors that logic out of alloc_zspage.
>
> As a minor refactoring, it makes the OBJ_ALLOCATED_TAG assignment
> in obj_malloc clearer (it could be another patch, but it's
> trivial, so I want to put it together in this patch).
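The factoring looks useful beyond the cleanup: once the chain construction
is a standalone helper, a migration path can allocate a replacement page,
copy the payload over, swap it into the pages[] array, and relink the whole
zspage in one call. A rough sketch of that usage, with a hypothetical
replace_zspage_page() helper that is not part of this patch:

	/*
	 * Hypothetical sketch, not part of this patch: how a migration
	 * path could reuse create_page_chain() to swap one sub-page of
	 * a zspage for a freshly allocated page and relink the chain.
	 */
	static void replace_zspage_page(struct page *pages[], int nr_pages,
					int idx, struct page *newpage)
	{
		/* Carry the payload over to the replacement page. */
		copy_highpage(newpage, pages[idx]);

		/* Swap the new page in and rebuild the whole chain. */
		pages[idx] = newpage;
		create_page_chain(pages, nr_pages);
	}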
>
> Signed-off-by: Minchan Kim
> ---
>  mm/zsmalloc.c | 78 ++++++++++++++++++++++++++++++++++-------------------------
>  1 file changed, 45 insertions(+), 33 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index bfc6a048afac..f86f8aaeb902 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -977,7 +977,9 @@ static void init_zspage(struct size_class *class, struct page *first_page)
>  	unsigned long off = 0;
>  	struct page *page = first_page;
>
> -	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
> +	first_page->freelist = NULL;
> +	INIT_LIST_HEAD(&first_page->lru);
> +	set_zspage_inuse(first_page, 0);
>
>  	while (page) {
>  		struct page *next_page;
> @@ -1022,13 +1024,44 @@ static void init_zspage(struct size_class *class, struct page *first_page)
>  	set_freeobj(first_page, 0);
>  }
>
> +static void create_page_chain(struct page *pages[], int nr_pages)
> +{
> +	int i;
> +	struct page *page;
> +	struct page *prev_page = NULL;
> +	struct page *first_page = NULL;
> +
> +	for (i = 0; i < nr_pages; i++) {
> +		page = pages[i];
> +
> +		INIT_LIST_HEAD(&page->lru);
> +		if (i == 0) {
> +			SetPagePrivate(page);
> +			set_page_private(page, 0);
> +			first_page = page;
> +		}
> +
> +		if (i == 1)
> +			set_page_private(first_page, (unsigned long)page);
> +		if (i >= 1)
> +			set_page_private(page, (unsigned long)first_page);
> +		if (i >= 2)
> +			list_add(&page->lru, &prev_page->lru);
> +		if (i == nr_pages - 1)
> +			SetPagePrivate2(page);
> +
> +		prev_page = page;
> +	}
> +}
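To summarize the linkage rules above: the first page carries PG_private and
its ->private points to the second page (once there is one); every tail
page's ->private points back to the first page; pages from the third onward
are chained on ->lru; and the last page is marked with PG_private_2. A
throwaway userspace mock of those rules, with simplified stand-ins for
struct page and the flag helpers, and the ->lru chaining left out:

	/* Userspace mock of the chain layout -- illustrative only. */
	#include <stdio.h>

	#define NR 4

	struct mock_page {
		int id;
		int pg_private;		/* stands in for PG_private (first page) */
		int pg_private_2;	/* stands in for PG_private_2 (last page) */
		unsigned long private;	/* stands in for page->private */
	};

	int main(void)
	{
		struct mock_page pages[NR];
		struct mock_page *first = NULL;
		int i;

		for (i = 0; i < NR; i++) {
			struct mock_page *page = &pages[i];

			*page = (struct mock_page){ .id = i };
			if (i == 0) {		/* head: flagged, private cleared */
				page->pg_private = 1;
				first = page;
			}
			if (i == 1)		/* head->private = second page */
				first->private = (unsigned long)page;
			if (i >= 1)		/* every tail points back to head */
				page->private = (unsigned long)first;
			if (i == NR - 1)	/* tail-end marker */
				page->pg_private_2 = 1;
		}

		for (i = 0; i < NR; i++)
			printf("page %d: PG_private=%d PG_private_2=%d private=%p\n",
			       pages[i].id, pages[i].pg_private,
			       pages[i].pg_private_2, (void *)pages[i].private);
		return 0;
	}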
> +
>  /*
>   * Allocate a zspage for the given size class
>   */
>  static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
>  {
> -	int i, error;
> +	int i;
>  	struct page *first_page = NULL, *uninitialized_var(prev_page);
> +	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
>
>  	/*
>  	 * Allocate individual pages and link them together as:
> @@ -1041,43 +1074,23 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)

	*uninitialized_var(prev_page) in alloc_zspage is no longer used.

>  	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
>  	 * identify the last page.
>  	 */
> -	error = -ENOMEM;
>  	for (i = 0; i < class->pages_per_zspage; i++) {
>  		struct page *page;
>
>  		page = alloc_page(flags);
> -		if (!page)
> -			goto cleanup;
> -
> -		INIT_LIST_HEAD(&page->lru);
> -		if (i == 0) {	/* first page */
> -			page->freelist = NULL;
> -			SetPagePrivate(page);
> -			set_page_private(page, 0);
> -			first_page = page;
> -			set_zspage_inuse(page, 0);
> +		if (!page) {
> +			while (--i >= 0)
> +				__free_page(pages[i]);
> +			return NULL;
>  		}
> -		if (i == 1)
> -			set_page_private(first_page, (unsigned long)page);
> -		if (i >= 1)
> -			set_page_private(page, (unsigned long)first_page);
> -		if (i >= 2)
> -			list_add(&page->lru, &prev_page->lru);
> -		if (i == class->pages_per_zspage - 1)	/* last page */
> -			SetPagePrivate2(page);
> -		prev_page = page;
> +
> +		pages[i] = page;
>  	}
>
> +	create_page_chain(pages, class->pages_per_zspage);
> +	first_page = pages[0];
>  	init_zspage(class, first_page);
>
> -	error = 0; /* Success */
> -
> -cleanup:
> -	if (unlikely(error) && first_page) {
> -		free_zspage(first_page);
> -		first_page = NULL;
> -	}
> -
>  	return first_page;
>  }
>
> @@ -1419,7 +1432,6 @@ static unsigned long obj_malloc(struct size_class *class,
>  	unsigned long m_offset;
>  	void *vaddr;
>
> -	handle |= OBJ_ALLOCATED_TAG;
>  	obj = get_freeobj(first_page);
>  	objidx_to_page_and_ofs(class, first_page, obj,
>  				&m_page, &m_offset);
> @@ -1429,10 +1441,10 @@ static unsigned long obj_malloc(struct size_class *class,
>  	set_freeobj(first_page, link->next >> OBJ_ALLOCATED_TAG);
>  	if (!class->huge)
>  		/* record handle in the header of allocated chunk */
> -		link->handle = handle;
> +		link->handle = handle | OBJ_ALLOCATED_TAG;
>  	else
>  		/* record handle in first_page->private */
> -		set_page_private(first_page, handle);
> +		set_page_private(first_page, handle | OBJ_ALLOCATED_TAG);
>  	kunmap_atomic(vaddr);
>  	mod_zspage_inuse(first_page, 1);
>  	zs_stat_inc(class, OBJ_USED, 1);
>
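One more note on the obj_malloc change: moving the tag OR into the two
store sites makes the encoding easier to see. OBJ_ALLOCATED_TAG is defined
as 1 in zsmalloc, so bit 0 distinguishes the two cases: an allocated slot
stores the handle with bit 0 set, while a free slot's link field holds the
next free object's index shifted left by one, as the set_freeobj() line
above suggests. A tiny userspace sketch of that encoding, with made-up
handle and index values:

	/*
	 * Illustrative userspace sketch of the bit-0 tag encoding.
	 * Not kernel code; the values are made up.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define OBJ_ALLOCATED_TAG 1	/* bit 0 marks an allocated slot */

	int main(void)
	{
		unsigned long handle = 0x1000;	/* made-up handle value */
		unsigned long next_free = 42;	/* made-up free-object index */

		/* Allocated slot: store the handle with the tag bit set. */
		unsigned long slot = handle | OBJ_ALLOCATED_TAG;
		assert(slot & OBJ_ALLOCATED_TAG);

		/*
		 * Free slot: the next index is kept shifted left by the tag
		 * width, so decoding mirrors the patch's
		 * set_freeobj(..., link->next >> OBJ_ALLOCATED_TAG).
		 */
		unsigned long link_next = next_free << OBJ_ALLOCATED_TAG;
		assert((link_next >> OBJ_ALLOCATED_TAG) == next_free);

		printf("allocated slot: %#lx, decoded next free: %lu\n",
		       slot, link_next >> OBJ_ALLOCATED_TAG);
		return 0;
	}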