Replace migrate_page_add() with migrate_folio_add().
migrate_folio_add() does the same as migrate_page_add(), but takes in a
folio instead of a page. This removes a couple of calls to
compound_head().
Signed-off-by: Vishal Moola (Oracle) <[email protected]>
---
mm/mempolicy.c | 34 +++++++++++++++-------------------
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0a3690ecab7d..253ce368cf16 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -414,7 +414,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
},
};
-static int migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
unsigned long flags);
struct queue_pages {
@@ -476,7 +476,7 @@ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
/* go to folio migration */
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
if (!vma_migratable(walk->vma) ||
- migrate_page_add(&folio->page, qp->pagelist, flags)) {
+ migrate_folio_add(folio, qp->pagelist, flags)) {
ret = 1;
goto unlock;
}
@@ -544,7 +544,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
* temporary off LRU pages in the range. Still
* need migrate other LRU pages.
*/
- if (migrate_page_add(&folio->page, qp->pagelist, flags))
+ if (migrate_folio_add(folio, qp->pagelist, flags))
has_unmovable = true;
} else
break;
@@ -1022,27 +1022,23 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
}
#ifdef CONFIG_MIGRATION
-/*
- * page migration, thp tail pages can be passed.
- */
-static int migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
unsigned long flags)
{
- struct page *head = compound_head(page);
/*
- * Avoid migrating a page that is shared with others.
+ * Avoid migrating a folio that is shared with others.
*/
- if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
- if (!isolate_lru_page(head)) {
- list_add_tail(&head->lru, pagelist);
- mod_node_page_state(page_pgdat(head),
- NR_ISOLATED_ANON + page_is_file_lru(head),
- thp_nr_pages(head));
+ if ((flags & MPOL_MF_MOVE_ALL) || folio_mapcount(folio) == 1) {
+ if (!folio_isolate_lru(folio)) {
+ list_add_tail(&folio->lru, foliolist);
+ node_stat_mod_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ folio_nr_pages(folio));
} else if (flags & MPOL_MF_STRICT) {
/*
- * Non-movable page may reach here. And, there may be
- * temporary off LRU pages or non-LRU movable pages.
- * Treat them as unmovable pages since they can't be
+ * Non-movable folio may reach here. And, there may be
+ * temporary off LRU folios or non-LRU movable folios.
+ * Treat them as unmovable folios since they can't be
* isolated, so they can't be moved at the moment. It
* should return -EIO for this case too.
*/
@@ -1234,7 +1230,7 @@ static struct page *new_page(struct page *page, unsigned long start)
}
#else
-static int migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
unsigned long flags)
{
return -EIO;
--
2.38.1
On 1/19/2023 7:22 AM, Vishal Moola (Oracle) wrote:
> @@ -1022,27 +1022,23 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
> }
>
> #ifdef CONFIG_MIGRATION
> -/*
> - * page migration, thp tail pages can be passed.
> - */
> -static int migrate_page_add(struct page *page, struct list_head *pagelist,
> +static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
> unsigned long flags)
> {
> - struct page *head = compound_head(page);
> /*
> - * Avoid migrating a page that is shared with others.
> + * Avoid migrating a folio that is shared with others.
> */
> - if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
> - if (!isolate_lru_page(head)) {
> - list_add_tail(&head->lru, pagelist);
> - mod_node_page_state(page_pgdat(head),
> - NR_ISOLATED_ANON + page_is_file_lru(head),
> - thp_nr_pages(head));
> + if ((flags & MPOL_MF_MOVE_ALL) || folio_mapcount(folio) == 1) {
One question about the page_mapcount -> folio_mapcount change here.
For a large folio whose entire mapcount is 0, if the first sub-page and
any other sub-page are each mapped once, page_mapcount(head) == 1 is
true while folio_mapcount(folio) == 1 is not.
Regards
Yin, Fengwei
On Wed, Jan 18, 2023 at 5:24 PM Yin, Fengwei <[email protected]> wrote:
> On 1/19/2023 7:22 AM, Vishal Moola (Oracle) wrote:
> > - if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
> > + if ((flags & MPOL_MF_MOVE_ALL) || folio_mapcount(folio) == 1) {
> One question about the page_mapcount -> folio_mapcount change here.
>
> For a large folio whose entire mapcount is 0, if the first sub-page and
> any other sub-page are each mapped once, page_mapcount(head) == 1 is
> true while folio_mapcount(folio) == 1 is not.
Hmm, you're right. Using page_mapcount(&folio->page) would definitely
maintain the same behavior, but I'm not sure that's what we actually want.
My understanding of the purpose of this check is to avoid migrating
pages shared with other processes. Meaning that if a folio (or any pages
within it) is mapped by different processes, we would want to skip that
folio.
Although looking at it now, I don't think using folio_mapcount()
accomplishes this either, in the case where multiple pages in a large
folio are mapped by the same process.
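To make the mismatch concrete, a worked example of the two counts
(values are illustrative, assuming the mapcount semantics discussed in
this thread):

	/*
	 * Large folio F, entire mapcount == 0, mapped only by process A:
	 *   sub-page 0 mapped once by A
	 *   sub-page 1 mapped once by A
	 *
	 * page_mapcount(&F->page) == 1  ->  old check would migrate F
	 * folio_mapcount(F)       == 2  ->  new check skips F
	 *
	 * F is private to A, so under the stated goal F should have been
	 * a migration candidate.
	 */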
Does anyone have any better ideas for this?
On Thu, Jan 19, 2023 at 09:24:16AM +0800, Yin, Fengwei wrote:
> On 1/19/2023 7:22 AM, Vishal Moola (Oracle) wrote:
> > @@ -1022,27 +1022,23 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
> > }
> >
> > #ifdef CONFIG_MIGRATION
> > -/*
> > - * page migration, thp tail pages can be passed.
> > - */
> > -static int migrate_page_add(struct page *page, struct list_head *pagelist,
> > +static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
> > unsigned long flags)
> > {
> > - struct page *head = compound_head(page);
> > /*
> > - * Avoid migrating a page that is shared with others.
> > + * Avoid migrating a folio that is shared with others.
> > */
> > - if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
> > - if (!isolate_lru_page(head)) {
> > - list_add_tail(&head->lru, pagelist);
> > - mod_node_page_state(page_pgdat(head),
> > - NR_ISOLATED_ANON + page_is_file_lru(head),
> > - thp_nr_pages(head));
> > + if ((flags & MPOL_MF_MOVE_ALL) || folio_mapcount(folio) == 1) {
> One question about the page_mapcount -> folio_mapcount change here.
>
> For a large folio whose entire mapcount is 0, if the first sub-page and
> any other sub-page are each mapped once, page_mapcount(head) == 1 is
> true while folio_mapcount(folio) == 1 is not.
We had a good discussion about this in today's THP Cabal meeting [1]. I
didn't quite check everything that I said was true, so let me summarise
& correct it now ...
- This is a heuristic. We're trying to see whether this folio is
mapped by multiple processes (because if it is, it's probably not
worth migrating). If the heuristic is wrong, it probably doesn't
matter _too_ much?
- A proper heuristic for this would be
folio_total_mapcount(folio) == folio_nr_pages(folio)
but this would be expensive to calculate as it requires examining
512 cachelines for a 2MB page.
- For a large folio which is smaller than PMD size, we're guaranteed
that folio_mapcount() is 0 today.
- In the meeting I said that page_mapcount() of the head of a THP
page was zero; that's not true; I had forgotten that we added in
entire_mapcount to the individual page mapcount.
so I now think this should be:
page_mapcount(folio_page(folio, 0))
with an explanation that checking every page is too heavy-weight.
Maybe it should be its own function:
static inline int folio_estimated_mapcount(struct folio *folio)
{
	return page_mapcount(folio_page(folio, 0));
}
with a nice comment explaining what's going on.
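For concreteness, a commented version of that sketch together with the
call site it would serve might look like the below
(folio_estimated_mapcount() is only a proposed name here, not an
existing function):

	/*
	 * Estimate how many times this folio is mapped by looking only at
	 * its first (head) page.  Summing the mapcount of every sub-page
	 * would be exact, but touches up to 512 cachelines for a 2MB
	 * folio, so precision is traded for speed on large folios.
	 */
	static inline int folio_estimated_mapcount(struct folio *folio)
	{
		return page_mapcount(folio_page(folio, 0));
	}

	/* ... and in migrate_folio_add(): */
	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_mapcount(folio) == 1) {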
[1] https://www.youtube.com/watch?v=A3PoGQQQD3Q is the recording of
today's meeting.
On 1/21/2023 3:41 AM, Vishal Moola wrote:
> My understanding of the purpose of this check is to avoid migrating
> pages shared with other processes. Meaning that if a folio (or any
> pages within it) is mapped by different processes, we would want to
> skip that folio.
This is my understanding also. But checking whether a large file folio
is mapped by different processes is not straightforward. Thanks.
Regards
Yin, Fengwei
On 1/21/2023 3:47 AM, Matthew Wilcox wrote:
> On Thu, Jan 19, 2023 at 09:24:16AM +0800, Yin, Fengwei wrote:
>> On 1/19/2023 7:22 AM, Vishal Moola (Oracle) wrote:
>>> - if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
>>> - if (!isolate_lru_page(head)) {
>>> - list_add_tail(&head->lru, pagelist);
>>> - mod_node_page_state(page_pgdat(head),
>>> - NR_ISOLATED_ANON + page_is_file_lru(head),
>>> - thp_nr_pages(head));
>>> + if ((flags & MPOL_MF_MOVE_ALL) || folio_mapcount(folio) == 1) {
>> One question about the page_mapcount -> folio_mapcount change here.
>>
>> For a large folio whose entire mapcount is 0, if the first sub-page and
>> any other sub-page are each mapped once, page_mapcount(head) == 1 is
>> true while folio_mapcount(folio) == 1 is not.
>
> We had a good discussion about this in today's THP Cabal meeting [1]. I
> didn't quite check everything that I said was true, so let me summarise
> & correct it now ...
>
> - This is a heuristic. We're trying to see whether this folio is
> mapped by multiple processes (because if it is, it's probably not
> worth migrating). If the heuristic is wrong, it probably doesn't
> matter _too_ much?
Agree.
> - A proper heuristic for this would be
> folio_total_mapcount(folio) == folio_nr_pages(folio)
I am not sure. A file folio can be partially mapped. Maybe the following
check (see the C sketch below)?

	for each sub-page:
		(folio_entire_mapcount(folio) + sub-page->_mapcount) <= 1

But it's also expensive to check all sub-pages. Maybe a dedicated bit in
the folio, if knowing that a folio is mapped by only one process is
really important?
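A minimal C sketch of that per-sub-page check (hypothetical helper name,
illustrative only; note page_mapcount() already folds the entire
mapcount into each sub-page's count):

	/*
	 * Return true if no sub-page of the folio is mapped more than
	 * once.  Even this is only an approximation of "mapped by a
	 * single process": two processes could each map disjoint
	 * sub-pages once.  Walks up to folio_nr_pages() struct pages,
	 * which is the cost discussed below.
	 */
	static inline bool folio_mapped_at_most_once(struct folio *folio)
	{
		long i, nr = folio_nr_pages(folio);

		for (i = 0; i < nr; i++)
			if (page_mapcount(folio_page(folio, i)) > 1)
				return false;
		return true;
	}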
> but this would be expensive to calculate as it requires examining
> 512 cachelines for a 2MB page.
> - For a large folio which is smaller than PMD size, we're guaranteed
> that folio_mapcount() is 0 today.
My understanding is: for a large folio, if any sub-page is mapped,
folio_mapcount() cannot be 0.
> - In the meeting I said that page_mapcount() of the head of a THP
> page was zero; that's not true; I had forgotten that we added in
> entire_mapcount to the individual page mapcount.
>
> so I now think this should be:
>
> page_mapcount(folio_page(folio, 0))
For a large file folio, it's possible that folio_page(folio, 0) is
mapped only once while other sub-pages are mapped multiple times.
But I think this may be the best choice here.
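For example (illustrative values only):

	/*
	 * Large file folio F, entire mapcount == 0:
	 *   sub-page 0 mapped once, by process A only
	 *   sub-page 1 mapped by both A and B
	 *
	 * page_mapcount(folio_page(F, 0)) == 1, so the estimate treats F
	 * as private even though sub-page 1 is shared with B.
	 */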
>
> with an explanation that checking every page is too heavy-weight.
> Maybe it should be its own function:
>
> static inline int folio_estimated_mapcount(struct folio *folio)
> {
>         return page_mapcount(folio_page(folio, 0));
> }
>
> with a nice comment explaining what's going on.
>
> [1] https://www.youtube.com/watch?v=A3PoGQQQD3Q is the recording of
> today's meeting.
This is nice. Thanks a lot for sharing.
Regards
Yin, Fengwei