This change makes populate_section_memmap()/depopulate_section_memmap
much simpler.
Suggested-by: Michal Hocko <[email protected]>
Signed-off-by: Baoquan He <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Acked-by: Michal Hocko <[email protected]>
---
v3->v4:
Split the old v3 into two patches, to carve out using 'nid'
as the preferred node to allocate memmap into a separate patch. This
is suggested by Michal, and the carved-out change is put in patch 2.
v2->v3:
Remove __GFP_NOWARN and use array_size when calling kvmalloc_node()
per Matthew's comments.
http://lkml.kernel.org/r/20200312141749.GL27711@MiWiFi-R3L-srv
mm/sparse.c | 27 +++------------------------
1 file changed, 3 insertions(+), 24 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index e747a238a860..d01d09cc7d99 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -719,35 +719,14 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
struct page * __meminit populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
- struct page *page, *ret;
- unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
-
- page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
- if (page)
- goto got_map_page;
-
- ret = vmalloc(memmap_size);
- if (ret)
- goto got_map_ptr;
-
- return NULL;
-got_map_page:
- ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
-
- return ret;
+ return kvmalloc(array_size(sizeof(struct page),
+ PAGES_PER_SECTION), GFP_KERNEL);
}
static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
struct vmem_altmap *altmap)
{
- struct page *memmap = pfn_to_page(pfn);
-
- if (is_vmalloc_addr(memmap))
- vfree(memmap);
- else
- free_pages((unsigned long)memmap,
- get_order(sizeof(struct page) * PAGES_PER_SECTION));
+ kvfree(pfn_to_page(pfn));
}
static void free_map_bootmem(struct page *memmap)
--
2.17.2
When allocating memmap for hot added memory with the classic sparse, the
specified 'nid' is ignored in populate_section_memmap().
While allocating memmap for the classic sparse during boot, the node
given by 'nid' is preferred. And VMEMMAP prefers the node of 'nid' in
both the boot stage and memory hot adding. So there seems to be no reason
not to respect the node of 'nid' for the classic sparse when hot adding memory.
Use kvmalloc_node instead so that the passed-in 'nid' is used.
Signed-off-by: Baoquan He <[email protected]>
Acked-by: Michal Hocko <[email protected]>
---
mm/sparse.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index d01d09cc7d99..513d765e8c72 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -719,8 +719,8 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
struct page * __meminit populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
- return kvmalloc(array_size(sizeof(struct page),
- PAGES_PER_SECTION), GFP_KERNEL);
+ return kvmalloc_node(array_size(sizeof(struct page),
+ PAGES_PER_SECTION), GFP_KERNEL, nid);
}
static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
--
2.17.2
On 16.03.20 11:21, Baoquan He wrote:
> This change makes populate_section_memmap()/depopulate_section_memmap
> much simpler.
>
> Suggested-by: Michal Hocko <[email protected]>
> Signed-off-by: Baoquan He <[email protected]>
> Reviewed-by: David Hildenbrand <[email protected]>
> Acked-by: Michal Hocko <[email protected]>
> ---
> v3->v4:
> Split the old v3 into two patches, to carve out the using 'nid'
> as preferred node to allocate memmap into a separate patch. This
> is suggested by Michal, and the carving out is put in patch 2.
>
> v2->v3:
> Remove __GFP_NOWARN and use array_size when calling kvmalloc_node()
> per Matthew's comments.
> http://lkml.kernel.org/r/20200312141749.GL27711@MiWiFi-R3L-srv
>
> mm/sparse.c | 27 +++------------------------
> 1 file changed, 3 insertions(+), 24 deletions(-)
>
> diff --git a/mm/sparse.c b/mm/sparse.c
> index e747a238a860..d01d09cc7d99 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -719,35 +719,14 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> struct page * __meminit populate_section_memmap(unsigned long pfn,
> unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
> {
> - struct page *page, *ret;
> - unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
> -
> - page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
> - if (page)
> - goto got_map_page;
> -
> - ret = vmalloc(memmap_size);
> - if (ret)
> - goto got_map_ptr;
> -
> - return NULL;
> -got_map_page:
> - ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
> -got_map_ptr:
> -
> - return ret;
> + return kvmalloc(array_size(sizeof(struct page),
> + PAGES_PER_SECTION), GFP_KERNEL);
FWIW, this is what I meant:
return kvmalloc(array_size(sizeof(struct page),
PAGES_PER_SECTION), GFP_KERNEL);
--
Thanks,
David / dhildenb
> This change makes populate_section_memmap()/depopulate_section_memmap
> much simpler.
>
> Suggested-by: Michal Hocko <[email protected]>
> Signed-off-by: Baoquan He <[email protected]>
> Reviewed-by: David Hildenbrand <[email protected]>
> Acked-by: Michal Hocko <[email protected]>
> ---
> v3->v4:
> Split the old v3 into two patches, to carve out the using 'nid'
> as preferred node to allocate memmap into a separate patch. This
> is suggested by Michal, and the carving out is put in patch 2.
>
> v2->v3:
> Remove __GFP_NOWARN and use array_size when calling kvmalloc_node()
> per Matthew's comments.
> http://lkml.kernel.org/r/20200312141749.GL27711@MiWiFi-R3L-srv
>
> mm/sparse.c | 27 +++------------------------
> 1 file changed, 3 insertions(+), 24 deletions(-)
>
> diff --git a/mm/sparse.c b/mm/sparse.c
> index e747a238a860..d01d09cc7d99 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -719,35 +719,14 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> struct page * __meminit populate_section_memmap(unsigned long pfn,
> unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
> {
> - struct page *page, *ret;
> - unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
> -
> - page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
> - if (page)
> - goto got_map_page;
> -
> - ret = vmalloc(memmap_size);
> - if (ret)
> - goto got_map_ptr;
> -
> - return NULL;
> -got_map_page:
> - ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
> -got_map_ptr:
> -
> - return ret;
> + return kvmalloc(array_size(sizeof(struct page),
> + PAGES_PER_SECTION), GFP_KERNEL);
> }
>
> static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
> struct vmem_altmap *altmap)
> {
> - struct page *memmap = pfn_to_page(pfn);
> -
> - if (is_vmalloc_addr(memmap))
> - vfree(memmap);
> - else
> - free_pages((unsigned long)memmap,
> - get_order(sizeof(struct page) * PAGES_PER_SECTION));
> + kvfree(pfn_to_page(pfn));
> }
>
> static void free_map_bootmem(struct page *memmap)
> --
> 2.17.2
With David's indentation suggestion:
Reviewed-by: Pankaj Gupta <[email protected]>
>
>
On Mon, Mar 16, 2020 at 12:17:49PM +0100, Pankaj Gupta wrote:
> > This change makes populate_section_memmap()/depopulate_section_memmap
> > much simpler.
> >
> > Suggested-by: Michal Hocko <[email protected]>
> > Signed-off-by: Baoquan He <[email protected]>
> > Reviewed-by: David Hildenbrand <[email protected]>
> > Acked-by: Michal Hocko <[email protected]>
> With David's indentation suggestion:
> Reviewed-by: Pankaj Gupta <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
(for both patches)
On 03/16/20 at 12:00pm, David Hildenbrand wrote:
> On 16.03.20 11:21, Baoquan He wrote:
> > This change makes populate_section_memmap()/depopulate_section_memmap
> > much simpler.
> >
> > Suggested-by: Michal Hocko <[email protected]>
> > Signed-off-by: Baoquan He <[email protected]>
> > Reviewed-by: David Hildenbrand <[email protected]>
> > Acked-by: Michal Hocko <[email protected]>
> > ---
> > v3->v4:
> > Split the old v3 into two patches, to carve out the using 'nid'
> > as preferred node to allocate memmap into a separate patch. This
> > is suggested by Michal, and the carving out is put in patch 2.
> >
> > v2->v3:
> > Remove __GFP_NOWARN and use array_size when calling kvmalloc_node()
> > per Matthew's comments.
> > http://lkml.kernel.org/r/20200312141749.GL27711@MiWiFi-R3L-srv
> >
> > mm/sparse.c | 27 +++------------------------
> > 1 file changed, 3 insertions(+), 24 deletions(-)
> >
> > diff --git a/mm/sparse.c b/mm/sparse.c
> > index e747a238a860..d01d09cc7d99 100644
> > --- a/mm/sparse.c
> > +++ b/mm/sparse.c
> > @@ -719,35 +719,14 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> > struct page * __meminit populate_section_memmap(unsigned long pfn,
> > unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
> > {
> > - struct page *page, *ret;
> > - unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
> > -
> > - page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
> > - if (page)
> > - goto got_map_page;
> > -
> > - ret = vmalloc(memmap_size);
> > - if (ret)
> > - goto got_map_ptr;
> > -
> > - return NULL;
> > -got_map_page:
> > - ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
> > -got_map_ptr:
> > -
> > - return ret;
> > + return kvmalloc(array_size(sizeof(struct page),
> > + PAGES_PER_SECTION), GFP_KERNEL);
>
> FWIW, this is what I meant:
>
> return kvmalloc(array_size(sizeof(struct page),
> PAGES_PER_SECTION), GFP_KERNEL);
Since there's another parameter, I didn't indent it with sizeof. But
Pankaj and Matthew have added other two votes on this, I will change it,
thanks.
This change makes populate_section_memmap()/depopulate_section_memmap
much simpler.
Suggested-by: Michal Hocko <[email protected]>
Signed-off-by: Baoquan He <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: Pankaj Gupta <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
---
mm/sparse.c | 27 +++------------------------
1 file changed, 3 insertions(+), 24 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index e747a238a860..3fa407d7f70a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -719,35 +719,14 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
struct page * __meminit populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
- struct page *page, *ret;
- unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
-
- page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
- if (page)
- goto got_map_page;
-
- ret = vmalloc(memmap_size);
- if (ret)
- goto got_map_ptr;
-
- return NULL;
-got_map_page:
- ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
-got_map_ptr:
-
- return ret;
+ return kvmalloc(array_size(sizeof(struct page),
+ PAGES_PER_SECTION), GFP_KERNEL);
}
static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
struct vmem_altmap *altmap)
{
- struct page *memmap = pfn_to_page(pfn);
-
- if (is_vmalloc_addr(memmap))
- vfree(memmap);
- else
- free_pages((unsigned long)memmap,
- get_order(sizeof(struct page) * PAGES_PER_SECTION));
+ kvfree(pfn_to_page(pfn));
}
static void free_map_bootmem(struct page *memmap)
--
2.17.2
When allocating memmap for hot added memory with the classic sparse, the
specified 'nid' is ignored in populate_section_memmap().
While allocating memmap for the classic sparse during boot, the node
given by 'nid' is preferred. And VMEMMAP prefers the node of 'nid' in
both the boot stage and memory hot adding. So there seems to be no reason
not to respect the node of 'nid' for the classic sparse when hot adding memory.
Use kvmalloc_node instead so that the passed-in 'nid' is used.
Signed-off-by: Baoquan He <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
---
mm/sparse.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 3fa407d7f70a..31dcdfb55c72 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -719,8 +719,8 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
struct page * __meminit populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
- return kvmalloc(array_size(sizeof(struct page),
- PAGES_PER_SECTION), GFP_KERNEL);
+ return kvmalloc_node(array_size(sizeof(struct page),
+ PAGES_PER_SECTION), GFP_KERNEL, nid);
}
static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
--
2.17.2
> When allocating memmap for hot added memory with the classic sparse, the
> specified 'nid' is ignored in populate_section_memmap().
>
> While in allocating memmap for the classic sparse during boot, the node
> given by 'nid' is preferred. And VMEMMAP prefers the node of 'nid' in
> both boot stage and memory hot adding. So seems no reason to not respect
> the node of 'nid' for the classic sparse when hot adding memory.
>
> Use kvmalloc_node instead to use the passed in 'nid'.
>
> Signed-off-by: Baoquan He <[email protected]>
> Acked-by: Michal Hocko <[email protected]>
> Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
> ---
> mm/sparse.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 3fa407d7f70a..31dcdfb55c72 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -719,8 +719,8 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> struct page * __meminit populate_section_memmap(unsigned long pfn,
> unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
> {
> - return kvmalloc(array_size(sizeof(struct page),
> - PAGES_PER_SECTION), GFP_KERNEL);
> + return kvmalloc_node(array_size(sizeof(struct page),
> + PAGES_PER_SECTION), GFP_KERNEL, nid);
> }
>
> static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
> --
Acked-by: Pankaj Gupta <[email protected]>
> 2.17.2
>
>
On 16.03.20 13:56, Baoquan He wrote:
> When allocating memmap for hot added memory with the classic sparse, the
> specified 'nid' is ignored in populate_section_memmap().
>
> While in allocating memmap for the classic sparse during boot, the node
> given by 'nid' is preferred. And VMEMMAP prefers the node of 'nid' in
> both boot stage and memory hot adding. So seems no reason to not respect
> the node of 'nid' for the classic sparse when hot adding memory.
>
> Use kvmalloc_node instead to use the passed in 'nid'.
>
> Signed-off-by: Baoquan He <[email protected]>
> Acked-by: Michal Hocko <[email protected]>
> Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
> ---
> mm/sparse.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 3fa407d7f70a..31dcdfb55c72 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -719,8 +719,8 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> struct page * __meminit populate_section_memmap(unsigned long pfn,
> unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
> {
> - return kvmalloc(array_size(sizeof(struct page),
> - PAGES_PER_SECTION), GFP_KERNEL);
> + return kvmalloc_node(array_size(sizeof(struct page),
> + PAGES_PER_SECTION), GFP_KERNEL, nid);
> }
>
> static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
>
Reviewed-by: David Hildenbrand <[email protected]>
--
Thanks,
David / dhildenb
On Mon, Mar 16, 2020 at 08:54:50PM +0800, Baoquan He wrote:
>This change makes populate_section_memmap()/depopulate_section_memmap
>much simpler.
>
>Suggested-by: Michal Hocko <[email protected]>
>Signed-off-by: Baoquan He <[email protected]>
>Reviewed-by: David Hildenbrand <[email protected]>
>Acked-by: Michal Hocko <[email protected]>
>Reviewed-by: Pankaj Gupta <[email protected]>
>Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Wei Yang <[email protected]>
--
Wei Yang
Help you, Help me
On Mon, Mar 16, 2020 at 08:56:25PM +0800, Baoquan He wrote:
>When allocating memmap for hot added memory with the classic sparse, the
>specified 'nid' is ignored in populate_section_memmap().
>
>While in allocating memmap for the classic sparse during boot, the node
>given by 'nid' is preferred. And VMEMMAP prefers the node of 'nid' in
>both boot stage and memory hot adding. So seems no reason to not respect
>the node of 'nid' for the classic sparse when hot adding memory.
>
>Use kvmalloc_node instead to use the passed in 'nid'.
>
>Signed-off-by: Baoquan He <[email protected]>
>Acked-by: Michal Hocko <[email protected]>
>Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Wei Yang <[email protected]>
--
Wei Yang
Help you, Help me
Hi Andrew,
On 03/16/20 at 08:56pm, Baoquan He wrote:
> When allocating memmap for hot added memory with the classic sparse, the
> specified 'nid' is ignored in populate_section_memmap().
>
> While in allocating memmap for the classic sparse during boot, the node
> given by 'nid' is preferred. And VMEMMAP prefers the node of 'nid' in
> both boot stage and memory hot adding. So seems no reason to not respect
> the node of 'nid' for the classic sparse when hot adding memory.
>
> Use kvmalloc_node instead to use the passed in 'nid'.
I just checked linux-next, and it seems this one was missed. Michal suggested
splitting the old v4 into two patches; this patch uses the passed-in
'nid' to allocate memmap with !VMEMMAP.
Thanks
Baoquan
>
> Signed-off-by: Baoquan He <[email protected]>
> Acked-by: Michal Hocko <[email protected]>
> Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
> ---
> mm/sparse.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 3fa407d7f70a..31dcdfb55c72 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -719,8 +719,8 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> struct page * __meminit populate_section_memmap(unsigned long pfn,
> unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
> {
> - return kvmalloc(array_size(sizeof(struct page),
> - PAGES_PER_SECTION), GFP_KERNEL);
> + return kvmalloc_node(array_size(sizeof(struct page),
> + PAGES_PER_SECTION), GFP_KERNEL, nid);
> }
>
> static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
> --
> 2.17.2
>
>