2021-01-05 07:50:43

by Baoquan He

Subject: [PATCH v3 0/4] mm: clean up names and parameters of memmap_init_xxxx functions

This patchset corrects the inappropriate names of the memmap_init_xxx
functions and simplifies the parameters of functions in this code flow;
I noticed these issues while fixing a regression bug in memmap defer
init. The patches are taken from the v2 patchset; the bug-fixing patch
has been sent separately and merged, so the rest is sent as v3.

No change compared with v2, except for adding Mike's 'Reviewed-by' tag.

V2 post is here:
https://lore.kernel.org/linux-mm/[email protected]/

Baoquan He (4):
mm: rename memmap_init() and memmap_init_zone()
mm: simplify parameter of function memmap_init_zone()
mm: simplify parameter of setup_usemap()
mm: remove unneeded local variable in free_area_init_core

arch/ia64/include/asm/pgtable.h | 3 +-
arch/ia64/mm/init.c | 14 +++++----
include/linux/mm.h | 2 +-
mm/memory_hotplug.c | 2 +-
mm/page_alloc.c | 54 +++++++++++++++------------------
5 files changed, 36 insertions(+), 39 deletions(-)

--
2.17.2


2021-01-05 07:51:11

by Baoquan He

Subject: [PATCH v3 3/4] mm: simplify parameter of setup_usemap()

Parameter 'zone' already carries all the needed information, so remove
the other, now unnecessary parameters.
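
For reference, a rough sketch of what setup_usemap() can derive from the
zone itself, as the diff below does; the local names here are only
illustrative, and the comments note which dropped parameter each line
replaces:

	int nid = zone_to_nid(zone);			/* pgdat->node_id */
	unsigned long start_pfn = zone->zone_start_pfn;	/* zone_start_pfn */
	unsigned long size = zone->spanned_pages;	/* zonesize */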

Signed-off-by: Baoquan He <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
---
mm/page_alloc.c | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2a46ffdaf0b..e0ce6fb6373b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6824,25 +6824,22 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
return usemapsize / 8;
}

-static void __ref setup_usemap(struct pglist_data *pgdat,
- struct zone *zone,
- unsigned long zone_start_pfn,
- unsigned long zonesize)
+static void __ref setup_usemap(struct zone *zone)
{
- unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
+ unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
+ zone->spanned_pages);
zone->pageblock_flags = NULL;
if (usemapsize) {
zone->pageblock_flags =
memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
- pgdat->node_id);
+ zone_to_nid(zone));
if (!zone->pageblock_flags)
panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
- usemapsize, zone->name, pgdat->node_id);
+ usemapsize, zone->name, zone_to_nid(zone));
}
}
#else
-static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
- unsigned long zone_start_pfn, unsigned long zonesize) {}
+static inline void setup_usemap(struct zone *zone) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -7037,7 +7034,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
continue;

set_pageblock_order();
- setup_usemap(pgdat, zone, zone_start_pfn, size);
+ setup_usemap(zone);
init_currently_empty_zone(zone, zone_start_pfn, size);
memmap_init_zone(zone);
}
--
2.17.2

2021-01-05 08:45:32

by Baoquan He

Subject: [PATCH v3 2/4] mm: simplify parameter of function memmap_init_zone()

As David suggested, simply passing 'struct zone *zone' is enough. We can
easily get all the needed information from 'struct zone *'.
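
A rough sketch of the derivations the diff below relies on (only existing
'struct zone' fields and mm helpers):

	int nid = zone_to_nid(zone);
	int zone_id = zone_idx(zone);
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long size = zone->spanned_pages;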

Suggested-by: David Hildenbrand <[email protected]>
Signed-off-by: Baoquan He <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
---
arch/ia64/include/asm/pgtable.h | 3 +--
arch/ia64/mm/init.c | 12 +++++++-----
mm/page_alloc.c | 20 ++++++++++----------
3 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index dce2ff37df65..2c81394a2430 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -520,8 +520,7 @@ extern struct page *zero_page_memmap_ptr;

# ifdef CONFIG_VIRTUAL_MEM_MAP
/* arch mem_map init routine is needed due to holes in a virtual mem_map */
- extern void memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn);
+ extern void memmap_init_zone(struct zone *zone);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index c8e68e92beb3..ccbda1a74c95 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -541,12 +541,14 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
return 0;
}

-void __meminit
-memmap_init_zone(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn)
+void __meminit memmap_init_zone(struct zone *zone)
{
+ unsigned long size = zone->spanned_pages;
+ int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+ unsigned long start_pfn = zone->zone_start_pfn;
+
if (!vmem_map) {
- memmap_init_range(size, nid, zone, start_pfn, start_pfn + size,
+ memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
} else {
struct page *start;
@@ -556,7 +558,7 @@ memmap_init_zone(unsigned long size, int nid, unsigned long zone,
args.start = start;
args.end = start + size;
args.nid = nid;
- args.zone = zone;
+ args.zone = zone_id;

efi_memmap_walk(virtual_memmap_init, &args);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 69ebf75be91c..b2a46ffdaf0b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6292,16 +6292,16 @@ static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
}
#endif

-void __init __weak memmap_init_zone(unsigned long size, int nid,
- unsigned long zone,
- unsigned long zone_start_pfn)
+void __init __weak memmap_init_zone(struct zone *zone)
{
unsigned long start_pfn, end_pfn, hole_start_pfn = 0;
- unsigned long zone_end_pfn = zone_start_pfn + size;
+ int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+ unsigned long zone_start_pfn = zone->zone_start_pfn;
+ unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
u64 pgcnt = 0;
- int i;

for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ unsigned long size;
start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
hole_start_pfn = clamp(hole_start_pfn, zone_start_pfn,
@@ -6309,13 +6309,13 @@ void __init __weak memmap_init_zone(unsigned long size, int nid,

if (end_pfn > start_pfn) {
size = end_pfn - start_pfn;
- memmap_init_range(size, nid, zone, start_pfn, zone_end_pfn,
+ memmap_init_range(size, nid, zone_id, start_pfn, zone_end_pfn,
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
}

if (hole_start_pfn < start_pfn)
pgcnt += init_unavailable_range(hole_start_pfn,
- start_pfn, zone, nid);
+ start_pfn, zone_id, nid);
hole_start_pfn = end_pfn;
}

@@ -6328,11 +6328,11 @@ void __init __weak memmap_init_zone(unsigned long size, int nid,
*/
if (hole_start_pfn < zone_end_pfn)
pgcnt += init_unavailable_range(hole_start_pfn, zone_end_pfn,
- zone, nid);
+ zone_id, nid);

if (pgcnt)
pr_info("%s: Zeroed struct page in unavailable ranges: %lld\n",
- zone_names[zone], pgcnt);
+ zone_names[zone_id], pgcnt);
}

static int zone_batchsize(struct zone *zone)
@@ -7039,7 +7039,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
set_pageblock_order();
setup_usemap(pgdat, zone, zone_start_pfn, size);
init_currently_empty_zone(zone, zone_start_pfn, size);
- memmap_init_zone(size, nid, j, zone_start_pfn);
+ memmap_init_zone(zone);
}
}

--
2.17.2

2021-01-05 08:46:53

by Baoquan He

Subject: [PATCH v3 4/4] mm: remove unneeded local variable in free_area_init_core

Local variable 'zone_start_pfn' is not needed since there's only
one call site in free_area_init_core(). Let's remove it and pass
zone->zone_start_pfn directly to init_currently_empty_zone().

Signed-off-by: Baoquan He <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
---
mm/page_alloc.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0ce6fb6373b..9cacb8652239 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6986,7 +6986,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long size, freesize, memmap_pages;
- unsigned long zone_start_pfn = zone->zone_start_pfn;

size = zone->spanned_pages;
freesize = zone->present_pages;
@@ -7035,7 +7034,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)

set_pageblock_order();
setup_usemap(zone);
- init_currently_empty_zone(zone, zone_start_pfn, size);
+ init_currently_empty_zone(zone, zone->zone_start_pfn, size);
memmap_init_zone(zone);
}
}
--
2.17.2

2021-01-05 16:50:28

by David Hildenbrand

Subject: Re: [PATCH v3 4/4] mm: remove unneeded local variable in free_area_init_core

On 05.01.21 08:47, Baoquan He wrote:
> Local variable 'zone_start_pfn' is not needed since there's only
> one call site in free_area_init_core(). Let's remove it and pass
> zone->zone_start_pfn directly to init_currently_empty_zone().
>
> Signed-off-by: Baoquan He <[email protected]>
> Reviewed-by: Mike Rapoport <[email protected]>
> ---
> mm/page_alloc.c | 3 +--
> 1 file changed, 1 insertion(+), 2 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index e0ce6fb6373b..9cacb8652239 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -6986,7 +6986,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
> for (j = 0; j < MAX_NR_ZONES; j++) {
> struct zone *zone = pgdat->node_zones + j;
> unsigned long size, freesize, memmap_pages;
> - unsigned long zone_start_pfn = zone->zone_start_pfn;
>
> size = zone->spanned_pages;
> freesize = zone->present_pages;
> @@ -7035,7 +7034,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
>
> set_pageblock_order();
> setup_usemap(zone);
> - init_currently_empty_zone(zone, zone_start_pfn, size);
> + init_currently_empty_zone(zone, zone->zone_start_pfn, size);
> memmap_init_zone(zone);
> }
> }
>

Reviewed-by: David Hildenbrand <[email protected]>

--
Thanks,

David / dhildenb

2021-01-05 16:57:33

by David Hildenbrand

Subject: Re: [PATCH v3 2/4] mm: simplify parameter of function memmap_init_zone()

[...]

> -void __meminit
> -memmap_init_zone(unsigned long size, int nid, unsigned long zone,
> - unsigned long start_pfn)
> +void __meminit memmap_init_zone(struct zone *zone)
> {
> + unsigned long size = zone->spanned_pages;
> + int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
> + unsigned long start_pfn = zone->zone_start_pfn;
> +

Nit: reverse Christmas tree.
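
I.e. longest declaration lines first, roughly like this (just to
illustrate the ordering, not necessarily the final form):

	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long size = zone->spanned_pages;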

> if (!vmem_map) {
> - memmap_init_range(size, nid, zone, start_pfn, start_pfn + size,
> + memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
> MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
> } else {
> struct page *start;
> @@ -556,7 +558,7 @@ memmap_init_zone(unsigned long size, int nid, unsigned long zone,
> args.start = start;
> args.end = start + size;
> args.nid = nid;
> - args.zone = zone;
> + args.zone = zone_id;
>
> efi_memmap_walk(virtual_memmap_init, &args);
> }
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 69ebf75be91c..b2a46ffdaf0b 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -6292,16 +6292,16 @@ static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
> }
> #endif
>
> -void __init __weak memmap_init_zone(unsigned long size, int nid,
> - unsigned long zone,
> - unsigned long zone_start_pfn)
> +void __init __weak memmap_init_zone(struct zone *zone)
> {
> unsigned long start_pfn, end_pfn, hole_start_pfn = 0;
> - unsigned long zone_end_pfn = zone_start_pfn + size;
> + int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
> + unsigned long zone_start_pfn = zone->zone_start_pfn;
> + unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;

dito.

> u64 pgcnt = 0;
> - int i;
>
> for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
> + unsigned long size;

You can just get rid of this variable IMHO.

(Also, there is an empty line missing right now)
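
Without the intermediate variable the loop body could look roughly like
this (only a sketch, reusing the identifiers already in the patch):

	if (end_pfn > start_pfn)
		memmap_init_range(end_pfn - start_pfn, nid, zone_id,
				  start_pfn, zone_end_pfn,
				  MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);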


Apart from that LGTM

--
Thanks,

David / dhildenb

2021-01-05 16:59:26

by David Hildenbrand

Subject: Re: [PATCH v3 3/4] mm: simplify parameter of setup_usemap()

On 05.01.21 08:47, Baoquan He wrote:
> Parameter 'zone' already carries all the needed information, so remove
> the other, now unnecessary parameters.
>
> Signed-off-by: Baoquan He <[email protected]>
> Reviewed-by: Mike Rapoport <[email protected]>
> ---
> mm/page_alloc.c | 17 +++++++----------
> 1 file changed, 7 insertions(+), 10 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index b2a46ffdaf0b..e0ce6fb6373b 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -6824,25 +6824,22 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
> return usemapsize / 8;
> }
>
> -static void __ref setup_usemap(struct pglist_data *pgdat,
> - struct zone *zone,
> - unsigned long zone_start_pfn,
> - unsigned long zonesize)
> +static void __ref setup_usemap(struct zone *zone)
> {
> - unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
> + unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
> + zone->spanned_pages);
> zone->pageblock_flags = NULL;
> if (usemapsize) {
> zone->pageblock_flags =
> memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
> - pgdat->node_id);
> + zone_to_nid(zone));
> if (!zone->pageblock_flags)
> panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
> - usemapsize, zone->name, pgdat->node_id);
> + usemapsize, zone->name, zone_to_nid(zone));
> }
> }
> #else
> -static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
> - unsigned long zone_start_pfn, unsigned long zonesize) {}
> +static inline void setup_usemap(struct zone *zone) {}
> #endif /* CONFIG_SPARSEMEM */
>
> #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
> @@ -7037,7 +7034,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
> continue;
>
> set_pageblock_order();
> - setup_usemap(pgdat, zone, zone_start_pfn, size);
> + setup_usemap(zone);
> init_currently_empty_zone(zone, zone_start_pfn, size);
> memmap_init_zone(zone);
> }
>

Reviewed-by: David Hildenbrand <[email protected]>

--
Thanks,

David / dhildenb

2021-01-07 13:21:34

by Baoquan He

Subject: Re: [PATCH v3 2/4] mm: simplify parameter of function memmap_init_zone()

On 01/05/21 at 05:53pm, David Hildenbrand wrote:
> [...]
>
> > -void __meminit
> > -memmap_init_zone(unsigned long size, int nid, unsigned long zone,
> > - unsigned long start_pfn)
> > +void __meminit memmap_init_zone(struct zone *zone)
> > {
> > + unsigned long size = zone->spanned_pages;
> > + int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
> > + unsigned long start_pfn = zone->zone_start_pfn;
> > +
>
> Nit: reverse Christmas tree.

Ah, yes, I will reorder these lines.

>
> > if (!vmem_map) {
> > - memmap_init_range(size, nid, zone, start_pfn, start_pfn + size,
> > + memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
> > MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
> > } else {
> > struct page *start;
> > @@ -556,7 +558,7 @@ memmap_init_zone(unsigned long size, int nid, unsigned long zone,
> > args.start = start;
> > args.end = start + size;
> > args.nid = nid;
> > - args.zone = zone;
> > + args.zone = zone_id;
> >
> > efi_memmap_walk(virtual_memmap_init, &args);
> > }
> > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> > index 69ebf75be91c..b2a46ffdaf0b 100644
> > --- a/mm/page_alloc.c
> > +++ b/mm/page_alloc.c
> > @@ -6292,16 +6292,16 @@ static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
> > }
> > #endif
> >
> > -void __init __weak memmap_init_zone(unsigned long size, int nid,
> > - unsigned long zone,
> > - unsigned long zone_start_pfn)
> > +void __init __weak memmap_init_zone(struct zone *zone)
> > {
> > unsigned long start_pfn, end_pfn, hole_start_pfn = 0;
> > - unsigned long zone_end_pfn = zone_start_pfn + size;
> > + int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
> > + unsigned long zone_start_pfn = zone->zone_start_pfn;
> > + unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
>
> dito.

OK.

>
> > u64 pgcnt = 0;
> > - int i;
> >
> > for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
> > + unsigned long size;
>
> You can just get rid of this variable IMHO.

Some people might like an intermediate local variable better in this case,
but I am fine with either; I will change it as you suggested.

>
> (Also, there is an empty line missing right now)

Sure. Thanks.

>
>
> Apart from that LGTM
>
> --
> Thanks,
>
> David / dhildenb