2018-09-27 07:00:52

by Arun KS

Subject: [PATCH v3] memory_hotplug: Free pages as higher order

When pages are freed at a higher order, the time the buddy
allocator spends coalescing them can be reduced. With a
section size of 256MB, the hot-add latency of a single section
improves from 50-60 ms to less than 1 ms, i.e. by roughly a
factor of 60.

Modify the external providers of the online callback to match
the changed interface.

Signed-off-by: Arun KS <[email protected]>
---
Changes since v2:
- Reuse code from __free_pages_boot_core()

Changes since v1:
- Removed prefetch()

Changes since RFC:
- Rebase.
- As suggested by Michal Hocko remove pages_per_block.
- Modified external providers of online_page_callback.

v2: https://lore.kernel.org/patchwork/patch/991363/
v1: https://lore.kernel.org/patchwork/patch/989445/
RFC: https://lore.kernel.org/patchwork/patch/984754/
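
For illustration, a minimal user-space sketch of the splitting
strategy applied by online_pages_blocks() in the diff below. This is
not kernel code: the pfn values and the MAX_ORDER of 11 are made up,
and __builtin_ctzl() stands in for the kernel's __ffs().

#include <stdio.h>

#define MAX_ORDER 11UL

int main(void)
{
        /* Pretend we are onlining a 256MB section of 4KB pages. */
        unsigned long start = 0x10000;          /* first pfn, section-aligned */
        unsigned long end = start + 0x10000;    /* 65536 pages == 256MB */

        while (start < end) {
                /* Largest order the allocator accepts... */
                unsigned long order = MAX_ORDER - 1;
                /* ...capped by the alignment of the current pfn
                 * (start is never 0 here, so ctzl is well defined)... */
                unsigned long align = (unsigned long)__builtin_ctzl(start);

                if (align < order)
                        order = align;
                /* ...and shrunk until the block fits inside the range. */
                while (start + (1UL << order) > end)
                        order--;

                printf("free pfn 0x%lx, order %lu (%lu pages)\n",
                       start, order, 1UL << order);
                start += 1UL << order;
        }
        return 0;
}

An aligned section like this comes out as 64 order-10 blocks; an
unaligned start would simply produce a short run of smaller leading
blocks until the pfn reaches a higher alignment.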

---
drivers/hv/hv_balloon.c        |  6 ++++--
drivers/xen/balloon.c          | 18 ++++++++++++++---
include/linux/memory_hotplug.h |  2 +-
mm/internal.h                  |  1 +
mm/memory_hotplug.c            | 44 ++++++++++++++++++++++++++++++------------
mm/page_alloc.c                |  2 +-
6 files changed, 54 insertions(+), 19 deletions(-)

diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b1b7880..c5bc0b5 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -771,7 +771,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}
}

-static void hv_online_page(struct page *pg)
+static int hv_online_page(struct page *pg, unsigned int order)
{
struct hv_hotadd_state *has;
unsigned long flags;
@@ -783,10 +783,12 @@ static void hv_online_page(struct page *pg)
if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))
continue;

- hv_page_online_one(has, pg);
+ hv_bring_pgs_online(has, pfn, (1UL << order));
break;
}
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+
+ return 0;
}

static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e12bb25..010cf4d 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -390,8 +390,8 @@ static enum bp_state reserve_additional_memory(void)

/*
* add_memory_resource() will call online_pages() which in its turn
- * will call xen_online_page() callback causing deadlock if we don't
- * release balloon_mutex here. Unlocking here is safe because the
+ * will call xen_bring_pgs_online() callback causing deadlock if we
+ * don't release balloon_mutex here. Unlocking here is safe because the
* callers drop the mutex before trying again.
*/
mutex_unlock(&balloon_mutex);
@@ -422,6 +422,18 @@ static void xen_online_page(struct page *page)
mutex_unlock(&balloon_mutex);
}

+static int xen_bring_pgs_online(struct page *pg, unsigned int order)
+{
+ unsigned long i, size = (1 << order);
+ unsigned long start_pfn = page_to_pfn(pg);
+
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
+ for (i = 0; i < size; i++)
+ xen_online_page(pfn_to_page(start_pfn + i));
+
+ return 0;
+}
+
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
if (val == MEM_ONLINE)
@@ -744,7 +756,7 @@ static int __init balloon_init(void)
balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
- set_online_page_callback(&xen_online_page);
+ set_online_page_callback(&xen_bring_pgs_online);
register_memory_notifier(&xen_memory_nb);
register_sysctl_table(xen_root);

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 34a2822..7b04c1d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -87,7 +87,7 @@ extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);

-typedef void (*online_page_callback_t)(struct page *page);
+typedef int (*online_page_callback_t)(struct page *page, unsigned int order);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
diff --git a/mm/internal.h b/mm/internal.h
index 87256ae..2b0efac 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -163,6 +163,7 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
unsigned int order);
+extern void __free_pages_boot_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 38d94b7..3c81f20 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -47,7 +47,7 @@
* and restore_online_page_callback() for generic callback restore.
*/

-static void generic_online_page(struct page *page);
+static int generic_online_page(struct page *page, unsigned int order);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);
@@ -655,26 +655,46 @@ void __online_page_free(struct page *page)
}
EXPORT_SYMBOL_GPL(__online_page_free);

-static void generic_online_page(struct page *page)
+static int generic_online_page(struct page *page, unsigned int order)
{
- __online_page_set_limits(page);
- __online_page_increment_counters(page);
- __online_page_free(page);
+ __free_pages_boot_core(page, order);
+ totalram_pages += (1UL << order);
+#ifdef CONFIG_HIGHMEM
+ if (PageHighMem(page))
+ totalhigh_pages += (1UL << order);
+#endif
+ return 0;
+}
+
+static int online_pages_blocks(unsigned long start, unsigned long nr_pages)
+{
+ unsigned long end = start + nr_pages;
+ int order, ret, onlined_pages = 0;
+
+ while (start < end) {
+ order = min(MAX_ORDER - 1UL, __ffs(start));
+
+ while (start + (1UL << order) > end)
+ order--;
+
+ ret = (*online_page_callback)(pfn_to_page(start), order);
+ if (!ret)
+ onlined_pages += (1UL << order);
+ else if (ret > 0)
+ onlined_pages += ret;
+
+ start += (1UL << order);
+ }
+ return onlined_pages;
+}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
{
- unsigned long i;
unsigned long onlined_pages = *(unsigned long *)arg;
- struct page *page;

if (PageReserved(pfn_to_page(start_pfn)))
- for (i = 0; i < nr_pages; i++) {
- page = pfn_to_page(start_pfn + i);
- (*online_page_callback)(page);
- onlined_pages++;
- }
+ onlined_pages = online_pages_blocks(start_pfn, nr_pages);

online_mem_sections(start_pfn, start_pfn + nr_pages);

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 89d2a2a..a442381 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1252,7 +1252,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}

-static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+void __free_pages_boot_core(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
struct page *p = page;
--
1.9.1



2018-09-27 07:11:39

by Oscar Salvador

Subject: Re: [PATCH v3] memory_hotplug: Free pages as higher order

On Thu, Sep 27, 2018 at 12:28:50PM +0530, Arun KS wrote:
> + __free_pages_boot_core(page, order);

I am not sure, but if we are going to use that function from the memory-hotplug code,
we might want to rename it to something more generic?
The word "boot" suggests that this is only called from the boot stage.

And what about the prefetch operations?
I saw that you removed them in your previous patch and that had some benefits [1].

Should we remove them here as well?

[1] https://patchwork.kernel.org/patch/10613359/
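
For context, the function in question looked roughly like this at the
time (quoted approximately from mm/page_alloc.c; details may vary by
tree). The prefetchw() hints on each struct page are the prefetch
operations meant above:

static void __init __free_pages_boot_core(struct page *page, unsigned int order)
{
        unsigned int nr_pages = 1 << order;
        struct page *p = page;
        unsigned int loop;

        /* Warm up the cache line of each struct page before touching it. */
        prefetchw(p);
        for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
                prefetchw(p + 1);
                __ClearPageReserved(p);
                set_page_count(p, 0);
        }
        __ClearPageReserved(p);
        set_page_count(p, 0);

        page_zone(page)->managed_pages += nr_pages;
        set_page_refcounted(page);
        __free_pages(page, order);
}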

Thanks
--
Oscar Salvador
SUSE L3

2018-09-27 07:11:44

by Jürgen Groß

Subject: Re: [PATCH v3] memory_hotplug: Free pages as higher order

On 27/09/18 08:58, Arun KS wrote:
> When pages are freed at a higher order, the time the buddy
> allocator spends coalescing them can be reduced. With a
> section size of 256MB, the hot-add latency of a single section
> improves from 50-60 ms to less than 1 ms, i.e. by roughly a
> factor of 60.
>
> Modify the external providers of the online callback to match
> the changed interface.
>
> Signed-off-by: Arun KS <[email protected]>
> ---
> Changes since v2:
> - Reuse code from __free_pages_boot_core()
>
> Changes since v1:
> - Removed prefetch()
>
> Changes since RFC:
> - Rebase.
> - As suggested by Michal Hocko remove pages_per_block.
> - Modified external providers of online_page_callback.
>
> v2: https://lore.kernel.org/patchwork/patch/991363/
> v1: https://lore.kernel.org/patchwork/patch/989445/
> RFC: https://lore.kernel.org/patchwork/patch/984754/
>
> ---
> drivers/hv/hv_balloon.c        |  6 ++++--
> drivers/xen/balloon.c          | 18 ++++++++++++++---
> include/linux/memory_hotplug.h |  2 +-
> mm/internal.h                  |  1 +
> mm/memory_hotplug.c            | 44 ++++++++++++++++++++++++++++++------------
> mm/page_alloc.c                |  2 +-
> 6 files changed, 54 insertions(+), 19 deletions(-)
>

...

> diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
> index e12bb25..010cf4d 100644
> --- a/drivers/xen/balloon.c
> +++ b/drivers/xen/balloon.c
> @@ -390,8 +390,8 @@ static enum bp_state reserve_additional_memory(void)
>
> /*
> * add_memory_resource() will call online_pages() which in its turn
> - * will call xen_online_page() callback causing deadlock if we don't
> - * release balloon_mutex here. Unlocking here is safe because the
> + * will call xen_bring_pgs_online() callback causing deadlock if we
> + * don't release balloon_mutex here. Unlocking here is safe because the
> * callers drop the mutex before trying again.
> */
> mutex_unlock(&balloon_mutex);
> @@ -422,6 +422,18 @@ static void xen_online_page(struct page *page)
> mutex_unlock(&balloon_mutex);
> }
>
> +static int xen_bring_pgs_online(struct page *pg, unsigned int order)
> +{
> + unsigned long i, size = (1 << order);
> + unsigned long start_pfn = page_to_pfn(pg);
> +
> + pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
> + for (i = 0; i < size; i++)
> + xen_online_page(pfn_to_page(start_pfn + i));

xen_online_page() isn't very complex and this is the only user.

Why don't you move its body in here and drop the extra function?
And now you can execute the loop with balloon_mutex held instead of
taking and releasing it in each iteration of the loop.
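
Something like this, perhaps (untested sketch, reusing the existing
__online_page_set_limits() and __balloon_append() helpers):

static int xen_online_page(struct page *page, unsigned int order)
{
        unsigned long i, size = (1UL << order);
        unsigned long start_pfn = page_to_pfn(page);
        struct page *p;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);

        /* Take balloon_mutex once for the whole block. */
        mutex_lock(&balloon_mutex);
        for (i = 0; i < size; i++) {
                p = pfn_to_page(start_pfn + i);
                __online_page_set_limits(p);
                __balloon_append(p);
        }
        mutex_unlock(&balloon_mutex);

        return 0;
}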


Juergen

2018-09-27 08:11:54

by Arun KS

Subject: Re: [PATCH v3] memory_hotplug: Free pages as higher order

On 2018-09-27 12:39, Oscar Salvador wrote:
> On Thu, Sep 27, 2018 at 12:28:50PM +0530, Arun KS wrote:
>> + __free_pages_boot_core(page, order);
>
Hi,

> I am not sure, but if we are going to use that function from the
> memory-hotplug code, we might want to rename it to something more
> generic?
> The word "boot" suggests that this is only called from the boot stage.
I'll rename it to __free_pages_core().

>
> And what about the prefetch operations?
> I saw that you removed them in your previous patch and that had some
> benefits [1].
>
> Should we remove them here as well?
Sure. Will update this as well.

Thanks,
Arun
>
> [1] https://patchwork.kernel.org/patch/10613359/
>
> Thanks

2018-09-27 08:12:25

by Arun KS

Subject: Re: [PATCH v3] memory_hotplug: Free pages as higher order

On 2018-09-27 12:41, Juergen Gross wrote:
> On 27/09/18 08:58, Arun KS wrote:
>> When pages are freed at a higher order, the time the buddy
>> allocator spends coalescing them can be reduced. With a
>> section size of 256MB, the hot-add latency of a single section
>> improves from 50-60 ms to less than 1 ms, i.e. by roughly a
>> factor of 60.
>>
>> Modify the external providers of the online callback to match
>> the changed interface.
>>
>> Signed-off-by: Arun KS <[email protected]>
>> ---
>> Changes since v2:
>> - Reuse code from __free_pages_boot_core()
>>
>> Changes since v1:
>> - Removed prefetch()
>>
>> Changes since RFC:
>> - Rebase.
>> - As suggested by Michal Hocko remove pages_per_block.
>> - Modified external providers of online_page_callback.
>>
>> v2: https://lore.kernel.org/patchwork/patch/991363/
>> v1: https://lore.kernel.org/patchwork/patch/989445/
>> RFC: https://lore.kernel.org/patchwork/patch/984754/
>>
>> ---
>> drivers/hv/hv_balloon.c        |  6 ++++--
>> drivers/xen/balloon.c          | 18 ++++++++++++++---
>> include/linux/memory_hotplug.h |  2 +-
>> mm/internal.h                  |  1 +
>> mm/memory_hotplug.c            | 44 ++++++++++++++++++++++++++++++------------
>> mm/page_alloc.c                |  2 +-
>> 6 files changed, 54 insertions(+), 19 deletions(-)
>>
>
> ...
>
>> diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
>> index e12bb25..010cf4d 100644
>> --- a/drivers/xen/balloon.c
>> +++ b/drivers/xen/balloon.c
>> @@ -390,8 +390,8 @@ static enum bp_state reserve_additional_memory(void)
>>
>> /*
>> * add_memory_resource() will call online_pages() which in its turn
>> - * will call xen_online_page() callback causing deadlock if we don't
>> - * release balloon_mutex here. Unlocking here is safe because the
>> + * will call xen_bring_pgs_online() callback causing deadlock if we
>> + * don't release balloon_mutex here. Unlocking here is safe because the
>> * callers drop the mutex before trying again.
>> */
>> mutex_unlock(&balloon_mutex);
>> @@ -422,6 +422,18 @@ static void xen_online_page(struct page *page)
>> mutex_unlock(&balloon_mutex);
>> }
>>
>> +static int xen_bring_pgs_online(struct page *pg, unsigned int order)
>> +{
>> + unsigned long i, size = (1 << order);
>> + unsigned long start_pfn = page_to_pfn(pg);
>> +
>> + pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
>> + for (i = 0; i < size; i++)
>> + xen_online_page(pfn_to_page(start_pfn + i));
>

Hi,

> xen_online_page() isn't very complex and this is the only user.
>
> Why don't you move its body in here and drop the extra function?
> And now you can execute the loop with balloon_mutex held instead of
> taking and releasing it in each iteration of the loop.
Point taken. Will incorporate them.

Regards,
Arun
>
>
> Juergen