Subject: [PATCH] memcg: do not recalculate section unnecessarily in init_section_page_cgroup

Signed-off-by: Fernando Luis Vazquez Cao <[email protected]>
---

diff -urNp linux-2.6.28-rc2-mm1-orig/mm/page_cgroup.c linux-2.6.28-rc2-mm1/mm/page_cgroup.c
--- linux-2.6.28-rc2-mm1-orig/mm/page_cgroup.c 2008-10-30 12:49:27.000000000 +0900
+++ linux-2.6.28-rc2-mm1/mm/page_cgroup.c 2008-10-30 12:52:41.000000000 +0900
@@ -99,13 +99,11 @@ struct page_cgroup *lookup_page_cgroup(s

int __meminit init_section_page_cgroup(unsigned long pfn)
{
- struct mem_section *section;
+ struct mem_section *section = __pfn_to_section(pfn);
struct page_cgroup *base, *pc;
unsigned long table_size;
int nid, index;

- section = __pfn_to_section(pfn);
-
if (section->page_cgroup)
return 0;

@@ -131,7 +129,6 @@ int __meminit init_section_page_cgroup(u
__init_page_cgroup(pc, pfn + index);
}

- section = __pfn_to_section(pfn);
section->page_cgroup = base - pfn;
total_usage += table_size;
return 0;


2008-10-30 06:46:18

by Kamezawa Hiroyuki

[permalink] [raw]
Subject: Re: [PATCH] memcg: do not recalculate section unnecessarily in init_section_page_cgroup

On Thu, 30 Oct 2008 13:38:50 +0900
Fernando Luis Vázquez Cao <[email protected]> wrote:

> Signed-off-by: Fernando Luis Vazquez Cao <[email protected]>
> ---
>
Thanks, but please add a patch description explaining what this patch fixes.
Reviewed-by: KAMEZAWA Hiroyuki <[email protected]>


> diff -urNp linux-2.6.28-rc2-mm1-orig/mm/page_cgroup.c linux-2.6.28-rc2-mm1/mm/page_cgroup.c
> --- linux-2.6.28-rc2-mm1-orig/mm/page_cgroup.c 2008-10-30 12:49:27.000000000 +0900
> +++ linux-2.6.28-rc2-mm1/mm/page_cgroup.c 2008-10-30 12:52:41.000000000 +0900
> @@ -99,13 +99,11 @@ struct page_cgroup *lookup_page_cgroup(s
>
> int __meminit init_section_page_cgroup(unsigned long pfn)
> {
> - struct mem_section *section;
> + struct mem_section *section = __pfn_to_section(pfn);
> struct page_cgroup *base, *pc;
> unsigned long table_size;
> int nid, index;
>
> - section = __pfn_to_section(pfn);
> -
> if (section->page_cgroup)
> return 0;
>
> @@ -131,7 +129,6 @@ int __meminit init_section_page_cgroup(u
> __init_page_cgroup(pc, pfn + index);
> }
>
> - section = __pfn_to_section(pfn);
> section->page_cgroup = base - pfn;
> total_usage += table_size;
> return 0;
>
>
>

Subject: [PATCH] memcg: do not recalculate section unnecessarily in init_section_page_cgroup

In init_section_page_cgroup() the section a given pfn belongs to is
calculated at the top of the function and, despite the fact that the
pfn/section correspondence does not change, it is recalculated further
down the same function. By computing this just once and reusing that
value we save some bytes in the object file and do not waste CPU cycles.

Signed-off-by: Fernando Luis Vazquez Cao <[email protected]>
Reviewed-by: KAMEZAWA Hiroyuki <[email protected]>
---

diff -urNp linux-2.6.28-rc2-mm1-orig/mm/page_cgroup.c linux-2.6.28-rc2-mm1/mm/page_cgroup.c
--- linux-2.6.28-rc2-mm1-orig/mm/page_cgroup.c 2008-10-30 12:49:27.000000000 +0900
+++ linux-2.6.28-rc2-mm1/mm/page_cgroup.c 2008-10-30 12:52:41.000000000 +0900
@@ -99,13 +99,11 @@ struct page_cgroup *lookup_page_cgroup(s

int __meminit init_section_page_cgroup(unsigned long pfn)
{
- struct mem_section *section;
+ struct mem_section *section = __pfn_to_section(pfn);
struct page_cgroup *base, *pc;
unsigned long table_size;
int nid, index;

- section = __pfn_to_section(pfn);
-
if (section->page_cgroup)
return 0;

@@ -131,7 +129,6 @@ int __meminit init_section_page_cgroup(u
__init_page_cgroup(pc, pfn + index);
}

- section = __pfn_to_section(pfn);
section->page_cgroup = base - pfn;
total_usage += table_size;
return 0;