This series of patches is follow-up work on (and depends on)
Toshi Kani <[email protected]>'s patches "fix memory leak/
panic in ioremap huge pages".
This series of patches are tested on 4.9 kernel with Cortex-A75
based SoC.
These patches can also go into '-stable' branch.
Chintan Pandya (4):
ioremap: Update pgtable free interfaces with addr
arm64: tlbflush: Introduce __flush_tlb_kernel_pgtable
arm64: Implement page table free interfaces
Revert "arm64: Enforce BBM for huge IO/VMAP mappings"
From V4->V5:
- Add new API __flush_tlb_kernel_pgtable(unsigned long addr)
for kernel addresses
From V3->V4:
- Add header for 'addr' in x86 implementation
- Re-order pmd/pud clear and table free
- Avoid redundant TLB invalidation in one particular case
From V2->V3:
- Use the existing page table free interface to do arm64
specific things
From V1->V2:
- Rebased my patches on top of "[PATCH v2 1/2] mm/vmalloc:
Add interfaces to free unmapped page table"
- Honored BBM for ARM64
arch/arm64/include/asm/tlbflush.h | 6 ++++++
arch/arm64/mm/mmu.c | 45 ++++++++++++++++++++++++++++-----------
arch/x86/mm/pgtable.c | 6 ++++--
include/asm-generic/pgtable.h | 8 +++----
lib/ioremap.c | 4 ++--
5 files changed, 49 insertions(+), 20 deletions(-)
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Commit 15122ee2c515a ("arm64: Enforce BBM for huge
IO/VMAP mappings") was a temporary work-around until the
issues with CONFIG_HAVE_ARCH_HUGE_VMAP got fixed.
Revert this change as we now have fixes for the issue.
Signed-off-by: Chintan Pandya <[email protected]>
---
No change in v5
arch/arm64/mm/mmu.c | 8 --------
1 file changed, 8 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3552c7a..11a24a1 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -935,10 +935,6 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
- /* ioremap_page_range doesn't honour BBM */
- if (pud_present(READ_ONCE(*pudp)))
- return 0;
-
BUG_ON(phys & ~PUD_MASK);
set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
return 1;
@@ -949,10 +945,6 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
- /* ioremap_page_range doesn't honour BBM */
- if (pmd_present(READ_ONCE(*pmdp)))
- return 0;
-
BUG_ON(phys & ~PMD_MASK);
set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
return 1;
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
This patch ("mm/vmalloc: Add interfaces to free unmapped
page table") adds following 2 interfaces to free the page
table in case we implement huge mapping.
pud_free_pmd_page() and pmd_free_pte_page()
Some architectures (like arm64) need to do proper TLB
maintenance after updating a pagetable entry even in map.
Why ? Read this,
https://patchwork.kernel.org/patch/10134581/
Pass 'addr' in these interfaces so that proper TLB ops
can be performed.
Signed-off-by: Chintan Pandya <[email protected]>
---
No change in v5.
arch/arm64/mm/mmu.c | 4 ++--
arch/x86/mm/pgtable.c | 6 ++++--
include/asm-generic/pgtable.h | 8 ++++----
lib/ioremap.c | 4 ++--
4 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2dbb2c9..da98828 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -973,12 +973,12 @@ int pmd_clear_huge(pmd_t *pmdp)
return 1;
}
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return pud_none(*pud);
}
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return pmd_none(*pmd);
}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 34cda7e..f640ba3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -706,11 +706,12 @@ int pmd_clear_huge(pmd_t *pmd)
/**
* pud_free_pmd_page - Clear pud entry and free pmd page.
* @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
*
* Context: The pud range has been unmaped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
pmd_t *pmd;
int i;
@@ -733,11 +734,12 @@ int pud_free_pmd_page(pud_t *pud)
/**
* pmd_free_pte_page - Clear pmd entry and free pte page.
* @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
*
* Context: The pmd range has been unmaped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bfbb44a..e6310f4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -983,8 +983,8 @@ static inline int p4d_clear_huge(p4d_t *p4d)
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1010,11 +1010,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bba..517f585 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
+ pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
+ pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Implement pud_free_pmd_page() and pmd_free_pte_page().
Implementation requires,
1) Freeing of the un-used next level page tables
2) Clearing off the current pud/pmd entry
3) Invalidating the TLB, which could have a previously
valid but now stale entry
Signed-off-by: Chintan Pandya <[email protected]>
---
V4->V5:
- Using __flush_tlb_kernel_pgtable instead of
flush_tlb_kernel_range
arch/arm64/mm/mmu.c | 33 +++++++++++++++++++++++++++++++--
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index da98828..3552c7a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,6 +45,7 @@
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
+#include <asm/tlbflush.h>
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@@ -973,12 +974,40 @@ int pmd_clear_huge(pmd_t *pmdp)
return 1;
}
+static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
+{
+ pmd_t *table;
+
+ if (pmd_val(*pmd)) {
+ table = __va(pmd_val(*pmd));
+ pmd_clear(pmd);
+ if (tlb_inv)
+ __flush_tlb_kernel_pgtable(addr);
+
+ free_page((unsigned long) table);
+ }
+ return 1;
+}
+
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- return pud_none(*pud);
+ pmd_t *table;
+ int i;
+
+ if (pud_val(*pud)) {
+ table = __va(pud_val(*pud));
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ __pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE),
+ false);
+
+ pud_clear(pud);
+ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+ free_page((unsigned long) table);
+ }
+ return 1;
}
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
- return pmd_none(*pmd);
+ return __pmd_free_pte_page(pmd, addr, true);
}
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Add an interface to invalidate intermediate page tables
from TLB for kernel.
Signed-off-by: Chintan Pandya <[email protected]>
---
Introduced in v5
arch/arm64/include/asm/tlbflush.h | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 9e82dd7..6a4816d 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -209,6 +209,12 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
dsb(ish);
}
+static inline void __flush_tlb_kernel_pgtable(unsigned long addr)
+{
+ addr >>= 12;
+ __tlbi(vaae1is, addr);
+ dsb(ish);
+}
#endif
#endif
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Hi Chintan,
On Tue, Mar 27, 2018 at 06:54:59PM +0530, Chintan Pandya wrote:
> Implement pud_free_pmd_page() and pmd_free_pte_page().
>
> Implementation requires,
> 1) Freeing of the un-used next level page tables
> 2) Clearing off the current pud/pmd entry
> 3) Invalidate TLB which could have previously
> valid but not stale entry
>
> Signed-off-by: Chintan Pandya <[email protected]>
> ---
> V4->V5:
> - Using __flush_tlb_kernel_pgtable instead of
> flush_tlb_kernel_range
>
>
> arch/arm64/mm/mmu.c | 33 +++++++++++++++++++++++++++++++--
> 1 file changed, 31 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index da98828..3552c7a 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -45,6 +45,7 @@
> #include <asm/memblock.h>
> #include <asm/mmu_context.h>
> #include <asm/ptdump.h>
> +#include <asm/tlbflush.h>
>
> #define NO_BLOCK_MAPPINGS BIT(0)
> #define NO_CONT_MAPPINGS BIT(1)
> @@ -973,12 +974,40 @@ int pmd_clear_huge(pmd_t *pmdp)
> return 1;
> }
>
> +static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
> +{
> + pmd_t *table;
> +
> + if (pmd_val(*pmd)) {
Please can you follow what I did in 20a004e7b017 ("arm64: mm: Use
READ_ONCE/WRITE_ONCE when accessing page tables") and:
1. Use consistent naming, so pmd_t * pmdp.
2. Use READ_ONCE to dereference the entry once into a local.
Similarly for the pud code below.
> + table = __va(pmd_val(*pmd));
> + pmd_clear(pmd);
> + if (tlb_inv)
> + __flush_tlb_kernel_pgtable(addr);
> +
> + free_page((unsigned long) table);
Hmm. Surely it's only safe to call free_page if !tlb_inv in situations when
the page table is already disconnected at a higher level? That doesn't
appear to be the case with the function below, which still has the pud
installed. What am I missing?
> + }
> + return 1;
> +}
> +
> int pud_free_pmd_page(pud_t *pud, unsigned long addr)
> {
> - return pud_none(*pud);
> + pmd_t *table;
> + int i;
> +
> + if (pud_val(*pud)) {
> + table = __va(pud_val(*pud));
> + for (i = 0; i < PTRS_PER_PMD; i++)
> + __pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE),
> + false);
> +
> + pud_clear(pud);
> + flush_tlb_kernel_range(addr, addr + PUD_SIZE);
Why aren't you using __flush_tlb_kernel_pgtable here?
Will
On 3/27/2018 11:30 PM, Will Deacon wrote:
> Hi Chintan,
Hi Will,
>
> On Tue, Mar 27, 2018 at 06:54:59PM +0530, Chintan Pandya wrote:
>> Implement pud_free_pmd_page() and pmd_free_pte_page().
>>
>> Implementation requires,
>> 1) Freeing of the un-used next level page tables
>> 2) Clearing off the current pud/pmd entry
>> 3) Invalidate TLB which could have previously
>> valid but not stale entry
>>
>> Signed-off-by: Chintan Pandya <[email protected]>
>> ---
>> V4->V5:
>> - Using __flush_tlb_kernel_pgtable instead of
>> flush_tlb_kernel_range
>>
>>
>> arch/arm64/mm/mmu.c | 33 +++++++++++++++++++++++++++++++--
>> 1 file changed, 31 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index da98828..3552c7a 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -45,6 +45,7 @@
>> #include <asm/memblock.h>
>> #include <asm/mmu_context.h>
>> #include <asm/ptdump.h>
>> +#include <asm/tlbflush.h>
>>
>> #define NO_BLOCK_MAPPINGS BIT(0)
>> #define NO_CONT_MAPPINGS BIT(1)
>> @@ -973,12 +974,40 @@ int pmd_clear_huge(pmd_t *pmdp)
>> return 1;
>> }
>>
>> +static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
>> +{
>> + pmd_t *table;
>> +
>> + if (pmd_val(*pmd)) {
>
> Please can you follow what I did in 20a004e7b017 ("arm64: mm: Use
> READ_ONCE/WRITE_ONCE when accessing page tables") and:
>
> 1. Use consistent naming, so pmd_t * pmdp.
> 2. Use READ_ONCE to dereference the entry once into a local.
>
> Similarly for the pud code below.
Sure. I'll fix this in v6.
>
>> + table = __va(pmd_val(*pmd));
>> + pmd_clear(pmd);
>> + if (tlb_inv)
>> + __flush_tlb_kernel_pgtable(addr);
>> +
>> + free_page((unsigned long) table);
>
> Hmm. Surely it's only safe to call free_page if !tlb_inv in situations when
> the page table is already disconnected at a higher level? That doesn't
> appear to be the case with the function below, which still has the pud
> installed. What am I missing?
>
Point ! Without the invalidation, free'ing a page is not safe. Better, I
do __flush_tlb_kernel_pgtable() every time. This might not be as costly
as flush_tlb_kernel_range().
>> + }
>> + return 1;
>> +}
>> +
>> int pud_free_pmd_page(pud_t *pud, unsigned long addr)
>> {
>> - return pud_none(*pud);
>> + pmd_t *table;
>> + int i;
>> +
>> + if (pud_val(*pud)) {
>> + table = __va(pud_val(*pud));
>> + for (i = 0; i < PTRS_PER_PMD; i++)
>> + __pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE),
>> + false);
>> +
>> + pud_clear(pud);
>> + flush_tlb_kernel_range(addr, addr + PUD_SIZE);
>
> Why aren't you using __flush_tlb_kernel_pgtable here?
>
Now that I will call __flush_tlb_kernel_pgtable() for every PMD, I can
use __flush_tlb_kernel_pgtable() here as well.
Previously, the thought was that invalidating the PUD by VA would
not always work, because the PUD may still have a next level of
valid mapping present in the table (a valid next PMD but an invalid
next-to-next PTE). In that case, doing just
__flush_tlb_kernel_pgtable() for the PUD might not be enough. We
need to invalidate the subsequent tables as well, which I was
skipping as an optimization. So, I used flush_tlb_kernel_range().
I will upload v6.
> Will
>
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project
Hi Chintan,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on v4.16-rc7]
[also build test ERROR on next-20180328]
[cannot apply to arm64/for-next/core tip/x86/core asm-generic/master]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Chintan-Pandya/Fix-issues-with-huge-mapping-in-ioremap-for-ARM64/20180328-192254
config: i386-randconfig-x014-201812 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
# save the attached .config to linux build tree
make ARCH=i386
All errors (new ones prefixed by >>):
arch/x86/mm/pgtable.c: In function 'pud_free_pmd_page':
>> arch/x86/mm/pgtable.c:725:8: error: too few arguments to function 'pmd_free_pte_page'
if (!pmd_free_pte_page(&pmd[i]))
^~~~~~~~~~~~~~~~~
In file included from arch/x86/include/asm/pgtable.h:1292:0,
from include/linux/memremap.h:8,
from include/linux/mm.h:27,
from arch/x86/mm/pgtable.c:2:
include/asm-generic/pgtable.h:987:5: note: declared here
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
^~~~~~~~~~~~~~~~~
vim +/pmd_free_pte_page +725 arch/x86/mm/pgtable.c
b6bdb751 Toshi Kani 2018-03-22 705
b6bdb751 Toshi Kani 2018-03-22 706 /**
b6bdb751 Toshi Kani 2018-03-22 707 * pud_free_pmd_page - Clear pud entry and free pmd page.
b6bdb751 Toshi Kani 2018-03-22 708 * @pud: Pointer to a PUD.
5b7ee34c Chintan Pandya 2018-03-27 709 * @addr: Virtual address associated with pud.
b6bdb751 Toshi Kani 2018-03-22 710 *
b6bdb751 Toshi Kani 2018-03-22 711 * Context: The pud range has been unmaped and TLB purged.
b6bdb751 Toshi Kani 2018-03-22 712 * Return: 1 if clearing the entry succeeded. 0 otherwise.
b6bdb751 Toshi Kani 2018-03-22 713 */
5b7ee34c Chintan Pandya 2018-03-27 714 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
b6bdb751 Toshi Kani 2018-03-22 715 {
28ee90fe Toshi Kani 2018-03-22 716 pmd_t *pmd;
28ee90fe Toshi Kani 2018-03-22 717 int i;
28ee90fe Toshi Kani 2018-03-22 718
28ee90fe Toshi Kani 2018-03-22 719 if (pud_none(*pud))
28ee90fe Toshi Kani 2018-03-22 720 return 1;
28ee90fe Toshi Kani 2018-03-22 721
28ee90fe Toshi Kani 2018-03-22 722 pmd = (pmd_t *)pud_page_vaddr(*pud);
28ee90fe Toshi Kani 2018-03-22 723
28ee90fe Toshi Kani 2018-03-22 724 for (i = 0; i < PTRS_PER_PMD; i++)
28ee90fe Toshi Kani 2018-03-22 @725 if (!pmd_free_pte_page(&pmd[i]))
28ee90fe Toshi Kani 2018-03-22 726 return 0;
28ee90fe Toshi Kani 2018-03-22 727
28ee90fe Toshi Kani 2018-03-22 728 pud_clear(pud);
28ee90fe Toshi Kani 2018-03-22 729 free_page((unsigned long)pmd);
28ee90fe Toshi Kani 2018-03-22 730
28ee90fe Toshi Kani 2018-03-22 731 return 1;
b6bdb751 Toshi Kani 2018-03-22 732 }
b6bdb751 Toshi Kani 2018-03-22 733
:::::: The code at line 725 was first introduced by commit
:::::: 28ee90fe6048fa7b7ceaeb8831c0e4e454a4cf89 x86/mm: implement free pmd/pte page interfaces
:::::: TO: Toshi Kani <[email protected]>
:::::: CC: Linus Torvalds <[email protected]>
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
On 3/28/2018 5:20 PM, kbuild test robot wrote:
> @725 if (!pmd_free_pte_page(&pmd[i]))
My bad ! Will fix this in v7
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project
Hi Chintan,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on v4.16-rc7]
[also build test WARNING on next-20180328]
[cannot apply to arm64/for-next/core tip/x86/core asm-generic/master]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Chintan-Pandya/Fix-issues-with-huge-mapping-in-ioremap-for-ARM64/20180328-192254
config: x86_64-randconfig-x019-201812 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64
All warnings (new ones prefixed by >>):
In file included from include/asm-generic/bug.h:5:0,
from arch/x86/include/asm/bug.h:83,
from include/linux/bug.h:5,
from include/linux/mmdebug.h:5,
from include/linux/mm.h:9,
from arch/x86//mm/pgtable.c:2:
arch/x86//mm/pgtable.c: In function 'pud_free_pmd_page':
arch/x86//mm/pgtable.c:725:8: error: too few arguments to function 'pmd_free_pte_page'
if (!pmd_free_pte_page(&pmd[i]))
^
include/linux/compiler.h:58:30: note: in definition of macro '__trace_if'
if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
^~~~
>> arch/x86//mm/pgtable.c:725:3: note: in expansion of macro 'if'
if (!pmd_free_pte_page(&pmd[i]))
^~
In file included from arch/x86/include/asm/pgtable.h:1292:0,
from include/linux/memremap.h:8,
from include/linux/mm.h:27,
from arch/x86//mm/pgtable.c:2:
include/asm-generic/pgtable.h:987:5: note: declared here
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
^~~~~~~~~~~~~~~~~
In file included from include/asm-generic/bug.h:5:0,
from arch/x86/include/asm/bug.h:83,
from include/linux/bug.h:5,
from include/linux/mmdebug.h:5,
from include/linux/mm.h:9,
from arch/x86//mm/pgtable.c:2:
arch/x86//mm/pgtable.c:725:8: error: too few arguments to function 'pmd_free_pte_page'
if (!pmd_free_pte_page(&pmd[i]))
^
include/linux/compiler.h:58:42: note: in definition of macro '__trace_if'
if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
^~~~
>> arch/x86//mm/pgtable.c:725:3: note: in expansion of macro 'if'
if (!pmd_free_pte_page(&pmd[i]))
^~
In file included from arch/x86/include/asm/pgtable.h:1292:0,
from include/linux/memremap.h:8,
from include/linux/mm.h:27,
from arch/x86//mm/pgtable.c:2:
include/asm-generic/pgtable.h:987:5: note: declared here
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
^~~~~~~~~~~~~~~~~
In file included from include/asm-generic/bug.h:5:0,
from arch/x86/include/asm/bug.h:83,
from include/linux/bug.h:5,
from include/linux/mmdebug.h:5,
from include/linux/mm.h:9,
from arch/x86//mm/pgtable.c:2:
arch/x86//mm/pgtable.c:725:8: error: too few arguments to function 'pmd_free_pte_page'
if (!pmd_free_pte_page(&pmd[i]))
^
include/linux/compiler.h:69:16: note: in definition of macro '__trace_if'
______r = !!(cond); \
^~~~
>> arch/x86//mm/pgtable.c:725:3: note: in expansion of macro 'if'
if (!pmd_free_pte_page(&pmd[i]))
^~
In file included from arch/x86/include/asm/pgtable.h:1292:0,
from include/linux/memremap.h:8,
from include/linux/mm.h:27,
from arch/x86//mm/pgtable.c:2:
include/asm-generic/pgtable.h:987:5: note: declared here
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
^~~~~~~~~~~~~~~~~
vim +/if +725 arch/x86//mm/pgtable.c
b6bdb751 Toshi Kani 2018-03-22 705
b6bdb751 Toshi Kani 2018-03-22 706 /**
b6bdb751 Toshi Kani 2018-03-22 707 * pud_free_pmd_page - Clear pud entry and free pmd page.
b6bdb751 Toshi Kani 2018-03-22 708 * @pud: Pointer to a PUD.
5b7ee34c Chintan Pandya 2018-03-27 709 * @addr: Virtual address associated with pud.
b6bdb751 Toshi Kani 2018-03-22 710 *
b6bdb751 Toshi Kani 2018-03-22 711 * Context: The pud range has been unmaped and TLB purged.
b6bdb751 Toshi Kani 2018-03-22 712 * Return: 1 if clearing the entry succeeded. 0 otherwise.
b6bdb751 Toshi Kani 2018-03-22 713 */
5b7ee34c Chintan Pandya 2018-03-27 714 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
b6bdb751 Toshi Kani 2018-03-22 715 {
28ee90fe Toshi Kani 2018-03-22 716 pmd_t *pmd;
28ee90fe Toshi Kani 2018-03-22 717 int i;
28ee90fe Toshi Kani 2018-03-22 718
28ee90fe Toshi Kani 2018-03-22 719 if (pud_none(*pud))
28ee90fe Toshi Kani 2018-03-22 720 return 1;
28ee90fe Toshi Kani 2018-03-22 721
28ee90fe Toshi Kani 2018-03-22 722 pmd = (pmd_t *)pud_page_vaddr(*pud);
28ee90fe Toshi Kani 2018-03-22 723
28ee90fe Toshi Kani 2018-03-22 724 for (i = 0; i < PTRS_PER_PMD; i++)
28ee90fe Toshi Kani 2018-03-22 @725 if (!pmd_free_pte_page(&pmd[i]))
28ee90fe Toshi Kani 2018-03-22 726 return 0;
28ee90fe Toshi Kani 2018-03-22 727
28ee90fe Toshi Kani 2018-03-22 728 pud_clear(pud);
28ee90fe Toshi Kani 2018-03-22 729 free_page((unsigned long)pmd);
28ee90fe Toshi Kani 2018-03-22 730
28ee90fe Toshi Kani 2018-03-22 731 return 1;
b6bdb751 Toshi Kani 2018-03-22 732 }
b6bdb751 Toshi Kani 2018-03-22 733
:::::: The code at line 725 was first introduced by commit
:::::: 28ee90fe6048fa7b7ceaeb8831c0e4e454a4cf89 x86/mm: implement free pmd/pte page interfaces
:::::: TO: Toshi Kani <[email protected]>
:::::: CC: Linus Torvalds <[email protected]>
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation