This series of patches is follow-up work on (and depends on)
Toshi Kani <[email protected]>'s patches "fix memory leak/
panic in ioremap huge pages".
The ioremap code has been touched up to honor BBM, which is a
requirement for some architectures (like arm64), and it works
well with all others.
Chintan Pandya (4):
asm/tlbflush: Add flush_tlb_pgtable() for ARM64
ioremap: Implement TLB_INV before huge mapping
arm64: Implement page table free interfaces
Revert "arm64: Enforce BBM for huge IO/VMAP mappings"
From V1->V2:
- Rebased my patches on top of "[PATCH v2 1/2] mm/vmalloc:
Add interfaces to free unmapped page table"
- Honored BBM for ARM64
arch/arm64/include/asm/tlbflush.h | 5 +++++
arch/arm64/mm/mmu.c | 28 ++++++++++++++++++----------
include/asm-generic/tlb.h | 6 ++++++
lib/ioremap.c | 25 +++++++++++++++++++------
4 files changed, 48 insertions(+), 16 deletions(-)
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
ARM64 MMU implements invalidation of TLB entries for
intermediate page tables for a particular VA. This
may or may not be available on other arches. So,
provide this API hook only for ARM64, for now.
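As an illustration only, a minimal sketch of the break-before-make
sequence this hook is meant to support in a caller such as
lib/ioremap.c, assuming a PMD-aligned kernel VA mapped in init_mm:

	pmd_t old_pmd = *pmdp;

	pmd_clear(pmdp);			/* break: remove the old table entry */
	flush_tlb_pgtable(&init_mm, addr);	/* drop any cached walk of that entry */
	pmd_set_huge(pmdp, phys, prot);		/* make: install the huge mapping */
	/* the page table that old_pmd pointed to can now be freed */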
Signed-off-by: Chintan Pandya <[email protected]>
---
arch/arm64/include/asm/tlbflush.h | 5 +++++
include/asm-generic/tlb.h | 6 ++++++
2 files changed, 11 insertions(+)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 9e82dd7..5f656f0 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -209,6 +209,11 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
dsb(ish);
}
+static inline void flush_tlb_pgtable(struct mm_struct *mm,
+ unsigned long uaddr)
+{
+ __flush_tlb_pgtable(mm, uaddr);
+}
#endif
#endif
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde4..7832c0a 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -295,4 +295,10 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_migrate_finish(mm) do {} while (0)
+#ifndef CONFIG_ARM64
+static inline void flush_tlb_pgtable(struct mm_struct *mm,
+ unsigned long uaddr)
+{
+}
+#endif
#endif /* _ASM_GENERIC__TLB_H */
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Huge mapping changes PMD/PUD which could have
valid previous entries. This requires proper
TLB maintenance on some architectures, like
ARM64.
Implement BBM (break-before-make) safe TLB
invalidation.
Here, I've used flush_tlb_pgtable() instead
of flush_kernel_range() because invalidating
intermediate page_table entries could have
been optimized for specific arch. That's the
case with ARM64 at least.
Signed-off-by: Chintan Pandya <[email protected]>
---
lib/ioremap.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bba..55f8648 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
+#include <asm-generic/tlb.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
@@ -80,6 +81,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pmd_t *pmd;
+ pmd_t old_pmd;
unsigned long next;
phys_addr -= addr;
@@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
- if (pmd_set_huge(pmd, phys_addr + addr, prot))
+ IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+ old_pmd = *pmd;
+ pmd_clear(pmd);
+ flush_tlb_pgtable(&init_mm, addr);
+ if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
+ pmd_free_pte_page(&old_pmd);
continue;
+ } else
+ set_pmd(pmd, old_pmd);
}
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
@@ -107,6 +114,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pud_t *pud;
+ pud_t old_pud;
unsigned long next;
phys_addr -= addr;
@@ -118,10 +126,15 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
- if (pud_set_huge(pud, phys_addr + addr, prot))
+ IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+ old_pud = *pud;
+ pud_clear(pud);
+ flush_tlb_pgtable(&init_mm, addr);
+ if (pud_set_huge(pud, phys_addr + addr, prot)) {
+ pud_free_pmd_page(&old_pud);
continue;
+ } else
+ set_pud(pud, old_pud);
}
if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Commit 15122ee2c515a ("arm64: Enforce BBM for huge
IO/VMAP mappings") was a temporary work-around until the
issues with CONFIG_HAVE_ARCH_HUGE_VMAP got fixed.
Revert this change now that we have fixes for the issue.
Signed-off-by: Chintan Pandya <[email protected]>
---
arch/arm64/mm/mmu.c | 8 --------
1 file changed, 8 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6f21a65..a1b8073 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -936,10 +936,6 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
- /* ioremap_page_range doesn't honour BBM */
- if (pud_present(READ_ONCE(*pudp)))
- return 0;
-
BUG_ON(phys & ~PUD_MASK);
set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
return 1;
@@ -950,10 +946,6 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
- /* ioremap_page_range doesn't honour BBM */
- if (pmd_present(READ_ONCE(*pmdp)))
- return 0;
-
BUG_ON(phys & ~PMD_MASK);
set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
return 1;
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Implement pud_free_pmd_page() and pmd_free_pte_page().
Make sure that an entry is indeed a page table before
freeing it.
Signed-off-by: Chintan Pandya <[email protected]>
---
arch/arm64/mm/mmu.c | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2dbb2c9..6f21a65 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/hugetlb.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -45,6 +46,7 @@
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
+#include <asm/page.h>
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@@ -975,10 +977,24 @@ int pmd_clear_huge(pmd_t *pmdp)
int pud_free_pmd_page(pud_t *pud)
{
- return pud_none(*pud);
+ pmd_t *pmd;
+ int i;
+
+ pmd = __va(pud_val(*pud));
+ if (pud_val(*pud) && !pud_huge(*pud)) {
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ pmd_free_pte_page(&pmd[i]);
+
+ free_page((unsigned long)pmd);
+ }
+
+ return 1;
}
int pmd_free_pte_page(pmd_t *pmd)
{
- return pmd_none(*pmd);
+ if (pmd_val(*pmd) && !pmd_huge(*pmd))
+ free_page((unsigned long)__va(pmd_val(*pmd)));
+
+ return 1;
}
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project
Hi,
As a general note, please wrap commit text to 72 characters.
On Thu, Mar 15, 2018 at 06:15:04PM +0530, Chintan Pandya wrote:
> Huge mapping changes PMD/PUD which could have
> valid previous entries. This requires proper
> TLB maintenance on some architectures, like
> ARM64.
Just to check, I take it that you mean we could have a valid table
entry, but all the entries in that next level table must be invalid,
right?
>
> Implement BBM (break-before-make) safe TLB
> invalidation.
>
> Here, I've used flush_tlb_pgtable() instead
> of flush_kernel_range() because invalidating
> intermediate page_table entries could have
> been optimized for specific arch. That's the
> case with ARM64 at least.
... because if there are valid entries in the next level table,
__flush_tlb_pgtable() is not sufficient to ensure all of these are
removed from the TLB.
Assuming that all entries in the next level table are invalid, this
looks ok to me.
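If that can't be guaranteed, the conservative alternative (a sketch,
assuming the stock flush_tlb_kernel_range() helper) is to invalidate
the whole VA range the entry covers, which also removes any leaf
entries from the TLB:

	pmd_clear(pmd);
	/* heavier than flush_tlb_pgtable(), but safe even if the old
	 * PTE table contained live entries */
	flush_tlb_kernel_range(addr, addr + PMD_SIZE);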
Thanks,
Mark.
On Thu, Mar 15, 2018 at 06:15:05PM +0530, Chintan Pandya wrote:
> Implement pud_free_pmd_page() and pmd_free_pte_page().
>
> Make sure, that they are indeed a page table before
> taking them to free.
As mentioned on the prior patch, if the tables we're freeing contain
valid entries, then we need additional TLB maintenance to ensure that
all of these entries have been removed from TLBs.
Either, we always invalidate the entire range, or we walk the tables
and invalidate as we remove them.
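A rough sketch of the first option on arm64 (assumptions: the
interface grows an 'addr' argument, addr is PUD-aligned as in the
ioremap caller, and pmd_free_pte_page() keeps the single-argument
form from this patch):

	int pud_free_pmd_page(pud_t *pud, unsigned long addr)
	{
		pmd_t *table;
		int i;

		if (pud_none(*pud) || pud_huge(*pud))
			return 1;

		table = pmd_offset(pud, addr);	/* base of the PMD table */
		pud_clear(pud);
		/* covers stale walks of the table and any valid entries below it */
		flush_tlb_kernel_range(addr, addr + PUD_SIZE);

		for (i = 0; i < PTRS_PER_PMD; i++)
			pmd_free_pte_page(&table[i]);	/* frees the PTE page, if any */

		free_page((unsigned long)table);
		return 1;
	}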
Thanks,
Mark.
On 3/15/2018 6:43 PM, Mark Rutland wrote:
> Hi,
>
> As a general note, please wrap commit text to 72 characters.
>
> On Thu, Mar 15, 2018 at 06:15:04PM +0530, Chintan Pandya wrote:
>> Huge mapping changes PMD/PUD which could have
>> valid previous entries. This requires proper
>> TLB maintenance on some architectures, like
>> ARM64.
>
> Just to check, I take it that you mean we could have a valid table
> entry, but all the entries in that next level table must be invalid,
> right?
That was my assumption, but it can be wrong: if any VA gets a
block mapping for 1G directly (instead of the 2M cases we discussed
so far), then this would go for a toss.
>
>>
>> Implement BBM (break-before-make) safe TLB
>> invalidation.
>>
>> Here, I've used flush_tlb_pgtable() instead
>> of flush_kernel_range() because invalidating
>> intermediate page_table entries could have
>> been optimized for specific arch. That's the
>> case with ARM64 at least.
>
> ... because if there are valid entries in the next level table,
> __flush_tlb_pgtable() is not sufficient to ensure all of these are
> removed from the TLB.
Oh! In the case of a huge_pgd, the next-level pmd entries may or may
not be valid. So, I had better use flush_kernel_range().
I will upload v3, but will wait for other comments first...
>
> Assuming that all entries in the next level table are invalid, this
> looks ok to me.
>
> Thanks,
> Mark.
>
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project
On Thu, Mar 15, 2018 at 06:15:04PM +0530, Chintan Pandya wrote:
> @@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
>
> if (ioremap_pmd_enabled() &&
> ((next - addr) == PMD_SIZE) &&
> - IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
> - pmd_free_pte_page(pmd)) {
> - if (pmd_set_huge(pmd, phys_addr + addr, prot))
> + IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
> + old_pmd = *pmd;
> + pmd_clear(pmd);
> + flush_tlb_pgtable(&init_mm, addr);
> + if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
> + pmd_free_pte_page(&old_pmd);
> continue;
> + } else
> + set_pmd(pmd, old_pmd);
> }
>
Can we have something like a pmd_can_set_huge() helper? Then we could
avoid pointless modification and TLB invalidation work when
pmd_set_huge() will fail.
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
pmd_can_set_huge(pmd, phys_addr + addr, prot)) {
// clear entries, invalidate TLBs, and free tables
...
continue;
}
Thanks,
Mark.
On 3/15/2018 7:01 PM, Mark Rutland wrote:
> On Thu, Mar 15, 2018 at 06:15:04PM +0530, Chintan Pandya wrote:
>> @@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
>>
>> if (ioremap_pmd_enabled() &&
>> ((next - addr) == PMD_SIZE) &&
>> - IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
>> - pmd_free_pte_page(pmd)) {
>> - if (pmd_set_huge(pmd, phys_addr + addr, prot))
>> + IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
>> + old_pmd = *pmd;
>> + pmd_clear(pmd);
>> + flush_tlb_pgtable(&init_mm, addr);
>> + if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
>> + pmd_free_pte_page(&old_pmd);
>> continue;
>> + } else
>> + set_pmd(pmd, old_pmd);
>> }
>>
>
> Can we have something like a pmd_can_set_huge() helper? Then we could
> avoid pointless modification and TLB invalidation work when
> pmd_set_huge() will fail.
Actually, pmd_set_huge() will never fail here: if
CONFIG_HAVE_ARCH_HUGE_VMAP is disabled, ioremap_pmd_enabled()
will fail, and where it is enabled (i.e. ARM64 & x86), the
implementations don't fail. So, rather, we can do the following.
- if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
- pmd_free_pte_page(&old_pmd);
- continue;
- } else
- set_pmd(pmd, old_pmd);
+ pmd_set_huge(pmd, phys_addr + addr, prot);
+ pmd_free_pte_page(&old_pmd);
+ continue;
>
> if (ioremap_pmd_enabled() &&
> ((next - addr) == PMD_SIZE) &&
> IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
> pmd_can_set_huge(pmd, phys_addr + addr, prot)) {
> // clear entries, invalidate TLBs, and free tables
> ...
> continue;
>
> }
>
> Thanks,
> Mark.
>
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project
On Thu, Mar 15, 2018 at 06:55:32PM +0530, Chintan Pandya wrote:
> On 3/15/2018 6:43 PM, Mark Rutland wrote:
> > On Thu, Mar 15, 2018 at 06:15:04PM +0530, Chintan Pandya wrote:
> > > Huge mapping changes PMD/PUD which could have
> > > valid previous entries. This requires proper
> > > TLB maintenance on some architectures, like
> > > ARM64.
> >
> > Just to check, I take it that you mean we could have a valid table
> > entry, but all the entries in that next level table must be invalid,
> > right?
>
> That was my assumption but my assumption can be wrong if any VA gets
> block mapping for 1G directly (instead of the 2M cases we discussed
> so far), then this would go for a toss.
Ok. Just considering the 4K -> 2M case, is that an assumption, or a
guarantee?
Thanks,
Mark.
On Thu, Mar 15, 2018 at 07:49:01PM +0530, Chintan Pandya wrote:
> On 3/15/2018 7:01 PM, Mark Rutland wrote:
> > On Thu, Mar 15, 2018 at 06:15:04PM +0530, Chintan Pandya wrote:
> > > @@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
> > > if (ioremap_pmd_enabled() &&
> > > ((next - addr) == PMD_SIZE) &&
> > > - IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
> > > - pmd_free_pte_page(pmd)) {
> > > - if (pmd_set_huge(pmd, phys_addr + addr, prot))
> > > + IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
> > > + old_pmd = *pmd;
> > > + pmd_clear(pmd);
> > > + flush_tlb_pgtable(&init_mm, addr);
> > > + if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
> > > + pmd_free_pte_page(&old_pmd);
> > > continue;
> > > + } else
> > > + set_pmd(pmd, old_pmd);
> > > }
> >
> > Can we have something like a pmd_can_set_huge() helper? Then we could
> > avoid pointless modification and TLB invalidation work when
> > pmd_set_huge() will fail.
>
> Actually, pmd_set_huge() will never fail because, if
> CONFIG_HAVE_ARCH_HUGE_VMAP is disabled, ioremap_pmd_enabled()
> will fail and if enabled (i.e. ARM64 & x86), they don't fail
> in their implementation. So, rather we can do the following.
AFAICT, that's not true. The x86 pmd_set_huge() can fail under certain
conditions:
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
u8 mtrr, uniform;
mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
(mtrr != MTRR_TYPE_WRBACK)) {
pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
__func__, addr, addr + PMD_SIZE);
return 0;
}
prot = pgprot_4k_2_large(prot);
set_pte((pte_t *)pmd, pfn_pte(
(u64)addr >> PAGE_SHIFT,
__pgprot(pgprot_val(prot) | _PAGE_PSE)));
return 1;
}
... perhaps that can never happen in this particular case, but that's
not clear to me.
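If it did matter, a hypothetical pmd_can_set_huge() for x86 could
simply hoist that MTRR check out of pmd_set_huge() (a sketch; the
name and placement are guesses, not an existing kernel API):

	static bool pmd_can_set_huge(phys_addr_t addr)
	{
		u8 mtrr, uniform;

		mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
		/* mirror the failure condition quoted above */
		return (mtrr == MTRR_TYPE_INVALID) || uniform ||
		       (mtrr == MTRR_TYPE_WRBACK);
	}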
Thanks,
Mark.
On Thu, 2018-03-15 at 18:15 +0530, Chintan Pandya wrote:
> Huge mapping changes PMD/PUD which could have
> valid previous entries. This requires proper
> TLB maintenance on some architectures, like
> ARM64.
>
> Implement BBM (break-before-make) safe TLB
> invalidation.
>
> Here, I've used flush_tlb_pgtable() instead
> of flush_kernel_range() because invalidating
> intermediate page_table entries could have
> been optimized for specific arch. That's the
> case with ARM64 at least.
>
> Signed-off-by: Chintan Pandya <[email protected]>
> ---
> lib/ioremap.c | 25 +++++++++++++++++++------
> 1 file changed, 19 insertions(+), 6 deletions(-)
>
> diff --git a/lib/ioremap.c b/lib/ioremap.c
> index 54e5bba..55f8648 100644
> --- a/lib/ioremap.c
> +++ b/lib/ioremap.c
> @@ -13,6 +13,7 @@
> #include <linux/export.h>
> #include <asm/cacheflush.h>
> #include <asm/pgtable.h>
> +#include <asm-generic/tlb.h>
>
> #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
> static int __read_mostly ioremap_p4d_capable;
> @@ -80,6 +81,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
> unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
> {
> pmd_t *pmd;
> + pmd_t old_pmd;
> unsigned long next;
>
> phys_addr -= addr;
> @@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
>
> if (ioremap_pmd_enabled() &&
> ((next - addr) == PMD_SIZE) &&
> - IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
> - pmd_free_pte_page(pmd)) {
> - if (pmd_set_huge(pmd, phys_addr + addr, prot))
> + IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
> + old_pmd = *pmd;
> + pmd_clear(pmd);
pmd_clear() is one of the operations pmd_free_pte_page() needs to do.
See the x86 version.
> + flush_tlb_pgtable(&init_mm, addr);
You can call it in pmd_free_pte_page() on arm64 as well.
> + if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
> + pmd_free_pte_page(&old_pmd);
> continue;
> + } else
> + set_pmd(pmd, old_pmd);
I do not understand why you needed to make this change.
pmd_free_pte_page() is defined as an arch-specific function so that you
can additionally perform TLB purges on arm64. Please try to make a proper
arm64 implementation of this interface. And if you find any issue in
this interface, please let me know.
Same for pud.
Thanks,
-Toshi
On 3/15/2018 8:46 PM, Mark Rutland wrote:
> On Thu, Mar 15, 2018 at 06:55:32PM +0530, Chintan Pandya wrote:
>> On 3/15/2018 6:43 PM, Mark Rutland wrote:
>>> On Thu, Mar 15, 2018 at 06:15:04PM +0530, Chintan Pandya wrote:
>>>> Huge mapping changes PMD/PUD which could have
>>>> valid previous entries. This requires proper
>>>> TLB maintenance on some architectures, like
>>>> ARM64.
>>>
>>> Just to check, I take it that you mean we could have a valid table
>>> entry, but all the entries in that next level table must be invalid,
>>> right?
>>
>> That was my assumption but my assumption can be wrong if any VA gets
>> block mapping for 1G directly (instead of the 2M cases we discussed
>> so far), then this would go for a toss.
>
> Ok. Just considering the 4K -> 2M case, is that an assumption, or a
> guarantee?
For the 4K->2M case, that's confirmed. I mean, while mapping 2M, all the
next-level entries will be unmapped and cleared. That is ensured
before we land in the page table code. But if someone calls these page
table APIs directly without respecting previous mappings, we will not
hit a BUG_ON() anywhere, but will crash later in unfamiliar situations.
That would be the wrong thing to do, though.
>
> Thanks,
> Mark.
>
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project
On 3/15/2018 9:42 PM, Kani, Toshi wrote:
> On Thu, 2018-03-15 at 18:15 +0530, Chintan Pandya wrote:
>> Huge mapping changes PMD/PUD which could have
>> valid previous entries. This requires proper
>> TLB maintenance on some architectures, like
>> ARM64.
>>
>> Implement BBM (break-before-make) safe TLB
>> invalidation.
>>
>> Here, I've used flush_tlb_pgtable() instead
>> of flush_kernel_range() because invalidating
>> intermediate page_table entries could have
>> been optimized for specific arch. That's the
>> case with ARM64 at least.
>>
>> Signed-off-by: Chintan Pandya <[email protected]>
>> ---
>> lib/ioremap.c | 25 +++++++++++++++++++------
>> 1 file changed, 19 insertions(+), 6 deletions(-)
>>
>> diff --git a/lib/ioremap.c b/lib/ioremap.c
>> index 54e5bba..55f8648 100644
>> --- a/lib/ioremap.c
>> +++ b/lib/ioremap.c
>> @@ -13,6 +13,7 @@
>> #include <linux/export.h>
>> #include <asm/cacheflush.h>
>> #include <asm/pgtable.h>
>> +#include <asm-generic/tlb.h>
>>
>> #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
>> static int __read_mostly ioremap_p4d_capable;
>> @@ -80,6 +81,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
>> unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
>> {
>> pmd_t *pmd;
>> + pmd_t old_pmd;
>> unsigned long next;
>>
>> phys_addr -= addr;
>> @@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
>>
>> if (ioremap_pmd_enabled() &&
>> ((next - addr) == PMD_SIZE) &&
>> - IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
>> - pmd_free_pte_page(pmd)) {
>> - if (pmd_set_huge(pmd, phys_addr + addr, prot))
>> + IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
>> + old_pmd = *pmd;
>> + pmd_clear(pmd);
>
> pmd_clear() is one of the operations pmd_free_pte_page() needs to do.
> See the x86 version.
>
>> + flush_tlb_pgtable(&init_mm, addr);
>
> You can call it in pmd_free_pte_page() on arm64 as well.
>
>> + if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
>> + pmd_free_pte_page(&old_pmd);
>> continue;
>> + } else
>> + set_pmd(pmd, old_pmd);
>
> I do not understand why you needed to make this change.
> pmd_free_pte_page() is defined as an arch-specific function so that you
> can additionally perform TLB purges on arm64. Please try to make proper
> arm64 implementation of this interface. And if you find any issue in
> this interface, please let me know.
TLB ops require at least the VA, and this interface passes just the
PMD/PUD. Second, if we clear the previous table entry inside the
arch-specific code and then pmd/pud_set_huge() fails, we can't fall
back (the x86 case).
So, we can do something like this (following Mark's suggestion):
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
pmd_can_set_huge(pmd, phys_addr + addr, prot)) {
/*
* Clear existing table entry,
* Invalidate,
* Free the page table
* inside this code
*/
pmd_free_pte_page(pmd, addr, addr + PMD_SIZE);
pmd_set_huge(...) //without fail
continue;
}
>
> Same for pud.
>
> Thanks,
> -Toshi
>
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project
On Fri, 2018-03-16 at 13:10 +0530, Chintan Pandya wrote:
>
> On 3/15/2018 9:42 PM, Kani, Toshi wrote:
> > On Thu, 2018-03-15 at 18:15 +0530, Chintan Pandya wrote:
> > > Huge mapping changes PMD/PUD which could have
> > > valid previous entries. This requires proper
> > > TLB maintenance on some architectures, like
> > > ARM64.
> > >
> > > Implement BBM (break-before-make) safe TLB
> > > invalidation.
> > >
> > > Here, I've used flush_tlb_pgtable() instead
> > > of flush_kernel_range() because invalidating
> > > intermediate page_table entries could have
> > > been optimized for specific arch. That's the
> > > case with ARM64 at least.
> > >
> > > Signed-off-by: Chintan Pandya <[email protected]>
> > > ---
> > > lib/ioremap.c | 25 +++++++++++++++++++------
> > > 1 file changed, 19 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/lib/ioremap.c b/lib/ioremap.c
> > > index 54e5bba..55f8648 100644
> > > --- a/lib/ioremap.c
> > > +++ b/lib/ioremap.c
> > > @@ -13,6 +13,7 @@
> > > #include <linux/export.h>
> > > #include <asm/cacheflush.h>
> > > #include <asm/pgtable.h>
> > > +#include <asm-generic/tlb.h>
> > >
> > > #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
> > > static int __read_mostly ioremap_p4d_capable;
> > > @@ -80,6 +81,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
> > > unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
> > > {
> > > pmd_t *pmd;
> > > + pmd_t old_pmd;
> > > unsigned long next;
> > >
> > > phys_addr -= addr;
> > > @@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
> > >
> > > if (ioremap_pmd_enabled() &&
> > > ((next - addr) == PMD_SIZE) &&
> > > - IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
> > > - pmd_free_pte_page(pmd)) {
> > > - if (pmd_set_huge(pmd, phys_addr + addr, prot))
> > > + IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
> > > + old_pmd = *pmd;
> > > + pmd_clear(pmd);
> >
> > pmd_clear() is one of the operations pmd_free_pte_page() needs to do.
> > See the x86 version.
> >
> > > + flush_tlb_pgtable(&init_mm, addr);
> >
> > You can call it in pmd_free_pte_page() on arm64 as well.
> >
> > > + if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
> > > + pmd_free_pte_page(&old_pmd);
> > > continue;
> > > + } else
> > > + set_pmd(pmd, old_pmd);
> >
> > I do not understand why you needed to make this change.
> > pmd_free_pte_page() is defined as an arch-specific function so that you
> > can additionally perform TLB purges on arm64. Please try to make proper
> > arm64 implementation of this interface. And if you find any issue in
> > this interface, please let me know.
>
> TLB ops require VA at least. And this interface passes just the PMD/PUD.
You can add 'addr' as the 2nd arg. Such a minor tweak is expected
when implementing on multiple arches.
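For instance (a sketch only, assuming the signature becomes
pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) and that addr is
PMD-aligned), the arm64 side could fold the clear and the flush into
the helper:

	int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
	{
		pte_t *table;

		if (pmd_none(*pmdp) || pmd_huge(*pmdp))
			return 1;

		table = pte_offset_kernel(pmdp, addr);	/* base of the PTE table */
		pmd_clear(pmdp);
		flush_tlb_pgtable(&init_mm, addr);	/* or a range flush if PTEs may be live */
		free_page((unsigned long)table);

		return 1;
	}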
> Second is, if we clear the previous table entry inside the arch specific
> code and then we fail in pmd/pud_set_huge, we can't fallback (x86 case).
>
> So, we can do something like this (following Mark's suggestion),
>
> if (ioremap_pmd_enabled() &&
> ((next - addr) == PMD_SIZE) &&
> IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
> pmd_can_set_huge(pmd, phys_addr + addr, prot)) {
> /*
> * Clear existing table entry,
> * Invalidate,
> * Free the page table
> * inside this code
> */
> pmd_free_pte_page(pmd, addr, addr + PMD_SIZE);
> pmd_set_huge(...) //without fail
> continue;
> }
That's not necessary. pmd being none is a legitimate state. In fact,
it is the case when pmd_alloc() allocated and populated a new pmd.
Thanks,
-Toshi
On 3/16/2018 8:20 PM, Kani, Toshi wrote:
> On Fri, 2018-03-16 at 13:10 +0530, Chintan Pandya wrote:
>>
>> On 3/15/2018 9:42 PM, Kani, Toshi wrote:
>>> On Thu, 2018-03-15 at 18:15 +0530, Chintan Pandya wrote:
>>>> Huge mapping changes PMD/PUD which could have
>>>> valid previous entries. This requires proper
>>>> TLB maintenance on some architectures, like
>>>> ARM64.
>>>>
>>>> Implement BBM (break-before-make) safe TLB
>>>> invalidation.
>>>>
>>>> Here, I've used flush_tlb_pgtable() instead
>>>> of flush_kernel_range() because invalidating
>>>> intermediate page_table entries could have
>>>> been optimized for specific arch. That's the
>>>> case with ARM64 at least.
>>>>
>>>> Signed-off-by: Chintan Pandya <[email protected]>
>>>> ---
>>>> lib/ioremap.c | 25 +++++++++++++++++++------
>>>> 1 file changed, 19 insertions(+), 6 deletions(-)
>>>>
>>>> diff --git a/lib/ioremap.c b/lib/ioremap.c
>>>> index 54e5bba..55f8648 100644
>>>> --- a/lib/ioremap.c
>>>> +++ b/lib/ioremap.c
>>>> @@ -13,6 +13,7 @@
>>>> #include <linux/export.h>
>>>> #include <asm/cacheflush.h>
>>>> #include <asm/pgtable.h>
>>>> +#include <asm-generic/tlb.h>
>>>>
>>>> #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
>>>> static int __read_mostly ioremap_p4d_capable;
>>>> @@ -80,6 +81,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
>>>> unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
>>>> {
>>>> pmd_t *pmd;
>>>> + pmd_t old_pmd;
>>>> unsigned long next;
>>>>
>>>> phys_addr -= addr;
>>>> @@ -91,10 +93,15 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
>>>>
>>>> if (ioremap_pmd_enabled() &&
>>>> ((next - addr) == PMD_SIZE) &&
>>>> - IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
>>>> - pmd_free_pte_page(pmd)) {
>>>> - if (pmd_set_huge(pmd, phys_addr + addr, prot))
>>>> + IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
>>>> + old_pmd = *pmd;
>>>> + pmd_clear(pmd);
>>>
>>> pmd_clear() is one of the operations pmd_free_pte_page() needs to do.
>>> See the x86 version.
>>>
>>>> + flush_tlb_pgtable(&init_mm, addr);
>>>
>>> You can call it in pmd_free_pte_page() on arm64 as well.
>>>
>>>> + if (pmd_set_huge(pmd, phys_addr + addr, prot)) {
>>>> + pmd_free_pte_page(&old_pmd);
>>>> continue;
>>>> + } else
>>>> + set_pmd(pmd, old_pmd);
>>>
>>> I do not understand why you needed to make this change.
>>> pmd_free_pte_page() is defined as an arch-specific function so that you
>>> can additionally perform TLB purges on arm64. Please try to make proper
>>> arm64 implementation of this interface. And if you find any issue in
>>> this interface, please let me know.
>>
>> TLB ops require VA at least. And this interface passes just the PMD/PUD.
>
> You can add 'addr' as the 2nd arg. Such minor tweak is expected when
> implementing on multiple arches.
>
>> Second is, if we clear the previous table entry inside the arch specific
>> code and then we fail in pmd/pud_set_huge, we can't fallback (x86 case).
>>
>> So, we can do something like this (following Mark's suggestion),
>>
>> if (ioremap_pmd_enabled() &&
>> ((next - addr) == PMD_SIZE) &&
>> IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
>> pmd_can_set_huge(pmd, phys_addr + addr, prot)) {
>> /*
>> * Clear existing table entry,
>> * Invalidate,
>> * Free the page table
>> * inside this code
>> */
>> pmd_free_pte_page(pmd, addr, addr + PMD_SIZE);
>> pmd_set_huge(...) //without fail
>> continue;
>> }
>
> That's not necessary. pmd being none is a legitimate state. In fact,
> it is the case when pmd_alloc() allocated and populated a new pmd.
Alright. I'll send v3 today.
>
> Thanks,
> -Toshi
>
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project
On 3/15/2018 6:48 PM, Mark Rutland wrote:
> On Thu, Mar 15, 2018 at 06:15:05PM +0530, Chintan Pandya wrote:
>> Implement pud_free_pmd_page() and pmd_free_pte_page().
>>
>> Make sure, that they are indeed a page table before
>> taking them to free.
>
> As mentioned on the prior patch, if the tables we're freeing contain
> valid entries, then we need additional TLB maintenance to ensure that
> all of these entries have been removed from TLBs.
>
> Either, we always invalidate the entire range, or we walk the tables
> and invalidate as we remove them.
Right. I'll send v3 and ensure this. I'm thinking we can invalidate
the page table entry in the PMD case and invalidate the range if it's
a PUD. Will see if that can also be optimized.
>
> Thanks,
> Mark.
>
Chintan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc. is a member of the Code Aurora Forum, a Linux Foundation
Collaborative Project