Select the HAVE_ARCH_HUGE_VMAP option to enable PMD vmap support, and
define the page table functions it requires. Since riscv currently has
only three-level page table support for 64BIT, only PMD-level (not
PUD-level) huge mappings are provided.

Signed-off-by: Liu Shixin <[email protected]>
---
arch/riscv/Kconfig | 1 +
arch/riscv/include/asm/vmalloc.h | 12 ++++++++++
arch/riscv/mm/Makefile | 1 +
arch/riscv/mm/pgtable.c | 40 ++++++++++++++++++++++++++++++++
4 files changed, 54 insertions(+)
create mode 100644 arch/riscv/mm/pgtable.c

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 4c0bfb2569e9..fb3c48fa96c1 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -61,6 +61,7 @@ config RISCV
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if MMU && 64BIT
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index ff9abc00d139..8f17f421f80c 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,4 +1,16 @@
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H

+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define IOREMAP_MAX_ORDER (PMD_SHIFT)
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool __init arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return true;
+}
+
+#endif
+
#endif /* _ASM_RISCV_VMALLOC_H */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 7ebaef10ea1b..f932b4d69946 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,6 +13,7 @@ obj-y += extable.o
obj-$(CONFIG_MMU) += fault.o pageattr.o
obj-y += cacheflush.o
obj-y += context.o
+obj-y += pgtable.o

ifeq ($(CONFIG_MMU),y)
obj-$(CONFIG_SMP) += tlbflush.o
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
new file mode 100644
index 000000000000..738dc6f3530f
--- /dev/null
+++ b/arch/riscv/mm/pgtable.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/pgalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_PMD_FOLDED
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+ pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);
+
+ set_pmd(pmd, new_pmd);
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (!pmd_leaf(READ_ONCE(*pmd)))
+ return 0;
+ pmd_clear(pmd);
+ return 1;
+}
+#endif
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pmd_clear(pmd);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ pte_free_kernel(NULL, pte);
+ return 1;
+}
+
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
--
2.18.0.huawei.25

On Fri, 04 Jun 2021 22:48:37 PDT (-0700), [email protected] wrote:
> [...]
Thanks, this is on for-next.
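
For context on how the hooks added by this patch are consumed: the generic
huge-vmap path in mm/vmalloc.c installs a PMD leaf only when the
architecture has opted in via arch_vmap_pmd_supported() and both the
virtual and physical ranges are PMD-aligned and PMD-sized; otherwise it
falls back to PTE mappings. The sketch below is a simplified illustration
of that decision, not the kernel's verbatim code: the helper name
try_map_pmd_leaf() and the reduced parameter list are assumptions made
here for brevity (the real vmap_try_huge_pmd() also threads a
max_page_shift limit through).

	/*
	 * Illustrative sketch only -- simplified from the generic
	 * PMD-mapping decision in mm/vmalloc.c.
	 */
	#include <linux/kernel.h>
	#include <linux/pgtable.h>
	#include <linux/vmalloc.h>

	static bool try_map_pmd_leaf(pmd_t *pmd, unsigned long addr,
				     unsigned long end, phys_addr_t phys_addr,
				     pgprot_t prot)
	{
		/* Arch opt-in: riscv's arch_vmap_pmd_supported() returns true. */
		if (!arch_vmap_pmd_supported(prot))
			return false;

		/* Must cover exactly one PMD-sized, PMD-aligned block. */
		if (end - addr != PMD_SIZE)
			return false;
		if (!IS_ALIGNED(addr, PMD_SIZE) || !IS_ALIGNED(phys_addr, PMD_SIZE))
			return false;

		/*
		 * If a PTE table already hangs off this pmd it has to be torn
		 * down first -- that is what pmd_free_pte_page() is for.
		 */
		if (pmd_present(READ_ONCE(*pmd)) && !pmd_free_pte_page(pmd, addr))
			return false;

		/* Install the leaf entry via the new riscv pmd_set_huge(). */
		return pmd_set_huge(pmd, phys_addr, prot);
	}

With this in place, an ioremap() of a PMD-aligned region of at least
PMD_SIZE (2 MiB under Sv39) can be backed by a single PMD entry instead of
512 PTEs; raising IOREMAP_MAX_ORDER to PMD_SHIFT in the header above is
what lets ioremap request that alignment in the first place.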