protection_map[] is an array-based construct that translates a given vm_flags
combination into a page protection value. This array contains the page
protection map, which is populated by the platform via the exported macros
[__S000 .. __S111] and [__P000 .. __P111].
The primary usage of protection_map[] is in vm_get_page_prot(), which is used
to determine the page protection value for a given vm_flags. The
vm_get_page_prot() implementation could again call the platform overrides
arch_vm_get_page_prot() and arch_filter_pgprot(). Some platforms override
protection_map[], which was originally built with __SXXX/__PXXX, with
different runtime values.
Currently there are multiple layers of abstraction, i.e. the __SXXX/__PXXX
macros, protection_map[], arch_vm_get_page_prot() and arch_filter_pgprot(),
built between the platform and generic MM, finally defining vm_get_page_prot().
Hence this series proposes to drop all these abstraction levels and instead
just move the responsibility of defining vm_get_page_prot() to the platform
itself making it clean and simple.
This first introduces ARCH_HAS_VM_GET_PAGE_PROT which enables the platforms
to define a custom vm_get_page_prot(). It starts by converting the platforms
that either change protection_map[] or define the overrides
arch_filter_pgprot() or arch_vm_get_page_prot(), allowing those constructs to
be dropped completely. The series then converts the remaining platforms,
allowing the __SXXX/__PXXX constructs to be dropped completely. Finally it
drops the generic vm_get_page_prot() and then ARCH_HAS_VM_GET_PAGE_PROT, as
every platform now defines its own vm_get_page_prot().
The series has been inspired by an earlier discussion with Christoph Hellwig
https://lore.kernel.org/all/[email protected]/
This series applies on 5.17-rc5 after the following patch.
https://lore.kernel.org/all/[email protected]/
This series has been cross built for multiple platforms.
- Anshuman
Cc: Christoph Hellwig <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Changes in V2:
- Dropped the entire comment block in [PATCH 30/30] per Geert
- Replaced __P010 (although commented) with __PAGE_COPY on arm platform
- Replaced __P101 with PAGE_READONLY on um platform
Changes in V1:
https://lore.kernel.org/all/[email protected]/
- Add white spaces around the | operators
- Moved powerpc_vm_get_page_prot() near vm_get_page_prot() on powerpc
- Moved arm64_vm_get_page_prot() near vm_get_page_prot() on arm64
- Moved sparc_vm_get_page_prot() near vm_get_page_prot() on sparc
- Compacted vm_get_page_prot() switch cases on all platforms
- _PAGE_CACHE040 inclusion is dependent on CPU_IS_040_OR_060
- VM_SHARED case should return PAGE_NONE (not PAGE_COPY) on SH platform
- Reorganized VM_SHARED, VM_EXEC, VM_WRITE, VM_READ
- Dropped the last patch [RFC V1 31/31] which added macros for vm_flags combinations
https://lore.kernel.org/all/[email protected]/
Changes in RFC:
https://lore.kernel.org/all/[email protected]/
Anshuman Khandual (29):
mm/debug_vm_pgtable: Drop protection_map[] usage
mm/mmap: Clarify protection_map[] indices
mm/mmap: Add new config ARCH_HAS_VM_GET_PAGE_PROT
powerpc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
arm64/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
sparc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
mips/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
m68k/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
arm/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
mm/mmap: Drop protection_map[]
mm/mmap: Drop arch_filter_pgprot()
  mm/mmap: Drop arch_vm_get_page_prot()
s390/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
riscv/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
alpha/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
sh/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
arc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
csky/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
  xtensa/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
parisc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
openrisc/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
um/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
microblaze/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
nios2/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
hexagon/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
nds32/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
ia64/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
mm/mmap: Drop generic vm_get_page_prot()
mm/mmap: Drop ARCH_HAS_VM_GET_PAGE_PROT
Christoph Hellwig (1):
x86/mm: Enable ARCH_HAS_VM_GET_PAGE_PROT
arch/alpha/include/asm/pgtable.h | 17 ----
arch/alpha/mm/init.c | 37 ++++++++
arch/arc/include/asm/pgtable-bits-arcv2.h | 17 ----
arch/arc/mm/mmap.c | 41 +++++++++
arch/arm/include/asm/pgtable.h | 18 ----
arch/arm/lib/uaccess_with_memcpy.c | 2 +-
arch/arm/mm/mmu.c | 44 +++++++--
arch/arm64/Kconfig | 1 -
arch/arm64/include/asm/mman.h | 24 -----
arch/arm64/include/asm/pgtable-prot.h | 18 ----
arch/arm64/include/asm/pgtable.h | 11 ---
arch/arm64/mm/mmap.c | 78 ++++++++++++++++
arch/csky/include/asm/pgtable.h | 18 ----
arch/csky/mm/init.c | 32 +++++++
arch/hexagon/include/asm/pgtable.h | 24 -----
arch/hexagon/mm/init.c | 67 ++++++++++++++
arch/ia64/include/asm/pgtable.h | 17 ----
arch/ia64/mm/init.c | 41 ++++++++-
arch/m68k/include/asm/mcf_pgtable.h | 59 ------------
arch/m68k/include/asm/motorola_pgtable.h | 22 -----
arch/m68k/include/asm/sun3_pgtable.h | 22 -----
arch/m68k/mm/init.c | 104 ++++++++++++++++++++++
arch/m68k/mm/motorola.c | 48 +++++++++-
arch/microblaze/include/asm/pgtable.h | 17 ----
arch/microblaze/mm/init.c | 41 +++++++++
arch/mips/include/asm/pgtable.h | 22 -----
arch/mips/mm/cache.c | 60 +++++++------
arch/nds32/include/asm/pgtable.h | 17 ----
arch/nds32/mm/mmap.c | 37 ++++++++
arch/nios2/include/asm/pgtable.h | 16 ----
arch/nios2/mm/init.c | 45 ++++++++++
arch/openrisc/include/asm/pgtable.h | 18 ----
arch/openrisc/mm/init.c | 41 +++++++++
arch/parisc/include/asm/pgtable.h | 20 -----
arch/parisc/mm/init.c | 40 +++++++++
arch/powerpc/include/asm/mman.h | 12 ---
arch/powerpc/include/asm/pgtable.h | 19 ----
arch/powerpc/mm/mmap.c | 59 ++++++++++++
arch/riscv/include/asm/pgtable.h | 16 ----
arch/riscv/mm/init.c | 42 +++++++++
arch/s390/include/asm/pgtable.h | 17 ----
arch/s390/mm/mmap.c | 33 +++++++
arch/sh/include/asm/pgtable.h | 17 ----
arch/sh/mm/mmap.c | 38 ++++++++
arch/sparc/include/asm/mman.h | 6 --
arch/sparc/include/asm/pgtable_32.h | 19 ----
arch/sparc/include/asm/pgtable_64.h | 19 ----
arch/sparc/mm/init_32.c | 35 ++++++++
arch/sparc/mm/init_64.c | 70 +++++++++++----
arch/um/include/asm/pgtable.h | 17 ----
arch/um/kernel/mem.c | 35 ++++++++
arch/x86/Kconfig | 1 -
arch/x86/include/asm/pgtable.h | 5 --
arch/x86/include/asm/pgtable_types.h | 19 ----
arch/x86/include/uapi/asm/mman.h | 14 ---
arch/x86/mm/Makefile | 2 +-
arch/x86/mm/mem_encrypt_amd.c | 4 -
arch/x86/mm/pgprot.c | 71 +++++++++++++++
arch/x86/um/mem_32.c | 2 +-
arch/xtensa/include/asm/pgtable.h | 18 ----
arch/xtensa/mm/init.c | 35 ++++++++
include/linux/mm.h | 6 --
include/linux/mman.h | 4 -
mm/Kconfig | 3 -
mm/debug_vm_pgtable.c | 31 ++++---
mm/mmap.c | 42 ---------
66 files changed, 1142 insertions(+), 705 deletions(-)
create mode 100644 arch/x86/mm/pgprot.c
--
2.25.1
This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped which are no longer needed. While here, this also
localizes arch_vm_get_page_prot() as powerpc_vm_get_page_prot() and moves
it near vm_get_page_prot().
Cc: Michael Ellerman <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/mman.h | 12 ------
arch/powerpc/include/asm/pgtable.h | 19 ----------
arch/powerpc/mm/mmap.c | 59 ++++++++++++++++++++++++++++++
4 files changed, 60 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b779603978e1..ddb4a3687c05 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,7 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 7cb6d18f5cd6..1b024e64c8ec 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -24,18 +24,6 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
-#ifdef CONFIG_PPC_MEM_KEYS
- return (vm_flags & VM_SAO) ?
- __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
- __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
-#else
- return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
-#endif
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d564d0ecd4cd..3cbb6de20f9d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -20,25 +20,6 @@ struct mm_struct;
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
-/* Note due to the way vm flags are laid out, the bits are XWR */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
#ifndef __ASSEMBLY__
#ifndef MAX_PTRS_PER_PGD
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index c475cf810aa8..ee275937fe19 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -254,3 +254,62 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
+
+static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READONLY;
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ return PAGE_READONLY_X;
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_COPY_X;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_READONLY_X;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_SHARED_X;
+ default:
+ BUILD_BUG();
+ }
+}
+
+#ifdef CONFIG_PPC64
+static pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef CONFIG_PPC_MEM_KEYS
+ return (vm_flags & VM_SAO) ?
+ __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+ __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
+}
+#else
+static pgprot_t powerpc_vm_get_page_prot(unsigned long vm_flags)
+{
+ return __pgprot(0);
+}
+#endif /* CONFIG_PPC64 */
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ return __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
+ pgprot_val(powerpc_vm_get_page_prot(vm_flags)));
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped which are no longer needed.
Cc: Russell King <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/arm/Kconfig | 1 +
arch/arm/include/asm/pgtable.h | 18 ------------
arch/arm/lib/uaccess_with_memcpy.c | 2 +-
arch/arm/mm/mmu.c | 44 ++++++++++++++++++++++++++----
4 files changed, 40 insertions(+), 25 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c97cb40eebb..87b2e89ef3d6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,6 +23,7 @@ config ARM
select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index cd1f84bb40ae..ec062dd6082a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -137,24 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* 2) If we could do execute protection, then read is implied
* 3) write implies read permissions
*/
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
-
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
-
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 106f83a5ea6d..12d8d9794a28 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -247,7 +247,7 @@ static int __init test_size_treshold(void)
if (!dst_page)
goto no_dst;
kernel_ptr = page_address(src_page);
- user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+ user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
if (!user_ptr)
goto no_vmap;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 274e4f73fd33..9cdf45da57de 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -403,6 +403,8 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
+static pteval_t user_pgprot;
+
/*
* Adjust the PMD section entries according to the CPU in use.
*/
@@ -410,7 +412,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+ pteval_t kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -627,11 +629,6 @@ static void __init build_mem_type_table(void)
user_pgprot |= PTE_EXT_PXN;
#endif
- for (i = 0; i < 16; i++) {
- pteval_t v = pgprot_val(protection_map[i]);
- protection_map[i] = __pgprot(v | user_pgprot);
- }
-
mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
@@ -670,6 +667,41 @@ static void __init build_mem_type_table(void)
}
}
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return __pgprot(pgprot_val(__PAGE_NONE) | user_pgprot);
+ case VM_READ:
+ return __pgprot(pgprot_val(__PAGE_READONLY) | user_pgprot);
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(__PAGE_COPY) | user_pgprot);
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(__PAGE_COPY_EXEC) | user_pgprot);
+ case VM_SHARED:
+ return __pgprot(pgprot_val(__PAGE_NONE) | user_pgprot);
+ case VM_SHARED | VM_READ:
+ return __pgprot(pgprot_val(__PAGE_READONLY) | user_pgprot);
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(__PAGE_SHARED) | user_pgprot);
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(__PAGE_SHARED_EXEC) | user_pgprot);
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
--
2.25.1
This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped which are no longer needed.
Cc: Geert Uytterhoeven <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/m68k/Kconfig | 1 +
arch/m68k/include/asm/mcf_pgtable.h | 59 -------------
arch/m68k/include/asm/motorola_pgtable.h | 22 -----
arch/m68k/include/asm/sun3_pgtable.h | 22 -----
arch/m68k/mm/init.c | 104 +++++++++++++++++++++++
arch/m68k/mm/motorola.c | 48 ++++++++++-
6 files changed, 150 insertions(+), 106 deletions(-)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 936e1803c7c7..114e65164692 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -11,6 +11,7 @@ config M68K
select ARCH_NO_PREEMPT if !COLDFIRE
select ARCH_USE_MEMTEST if MMU_MOTOROLA
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_HAS_VM_GET_PAGE_PROT
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
select GENERIC_ATOMIC64
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 6f2b87d7a50d..dc5c8ab6aa57 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -86,65 +86,6 @@
| CF_PAGE_READABLE \
| CF_PAGE_DIRTY)
-/*
- * Page protections for initialising protection_map. See mm/mmap.c
- * for use. In general, the bit positions are xwr, and P-items are
- * private, the S-items are shared.
- */
-#define __P000 PAGE_NONE
-#define __P001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __P010 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE)
-#define __P011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE)
-#define __P100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __P101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __P110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-#define __P111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_WRITABLE \
- | CF_PAGE_EXEC)
-
-#define __S000 PAGE_NONE
-#define __S001 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE)
-#define __S010 PAGE_SHARED
-#define __S011 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE)
-#define __S100 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_EXEC)
-#define __S101 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-#define __S110 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_EXEC)
-#define __S111 __pgprot(CF_PAGE_VALID \
- | CF_PAGE_ACCESSED \
- | CF_PAGE_SHARED \
- | CF_PAGE_READABLE \
- | CF_PAGE_EXEC)
-
#define PTE_MASK PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 022c3abc280d..4ea1bb57deee 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -83,28 +83,6 @@ extern unsigned long mm_cachebits;
#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
-/*
- * The m68k can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
- */
-#define __P000 PAGE_NONE_C
-#define __P001 PAGE_READONLY_C
-#define __P010 PAGE_COPY_C
-#define __P011 PAGE_COPY_C
-#define __P100 PAGE_READONLY_C
-#define __P101 PAGE_READONLY_C
-#define __P110 PAGE_COPY_C
-#define __P111 PAGE_COPY_C
-
-#define __S000 PAGE_NONE_C
-#define __S001 PAGE_READONLY_C
-#define __S010 PAGE_SHARED_C
-#define __S011 PAGE_SHARED_C
-#define __S100 PAGE_READONLY_C
-#define __S101 PAGE_READONLY_C
-#define __S110 PAGE_SHARED_C
-#define __S111 PAGE_SHARED_C
-
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
/*
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index 5b24283a0a42..086fabdd8d4c 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -66,28 +66,6 @@
| SUN3_PAGE_SYSTEM \
| SUN3_PAGE_NOCACHE)
-/*
- * Page protections for initialising protection_map. The sun3 has only two
- * protection settings, valid (implying read and execute) and writeable. These
- * are as close as we can get...
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
/* Use these fake page-protections on PMDs. */
#define SUN3_PMD_VALID (0x00000001)
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 1b47bec15832..b6ef2c6f4e85 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -128,3 +128,107 @@ void __init mem_init(void)
memblock_free_all();
init_pointer_tables();
}
+
+#ifdef CONFIG_COLDFIRE
+/*
+ * Page protections for initialising protection_map. See mm/mmap.c
+ * for use. In general, the bit positions are xwr, and P-items are
+ * private, the S-items are shared.
+ */
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE);
+ case VM_WRITE:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_WRITABLE);
+ case VM_WRITE | VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE | CF_PAGE_WRITABLE);
+ case VM_EXEC:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_EXEC);
+ case VM_EXEC | VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE | CF_PAGE_EXEC);
+ case VM_EXEC | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_WRITABLE | CF_PAGE_EXEC);
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE | CF_PAGE_WRITABLE |
+ CF_PAGE_EXEC);
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE);
+ case VM_SHARED | VM_WRITE:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE | CF_PAGE_SHARED);
+ case VM_SHARED | VM_EXEC:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_EXEC);
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE | CF_PAGE_EXEC);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_SHARED | CF_PAGE_EXEC);
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
+ CF_PAGE_READABLE | CF_PAGE_SHARED |
+ CF_PAGE_EXEC);
+ default:
+ BUILD_BUG();
+ }
+}
+#endif
+
+#ifdef CONFIG_SUN3
+/*
+ * Page protections for initialising protection_map. The sun3 has only two
+ * protection settings, valid (implying read and execute) and writeable. These
+ * are as close as we can get...
+ */
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READONLY;
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ return PAGE_READONLY;
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ default:
+ BUILD_BUG();
+ }
+}
+#endif
+EXPORT_SYMBOL(vm_get_page_prot);
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index ecbe948f4c1a..495ba0ea083c 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -400,12 +400,9 @@ void __init paging_init(void)
/* Fix the cache mode in the page descriptors for the 680[46]0. */
if (CPU_IS_040_OR_060) {
- int i;
#ifndef mm_cachebits
mm_cachebits = _PAGE_CACHE040;
#endif
- for (i = 0; i < 16; i++)
- pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
}
min_addr = m68k_memory[0].addr;
@@ -483,3 +480,48 @@ void __init paging_init(void)
max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
free_area_init(max_zone_pfn);
}
+
+/*
+ * The m68k can't do page protection for execute, and considers that
+ * the same are read. Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ unsigned long cachebits = 0;
+
+ if (CPU_IS_040_OR_060)
+ cachebits = _PAGE_CACHE040;
+
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return __pgprot(pgprot_val(PAGE_NONE_C) | cachebits);
+ case VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(PAGE_COPY_C) | cachebits);
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(PAGE_COPY_C) | cachebits);
+ case VM_SHARED:
+ return __pgprot(pgprot_val(PAGE_NONE_C) | cachebits);
+ case VM_SHARED | VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(PAGE_SHARED_C) | cachebits);
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return __pgprot(pgprot_val(PAGE_SHARED_C) | cachebits);
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped which are no longer needed.
Cc: Thomas Bogendoerfer <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/mips/Kconfig | 1 +
arch/mips/include/asm/pgtable.h | 22 ------------
arch/mips/mm/cache.c | 60 +++++++++++++++++++--------------
3 files changed, 36 insertions(+), 47 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 058446f01487..fcbfc52a1567 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -13,6 +13,7 @@ config MIPS
select ARCH_HAS_STRNLEN_USER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_KEEP_MEMBLOCK
select ARCH_SUPPORTS_UPROBES
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 7b8037f25d9e..bf193ad4f195 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -41,28 +41,6 @@ struct vm_area_struct;
* by reasonable means..
*/
-/*
- * Dummy values to fill the table in mmap.c
- * The real values will be generated at runtime
- */
-#define __P000 __pgprot(0)
-#define __P001 __pgprot(0)
-#define __P010 __pgprot(0)
-#define __P011 __pgprot(0)
-#define __P100 __pgprot(0)
-#define __P101 __pgprot(0)
-#define __P110 __pgprot(0)
-#define __P111 __pgprot(0)
-
-#define __S000 __pgprot(0)
-#define __S001 __pgprot(0)
-#define __S010 __pgprot(0)
-#define __S011 __pgprot(0)
-#define __S100 __pgprot(0)
-#define __S101 __pgprot(0)
-#define __S110 __pgprot(0)
-#define __S111 __pgprot(0)
-
extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 830ab91e574f..9f33ce4fb105 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -159,30 +159,6 @@ EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
-static inline void setup_protection_map(void)
-{
- protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = PM(_PAGE_PRESENT);
- protection_map[5] = PM(_PAGE_PRESENT);
- protection_map[6] = PM(_PAGE_PRESENT);
- protection_map[7] = PM(_PAGE_PRESENT);
-
- protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
- _PAGE_NO_READ);
- protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = PM(_PAGE_PRESENT);
- protection_map[13] = PM(_PAGE_PRESENT);
- protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
- protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
-}
-
-#undef PM
-
void cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
@@ -206,6 +182,40 @@ void cpu_cache_init(void)
octeon_cache_init();
}
+}
- setup_protection_map();
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ case VM_READ:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ case VM_WRITE:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ case VM_WRITE | VM_READ:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PM(_PAGE_PRESENT);
+ case VM_SHARED:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ case VM_SHARED | VM_READ:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ case VM_SHARED | VM_WRITE:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PM(_PAGE_PRESENT);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PM(_PAGE_PRESENT | _PAGE_WRITE);
+ default:
+ BUILD_BUG();
+ }
}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped which are no longer needed.
Cc: Jeff Dike <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/um/Kconfig | 1 +
arch/um/include/asm/pgtable.h | 17 -----------------
arch/um/kernel/mem.c | 35 +++++++++++++++++++++++++++++++++++
arch/x86/um/mem_32.c | 2 +-
4 files changed, 37 insertions(+), 18 deletions(-)
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 4d398b80aea8..5836296868a8 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -9,6 +9,7 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_NO_PREEMPT
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index b9e20bbe2f75..d982622c0708 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -68,23 +68,6 @@ extern unsigned long end_iomem;
* Also, write permissions imply read permissions. This is the closest we can
* get..
*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
/*
* ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 15295c3237a0..37c6c7b9dadc 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -197,3 +197,38 @@ void *uml_kmalloc(int size, int flags)
{
return kmalloc(size, flags);
}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READONLY;
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ return PAGE_READONLY;
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 19c5dbd46770..cafd01f730da 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -17,7 +17,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = PAGE_READONLY;
return 0;
}
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped, as they are no longer needed.
Cc: "James E.J. Bottomley" <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/parisc/Kconfig | 1 +
arch/parisc/include/asm/pgtable.h | 20 ----------------
arch/parisc/mm/init.c | 40 +++++++++++++++++++++++++++++++
3 files changed, 41 insertions(+), 20 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 43c1c880def6..de512f120b50 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -10,6 +10,7 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_NO_SG_CHAIN
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 3e7cf882639f..80d99b2b5913 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -269,26 +269,6 @@ extern void __update_cache(pte_t pte);
* pages.
*/
- /*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 __P000 /* copy on write */
-#define __P011 __P001 /* copy on write */
-#define __P100 PAGE_EXECREAD
-#define __P101 PAGE_EXECREAD
-#define __P110 __P100 /* copy on write */
-#define __P111 __P101 /* copy on write */
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_WRITEONLY
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECREAD
-#define __S101 PAGE_EXECREAD
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
-
-
extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
/* initial page tables for 0-8MB for kernel */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 1dc2e88e7b04..f9e841f874a8 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -865,3 +865,43 @@ void flush_tlb_all(void)
spin_unlock(&sid_lock);
}
#endif
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READONLY;
+ /* copy on write */
+ case VM_WRITE:
+ return PAGE_NONE;
+ /* copy on write */
+ case VM_WRITE | VM_READ:
+ return PAGE_READONLY;
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ /* copy on write */
+ case VM_EXEC | VM_WRITE:
+ /* copy on write */
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_EXECREAD;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ return PAGE_WRITEONLY;
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_EXECREAD;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_RWX;
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped, as they are no longer needed.
Cc: Heiko Carstens <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/s390/Kconfig | 1 +
arch/s390/include/asm/pgtable.h | 17 -----------------
arch/s390/mm/mmap.c | 33 +++++++++++++++++++++++++++++++++
3 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index be9f39fd06df..cb1b487e8201 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -78,6 +78,7 @@ config S390
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_VDSO_DATA
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 008a6c856fa4..3893ef64b439 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -422,23 +422,6 @@ static inline int is_module_addr(void *addr)
* implies read permission.
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_RO
-#define __P010 PAGE_RO
-#define __P011 PAGE_RO
-#define __P100 PAGE_RX
-#define __P101 PAGE_RX
-#define __P110 PAGE_RX
-#define __P111 PAGE_RX
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_RO
-#define __S010 PAGE_RW
-#define __S011 PAGE_RW
-#define __S100 PAGE_RX
-#define __S101 PAGE_RX
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
/*
* Segment entry (large page) protection definitions.
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index e54f928503c5..e99c198aa5de 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -188,3 +188,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_RO;
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_RX;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_RO;
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_RW;
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_RX;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_RWX;
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped, as they are no longer needed.
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/ia64/Kconfig | 1 +
arch/ia64/include/asm/pgtable.h | 17 --------------
arch/ia64/mm/init.c | 41 ++++++++++++++++++++++++++++++++-
3 files changed, 41 insertions(+), 18 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index a7e01573abd8..0ab15e8d5783 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,6 +11,7 @@ config IA64
select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ACPI
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 9584b2c5f394..8154c78bba56 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -161,23 +161,6 @@
* attempts to write to the page.
*/
/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
-#define __P011 PAGE_READONLY /* ditto */
-#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
-#define __S011 PAGE_SHARED
-#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 5d165607bf35..2a922883e30f 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
}
@@ -492,3 +492,42 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ /* write to priv pg -> copy & make writable */
+ case VM_WRITE:
+ /* write to priv pg -> copy & make writable */
+ case VM_WRITE | VM_READ:
+ return PAGE_READONLY;
+ case VM_EXEC:
+ return __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX);
+ case VM_EXEC | VM_READ:
+ return __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_COPY_EXEC;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ /* we don't have (and don't need) write-only */
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ return __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX);
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX);
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
macros can be dropped, as they are no longer needed.
Cc: Paul Walmsley <[email protected]>
Cc: Palmer Dabbelt <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/riscv/Kconfig | 1 +
arch/riscv/include/asm/pgtable.h | 16 ------------
arch/riscv/mm/init.c | 42 ++++++++++++++++++++++++++++++++
3 files changed, 43 insertions(+), 16 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 5adcbd9b5e88..9391742f9286 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -31,6 +31,7 @@ config RISCV
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7e949f25c933..d2bb14cac28b 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -183,24 +183,8 @@ extern struct pt_alloc_ops pt_ops __initdata;
extern pgd_t swapper_pg_dir[];
/* MAP_PRIVATE permissions: xwr (copy-on-write) */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READ
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_EXEC
-#define __P101 PAGE_READ_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_READ_EXEC
/* MAP_SHARED permissions: xwr */
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READ
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXEC
-#define __S101 PAGE_READ_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c27294128e18..8cb5d1eeb287 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1050,3 +1050,45 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif
+
+#ifdef CONFIG_MMU
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ /* MAP_PRIVATE permissions: xwr (copy-on-write) */
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READ;
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_EXEC:
+ return PAGE_EXEC;
+ case VM_EXEC | VM_READ:
+ return PAGE_READ_EXEC;
+ case VM_EXEC | VM_WRITE:
+ return PAGE_COPY_EXEC;
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_COPY_READ_EXEC;
+ /* MAP_SHARED permissions: xwr */
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READ;
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ return PAGE_EXEC;
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_READ_EXEC;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_SHARED_EXEC;
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+#endif /* CONFIG_MMU */
--
2.25.1
protection_map[] maps vm_flags access combinations into page protection
values as defined by the platform via __PXXX and __SXXX macros. The array
indices in protection_map[] represent vm_flags access combinations, but
they are not very intuitive to derive. This makes the mapping clear and explicit.
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: [email protected]
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Anshuman Khandual <[email protected]>
---
mm/mmap.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 1e8fdb0b51ed..670c68f5fbf1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -102,8 +102,22 @@ static void unmap_region(struct mm_struct *mm,
* x: (yes) yes
*/
pgprot_t protection_map[16] __ro_after_init = {
- __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
- __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ [VM_NONE] = __P000,
+ [VM_READ] = __P001,
+ [VM_WRITE] = __P010,
+ [VM_WRITE | VM_READ] = __P011,
+ [VM_EXEC] = __P100,
+ [VM_EXEC | VM_READ] = __P101,
+ [VM_EXEC | VM_WRITE] = __P110,
+ [VM_EXEC | VM_WRITE | VM_READ] = __P111,
+ [VM_SHARED] = __S000,
+ [VM_SHARED | VM_READ] = __S001,
+ [VM_SHARED | VM_WRITE] = __S010,
+ [VM_SHARED | VM_WRITE | VM_READ] = __S011,
+ [VM_SHARED | VM_EXEC] = __S100,
+ [VM_SHARED | VM_EXEC | VM_READ] = __S101,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __S110,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
};
#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
--
2.25.1
There are no other users for protection_map[]. Hence just drop this array
construct and instead define __vm_get_page_prot(), which provides the page
protection value via a switch over the vm_flags combination.
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
include/linux/mm.h | 6 -----
mm/mmap.c | 61 +++++++++++++++++++++++++++++++---------------
2 files changed, 41 insertions(+), 26 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 213cc569b192..ff74bd2d7850 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -418,12 +418,6 @@ extern unsigned int kobjsize(const void *objp);
#endif
#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
-/*
- * mapping from the currently active vm_flags protection bits (the
- * low four bits) to a page protection mask..
- */
-extern pgprot_t protection_map[16];
-
/*
* The default fault flags that should be used by most of the
* arch-specific page fault handlers.
diff --git a/mm/mmap.c b/mm/mmap.c
index ffd70a0c8ddf..f61f74a61f62 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -102,24 +102,6 @@ static void unmap_region(struct mm_struct *mm,
* w: (no) no
* x: (yes) yes
*/
-pgprot_t protection_map[16] __ro_after_init = {
- [VM_NONE] = __P000,
- [VM_READ] = __P001,
- [VM_WRITE] = __P010,
- [VM_WRITE | VM_READ] = __P011,
- [VM_EXEC] = __P100,
- [VM_EXEC | VM_READ] = __P101,
- [VM_EXEC | VM_WRITE] = __P110,
- [VM_EXEC | VM_WRITE | VM_READ] = __P111,
- [VM_SHARED] = __S000,
- [VM_SHARED | VM_READ] = __S001,
- [VM_SHARED | VM_WRITE] = __S010,
- [VM_SHARED | VM_WRITE | VM_READ] = __S011,
- [VM_SHARED | VM_EXEC] = __S100,
- [VM_SHARED | VM_EXEC | VM_READ] = __S101,
- [VM_SHARED | VM_EXEC | VM_WRITE] = __S110,
- [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
-};
#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
@@ -128,10 +110,49 @@ static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
}
#endif
+static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return __P000;
+ case VM_READ:
+ return __P001;
+ case VM_WRITE:
+ return __P010;
+ case VM_READ | VM_WRITE:
+ return __P011;
+ case VM_EXEC:
+ return __P100;
+ case VM_EXEC | VM_READ:
+ return __P101;
+ case VM_EXEC | VM_WRITE:
+ return __P110;
+ case VM_EXEC | VM_READ | VM_WRITE:
+ return __P111;
+ case VM_SHARED:
+ return __S000;
+ case VM_SHARED | VM_READ:
+ return __S001;
+ case VM_SHARED | VM_WRITE:
+ return __S010;
+ case VM_SHARED | VM_READ | VM_WRITE:
+ return __S011;
+ case VM_SHARED | VM_EXEC:
+ return __S100;
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return __S101;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ return __S110;
+ case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+ return __S111;
+ default:
+ BUILD_BUG();
+ }
+}
+
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
- pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+ pgprot_t ret = __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
pgprot_val(arch_vm_get_page_prot(vm_flags)));
return arch_filter_pgprot(ret);
--
2.25.1
All available platforms export their own vm_get_page_prot() implementation
via ARCH_HAS_VM_GET_PAGE_PROT. Hence a generic implementation is no longer
needed.
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
mm/mmap.c | 40 ----------------------------------------
1 file changed, 40 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 2fc597cf8b8d..368bc8aee45b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -102,46 +102,6 @@ static void unmap_region(struct mm_struct *mm,
* w: (no) no
* x: (yes) yes
*/
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
- switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
- case VM_NONE:
- return __P000;
- case VM_READ:
- return __P001;
- case VM_WRITE:
- return __P010;
- case VM_READ | VM_WRITE:
- return __P011;
- case VM_EXEC:
- return __P100;
- case VM_EXEC | VM_READ:
- return __P101;
- case VM_EXEC | VM_WRITE:
- return __P110;
- case VM_EXEC | VM_READ | VM_WRITE:
- return __P111;
- case VM_SHARED:
- return __S000;
- case VM_SHARED | VM_READ:
- return __S001;
- case VM_SHARED | VM_WRITE:
- return __S010;
- case VM_SHARED | VM_READ | VM_WRITE:
- return __S011;
- case VM_SHARED | VM_EXEC:
- return __S100;
- case VM_SHARED | VM_EXEC | VM_READ:
- return __S101;
- case VM_SHARED | VM_EXEC | VM_WRITE:
- return __S110;
- case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
- return __S111;
- default:
- BUILD_BUG();
- }
-}
-EXPORT_SYMBOL(vm_get_page_prot);
#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
--
2.25.1
All platforms now define their own vm_get_page_prot() and there is no
generic version left to fall back on. Hence drop ARCH_HAS_VM_GET_PAGE_PROT.
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/alpha/Kconfig | 1 -
arch/arc/Kconfig | 1 -
arch/arm/Kconfig | 1 -
arch/arm64/Kconfig | 1 -
arch/csky/Kconfig | 1 -
arch/hexagon/Kconfig | 1 -
arch/ia64/Kconfig | 1 -
arch/m68k/Kconfig | 1 -
arch/microblaze/Kconfig | 1 -
arch/mips/Kconfig | 1 -
arch/nds32/Kconfig | 1 -
arch/nios2/Kconfig | 1 -
arch/openrisc/Kconfig | 1 -
arch/parisc/Kconfig | 1 -
arch/powerpc/Kconfig | 1 -
arch/riscv/Kconfig | 1 -
arch/s390/Kconfig | 1 -
arch/sh/Kconfig | 1 -
arch/sparc/Kconfig | 2 --
arch/um/Kconfig | 1 -
arch/x86/Kconfig | 1 -
arch/xtensa/Kconfig | 1 -
mm/Kconfig | 3 ---
mm/mmap.c | 23 -----------------------
24 files changed, 49 deletions(-)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 73e82fe5c770..4e87783c90ad 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -2,7 +2,6 @@
config ALPHA
bool
default y
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_32BIT_USTAT_F_TINODE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 78ff0644b343..3c2a4753d09b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -13,7 +13,6 @@ config ARC
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
select ARCH_32BIT_OFF_T
select BUILDTIME_TABLE_SORT
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 87b2e89ef3d6..4c97cb40eebb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,7 +23,6 @@ config ARM
select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7153d5fff603..bfb92b98d5aa 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -43,7 +43,6 @@ config ARM64
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 209dac5686dd..132f43f12dd8 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -6,7 +6,6 @@ config CSKY
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index cdc5df32a1e3..15dd8f38b698 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -6,7 +6,6 @@ config HEXAGON
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_NO_PREEMPT
select DMA_GLOBAL_POOL
# Other pending projects/to-do items.
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 0ab15e8d5783..a7e01573abd8 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,7 +11,6 @@ config IA64
select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ACPI
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 114e65164692..936e1803c7c7 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -11,7 +11,6 @@ config M68K
select ARCH_NO_PREEMPT if !COLDFIRE
select ARCH_USE_MEMTEST if MMU_MOTOROLA
select ARCH_WANT_IPC_PARSE_VERSION
- select ARCH_HAS_VM_GET_PAGE_PROT
select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
select GENERIC_ATOMIC64
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f2c25ba8621e..59798e43cdb0 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -7,7 +7,6 @@ config MICROBLAZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index fcbfc52a1567..058446f01487 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -13,7 +13,6 @@ config MIPS
select ARCH_HAS_STRNLEN_USER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_KEEP_MEMBLOCK
select ARCH_SUPPORTS_UPROBES
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 576e05479925..4d1421b18734 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -10,7 +10,6 @@ config NDS32
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_WANT_FRAME_POINTERS if FTRACE
select CLKSRC_MMIO
select CLONE_BACKWARDS
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 85a58a357a3b..33fd06f5fa41 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -6,7 +6,6 @@ config NIOS2
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_DMA_SET_UNCACHED
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_NO_SWAP
select COMMON_CLK
select TIMER_OF
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 842a61426816..f724b3f1aeed 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -10,7 +10,6 @@ config OPENRISC
select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_HAS_DMA_CLEAR_UNCACHED
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select ARCH_HAS_VM_GET_PAGE_PROT
select COMMON_CLK
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index de512f120b50..43c1c880def6 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -10,7 +10,6 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_NO_SG_CHAIN
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ddb4a3687c05..b779603978e1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,7 +135,6 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 9391742f9286..5adcbd9b5e88 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -31,7 +31,6 @@ config RISCV
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index cb1b487e8201..be9f39fd06df 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -78,7 +78,6 @@ config S390
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_VDSO_DATA
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index f3fcd1c5e002..2474a04ceac4 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,7 +11,6 @@ config SUPERH
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HIBERNATION_POSSIBLE if MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index ff29156f2380..1cab1b284f1a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -59,7 +59,6 @@ config SPARC32
select HAVE_UID16
select OLD_SIGACTION
select ZONE_DMA
- select ARCH_HAS_VM_GET_PAGE_PROT
config SPARC64
def_bool 64BIT
@@ -85,7 +84,6 @@ config SPARC64
select PERF_USE_VMALLOC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select HAVE_C_RECORDMCOUNT
- select ARCH_HAS_VM_GET_PAGE_PROT
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 5836296868a8..4d398b80aea8 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -9,7 +9,6 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_NO_PREEMPT
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b2ea06c87708..013d8d6179e5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -93,7 +93,6 @@ config X86
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 1608f7517546..8ac599aa6d99 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,7 +9,6 @@ config XTENSA
select ARCH_HAS_DMA_SET_UNCACHED if MMU
select ARCH_HAS_STRNCPY_FROM_USER if !KASAN
select ARCH_HAS_STRNLEN_USER
- select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
diff --git a/mm/Kconfig b/mm/Kconfig
index 212fb6e1ddaa..3326ee3903f3 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -744,9 +744,6 @@ config IDLE_PAGE_TRACKING
config ARCH_HAS_CACHE_LINE_SIZE
bool
-config ARCH_HAS_VM_GET_PAGE_PROT
- bool
-
config ARCH_HAS_PTE_DEVMAP
bool
diff --git a/mm/mmap.c b/mm/mmap.c
index 368bc8aee45b..00c9967bcfb4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -81,29 +81,6 @@ static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
-#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
-/* description of effects of mapping type and prot in current implementation.
- * this is due to the limited x86 page protection hardware. The expected
- * behavior is in parens:
- *
- * map_type prot
- * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
- * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
- * w: (no) no w: (no) no w: (yes) yes w: (no) no
- * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
- * w: (no) no w: (no) no w: (copy) copy w: (no) no
- * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE (with Enhanced PAN supported):
- * r: (no) no
- * w: (no) no
- * x: (yes) yes
- */
-#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
-
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and
__PXXX macros, which are no longer needed, can be dropped.
Cc: Yoshinori Sato <[email protected]>
Cc: Rich Felker <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/sh/Kconfig | 1 +
arch/sh/include/asm/pgtable.h | 17 ----------------
arch/sh/mm/mmap.c | 38 +++++++++++++++++++++++++++++++++++
3 files changed, 39 insertions(+), 17 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2474a04ceac4..f3fcd1c5e002 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,6 +11,7 @@ config SUPERH
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HIBERNATION_POSSIBLE if MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index d7ddb1ec86a0..6fb9ec54cf9b 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -89,23 +89,6 @@ static inline unsigned long phys_addr_mask(void)
* completely separate permission bits for user and kernel space.
*/
/*xwr*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_EXECREAD
-#define __P101 PAGE_EXECREAD
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_WRITEONLY
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECREAD
-#define __S101 PAGE_EXECREAD
-#define __S110 PAGE_RWX
-#define __S111 PAGE_RWX
typedef pte_t *pte_addr_t;
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6a1a1297baae..cad14af6c8e6 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -162,3 +162,41 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
return 1;
}
+
+#ifdef CONFIG_MMU
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READONLY;
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ return PAGE_EXECREAD;
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ return PAGE_WRITEONLY;
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_EXECREAD;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_RWX;
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+#endif
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and
__PXXX macros, which are no longer needed, can be dropped.
Cc: Chris Zankel <[email protected]>
Cc: Guo Ren <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/xtensa/Kconfig | 1 +
arch/xtensa/include/asm/pgtable.h | 18 ----------------
arch/xtensa/mm/init.c | 35 +++++++++++++++++++++++++++++++
3 files changed, 36 insertions(+), 18 deletions(-)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 8ac599aa6d99..1608f7517546 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,6 +9,7 @@ config XTENSA
select ARCH_HAS_DMA_SET_UNCACHED if MMU
select ARCH_HAS_STRNCPY_FROM_USER if !KASAN
select ARCH_HAS_STRNLEN_USER
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index bd5aeb795567..ed6e93097142 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -200,24 +200,6 @@
* What follows is the closest we can get by reasonable means..
* See linux/mm/mmap.c for protection_map[] array that uses these definitions.
*/
-#define __P000 PAGE_NONE /* private --- */
-#define __P001 PAGE_READONLY /* private --r */
-#define __P010 PAGE_COPY /* private -w- */
-#define __P011 PAGE_COPY /* private -wr */
-#define __P100 PAGE_READONLY_EXEC /* private x-- */
-#define __P101 PAGE_READONLY_EXEC /* private x-r */
-#define __P110 PAGE_COPY_EXEC /* private xw- */
-#define __P111 PAGE_COPY_EXEC /* private xwr */
-
-#define __S000 PAGE_NONE /* shared --- */
-#define __S001 PAGE_READONLY /* shared --r */
-#define __S010 PAGE_SHARED /* shared -w- */
-#define __S011 PAGE_SHARED /* shared -wr */
-#define __S100 PAGE_READONLY_EXEC /* shared x-- */
-#define __S101 PAGE_READONLY_EXEC /* shared x-r */
-#define __S110 PAGE_SHARED_EXEC /* shared xw- */
-#define __S111 PAGE_SHARED_EXEC /* shared xwr */
-
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6a32b2cf2718..5f090749e9e0 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -216,3 +216,38 @@ static int __init parse_memmap_opt(char *str)
return 0;
}
early_param("memmap", parse_memmap_opt);
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ return PAGE_READONLY;
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_COPY;
+ case VM_EXEC:
+ case VM_EXEC | VM_READ:
+ return PAGE_READONLY_EXEC;
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_COPY_EXEC;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_READONLY_EXEC;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_SHARED_EXEC;
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
There are no platforms left which use arch_vm_get_page_prot(). Just drop
the arch_vm_get_page_prot() construct and simplify the remaining code.
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
include/linux/mman.h | 4 ----
mm/mmap.c | 10 +---------
2 files changed, 1 insertion(+), 13 deletions(-)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index b66e91b8176c..58b3abd457a3 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -93,10 +93,6 @@ static inline void vm_unacct_memory(long pages)
#define arch_calc_vm_flag_bits(flags) 0
#endif
-#ifndef arch_vm_get_page_prot
-#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
-#endif
-
#ifndef arch_validate_prot
/*
* This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
diff --git a/mm/mmap.c b/mm/mmap.c
index 70a75ea91e94..2fc597cf8b8d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -102,7 +102,7 @@ static void unmap_region(struct mm_struct *mm,
* w: (no) no
* x: (yes) yes
*/
-static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
case VM_NONE:
@@ -141,14 +141,6 @@ static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
BUILD_BUG();
}
}
-
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
- pgprot_t ret = __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
- pgprot_val(arch_vm_get_page_prot(vm_flags)));
-
- return ret;
-}
EXPORT_SYMBOL(vm_get_page_prot);
#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and
__PXXX macros, which are no longer needed, can be dropped.
Cc: Dinh Nguyen <[email protected]>
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
Acked-by: Dinh Nguyen <[email protected]>
---
arch/nios2/Kconfig | 1 +
arch/nios2/include/asm/pgtable.h | 16 ------------
arch/nios2/mm/init.c | 45 ++++++++++++++++++++++++++++++++
3 files changed, 46 insertions(+), 16 deletions(-)
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 33fd06f5fa41..85a58a357a3b 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -6,6 +6,7 @@ config NIOS2
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_DMA_SET_UNCACHED
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_NO_SWAP
select COMMON_CLK
select TIMER_OF
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 4a995fa628ee..2678dad58a63 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -40,24 +40,8 @@ struct mm_struct;
*/
/* Remove W bit on private pages for COW support */
-#define __P000 MKP(0, 0, 0)
-#define __P001 MKP(0, 0, 1)
-#define __P010 MKP(0, 0, 0) /* COW */
-#define __P011 MKP(0, 0, 1) /* COW */
-#define __P100 MKP(1, 0, 0)
-#define __P101 MKP(1, 0, 1)
-#define __P110 MKP(1, 0, 0) /* COW */
-#define __P111 MKP(1, 0, 1) /* COW */
/* Shared pages can have exact HW mapping */
-#define __S000 MKP(0, 0, 0)
-#define __S001 MKP(0, 0, 1)
-#define __S010 MKP(0, 1, 0)
-#define __S011 MKP(0, 1, 1)
-#define __S100 MKP(1, 0, 0)
-#define __S101 MKP(1, 0, 1)
-#define __S110 MKP(1, 1, 0)
-#define __S111 MKP(1, 1, 1)
/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 613fcaa5988a..311b2146a248 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -124,3 +124,48 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return MKP(0, 0, 0);
+ case VM_READ:
+ return MKP(0, 0, 1);
+ /* COW */
+ case VM_WRITE:
+ return MKP(0, 0, 0);
+ /* COW */
+ case VM_WRITE | VM_READ:
+ return MKP(0, 0, 1);
+ case VM_EXEC:
+ return MKP(1, 0, 0);
+ case VM_EXEC | VM_READ:
+ return MKP(1, 0, 1);
+ /* COW */
+ case VM_EXEC | VM_WRITE:
+ return MKP(1, 0, 0);
+ /* COW */
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return MKP(1, 0, 1);
+ case VM_SHARED:
+ return MKP(0, 0, 0);
+ case VM_SHARED | VM_READ:
+ return MKP(0, 0, 1);
+ case VM_SHARED | VM_WRITE:
+ return MKP(0, 1, 0);
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return MKP(0, 1, 1);
+ case VM_SHARED | VM_EXEC:
+ return MKP(1, 0, 0);
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return MKP(1, 0, 1);
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ return MKP(1, 1, 0);
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return MKP(1, 1, 1);
+ default:
+ BUILD_BUG();
+ }
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
This defines and exports a platform-specific custom vm_get_page_prot() via
subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and
__PXXX macros, which are no longer needed, can be dropped. This also
localizes both the arch_filter_pgprot() and arch_vm_get_page_prot()
helpers, unsubscribing from ARCH_HAS_FILTER_PGPROT as well. Both localized
functions are moved near vm_get_page_prot().
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Anshuman Khandual <[email protected]>
---
arch/arm64/Kconfig | 2 +-
arch/arm64/include/asm/mman.h | 24 ---------
arch/arm64/include/asm/pgtable-prot.h | 18 -------
arch/arm64/include/asm/pgtable.h | 11 ----
arch/arm64/mm/mmap.c | 78 +++++++++++++++++++++++++++
5 files changed, 79 insertions(+), 54 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2e5d2eac6fc6..7153d5fff603 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,7 +23,6 @@ config ARM64
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_FAST_MULTIPLIER
- select ARCH_HAS_FILTER_PGPROT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
@@ -44,6 +43,7 @@ config ARM64
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_VM_GET_PAGE_PROT
select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index e3e28f7daf62..5966ee4a6154 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -35,30 +35,6 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
}
#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
- pteval_t prot = 0;
-
- if (vm_flags & VM_ARM64_BTI)
- prot |= PTE_GP;
-
- /*
- * There are two conditions required for returning a Normal Tagged
- * memory type: (1) the user requested it via PROT_MTE passed to
- * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
- * register (1) as VM_MTE in the vma->vm_flags and (2) as
- * VM_MTE_ALLOWED. Note that the latter can only be set during the
- * mmap() call since mprotect() does not accept MAP_* flags.
- * Checking for VM_MTE only is sufficient since arch_validate_flags()
- * does not permit (VM_MTE & !VM_MTE_ALLOWED).
- */
- if (vm_flags & VM_MTE)
- prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
-
- return __pgprot(prot);
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
static inline bool arch_validate_prot(unsigned long prot,
unsigned long addr __always_unused)
{
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 7032f04c8ac6..d8ee0aa7886d 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -88,24 +88,6 @@ extern bool arm64_use_ng_mappings;
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY
-#define __P011 PAGE_READONLY
-#define __P100 PAGE_EXECONLY
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_READONLY_EXEC
-#define __P111 PAGE_READONLY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECONLY
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PGTABLE_PROT_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c4ba047a82d2..94e147e5456c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1017,17 +1017,6 @@ static inline bool arch_wants_old_prefaulted_pte(void)
}
#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte
-static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
-{
- if (cpus_have_const_cap(ARM64_HAS_EPAN))
- return prot;
-
- if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
- return prot;
-
- return PAGE_READONLY_EXEC;
-}
-
static inline bool pud_sect_supported(void)
{
return PAGE_SIZE == SZ_4K;
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index a38f54cd638c..bd0233d376a2 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <asm/page.h>
+#include <asm/mman.h>
/*
* You really shouldn't be using read() or write() on /dev/mem. This might go
@@ -38,3 +39,80 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}
+
+static inline pgprot_t __vm_get_page_prot(unsigned long vm_flags)
+{
+ switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+ case VM_NONE:
+ return PAGE_NONE;
+ case VM_READ:
+ case VM_WRITE:
+ case VM_WRITE | VM_READ:
+ return PAGE_READONLY;
+ case VM_EXEC:
+ return PAGE_EXECONLY;
+ case VM_EXEC | VM_READ:
+ case VM_EXEC | VM_WRITE:
+ case VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_READONLY_EXEC;
+ case VM_SHARED:
+ return PAGE_NONE;
+ case VM_SHARED | VM_READ:
+ return PAGE_READONLY;
+ case VM_SHARED | VM_WRITE:
+ case VM_SHARED | VM_WRITE | VM_READ:
+ return PAGE_SHARED;
+ case VM_SHARED | VM_EXEC:
+ return PAGE_EXECONLY;
+ case VM_SHARED | VM_EXEC | VM_READ:
+ return PAGE_READONLY_EXEC;
+ case VM_SHARED | VM_EXEC | VM_WRITE:
+ case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
+ return PAGE_SHARED_EXEC;
+ default:
+ BUILD_BUG();
+ }
+}
+
+static pgprot_t arm64_arch_filter_pgprot(pgprot_t prot)
+{
+ if (cpus_have_const_cap(ARM64_HAS_EPAN))
+ return prot;
+
+ if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+ return prot;
+
+ return PAGE_READONLY_EXEC;
+}
+
+static pgprot_t arm64_arch_vm_get_page_prot(unsigned long vm_flags)
+{
+ pteval_t prot = 0;
+
+ if (vm_flags & VM_ARM64_BTI)
+ prot |= PTE_GP;
+
+ /*
+ * There are two conditions required for returning a Normal Tagged
+ * memory type: (1) the user requested it via PROT_MTE passed to
+ * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+ * register (1) as VM_MTE in the vma->vm_flags and (2) as
+ * VM_MTE_ALLOWED. Note that the latter can only be set during the
+ * mmap() call since mprotect() does not accept MAP_* flags.
+ * Checking for VM_MTE only is sufficient since arch_validate_flags()
+ * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+ */
+ if (vm_flags & VM_MTE)
+ prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+ return __pgprot(prot);
+}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ pgprot_t ret = __pgprot(pgprot_val(__vm_get_page_prot(vm_flags)) |
+ pgprot_val(arm64_arch_vm_get_page_prot(vm_flags)));
+
+ return arm64_arch_filter_pgprot(ret);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
--
2.25.1
On Mon, Feb 21, 2022 at 12:08:16PM +0530, Anshuman Khandual wrote:
> This defines and exports a platform specific custom vm_get_page_prot() via
> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> macros can be dropped which are no longer needed.
>
> Cc: Thomas Bogendoerfer <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Signed-off-by: Anshuman Khandual <[email protected]>
> ---
> arch/mips/Kconfig | 1 +
> arch/mips/include/asm/pgtable.h | 22 ------------
> arch/mips/mm/cache.c | 60 +++++++++++++++++++--------------
> 3 files changed, 36 insertions(+), 47 deletions(-)
Acked-by: Thomas Bogendoerfer <[email protected]>
--
Crap can work. Given enough thrust pigs will fly, but it's not necessarily a
good idea. [ RFC1925, 2.3 ]
Hi Anshuman,
On Mon, Feb 21, 2022 at 9:45 AM Anshuman Khandual
<[email protected]> wrote:
> This defines and exports a platform specific custom vm_get_page_prot() via
> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> macros can be dropped which are no longer needed.
>
> Cc: Thomas Bogendoerfer <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Signed-off-by: Anshuman Khandual <[email protected]>
Thanks for your patch!
> --- a/arch/m68k/mm/init.c
> +++ b/arch/m68k/mm/init.c
> @@ -128,3 +128,107 @@ void __init mem_init(void)
> memblock_free_all();
> init_pointer_tables();
> }
> +
> +#ifdef CONFIG_COLDFIRE
> +/*
> + * Page protections for initialising protection_map. See mm/mmap.c
> + * for use. In general, the bit positions are xwr, and P-items are
> + * private, the S-items are shared.
> + */
> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
Wouldn't it make more sense to add this to arch/m68k/mm/mcfmmu.c?
> +{
> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> + case VM_NONE:
> + return PAGE_NONE;
> + case VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE);
> + case VM_WRITE:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_WRITABLE);
> + case VM_WRITE | VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE | CF_PAGE_WRITABLE);
> + case VM_EXEC:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_EXEC);
> + case VM_EXEC | VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE | CF_PAGE_EXEC);
> + case VM_EXEC | VM_WRITE:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_WRITABLE | CF_PAGE_EXEC);
> + case VM_EXEC | VM_WRITE | VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE | CF_PAGE_WRITABLE |
> + CF_PAGE_EXEC);
> + case VM_SHARED:
> + return PAGE_NONE;
> + case VM_SHARED | VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE);
This is the same as the plain VM_READ case.
Perhaps they can be merged?
> + case VM_SHARED | VM_WRITE:
> + return PAGE_SHARED;
> + case VM_SHARED | VM_WRITE | VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE | CF_PAGE_SHARED);
> + case VM_SHARED | VM_EXEC:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_EXEC);
Same as plain VM_EXEC.
> + case VM_SHARED | VM_EXEC | VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE | CF_PAGE_EXEC);
Same as plain VM_EXEC | VM_READ.
> + case VM_SHARED | VM_EXEC | VM_WRITE:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_SHARED | CF_PAGE_EXEC);
> + case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> + CF_PAGE_READABLE | CF_PAGE_SHARED |
> + CF_PAGE_EXEC);
> + default:
> + BUILD_BUG();
> + }
> +}
> +#endif
> +
> +#ifdef CONFIG_SUN3
> +/*
> + * Page protections for initialising protection_map. The sun3 has only two
> + * protection settings, valid (implying read and execute) and writeable. These
> + * are as close as we can get...
> + */
> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
Wouldn't it make more sense to add this to arch/m68k/mm/sun3mmu.c?
> +{
> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> + case VM_NONE:
> + return PAGE_NONE;
> + case VM_READ:
> + return PAGE_READONLY;
> + case VM_WRITE:
> + case VM_WRITE | VM_READ:
So you did merge some of them...
> + return PAGE_COPY;
> + case VM_EXEC:
> + case VM_EXEC | VM_READ:
> + return PAGE_READONLY;
But not all? More below...
> + case VM_EXEC | VM_WRITE:
> + case VM_EXEC | VM_WRITE | VM_READ:
> + return PAGE_COPY;
> + case VM_SHARED:
> + return PAGE_NONE;
> + case VM_SHARED | VM_READ:
> + return PAGE_READONLY;
> + case VM_SHARED | VM_WRITE:
> + case VM_SHARED | VM_WRITE | VM_READ:
> + return PAGE_SHARED;
> + case VM_SHARED | VM_EXEC:
> + case VM_SHARED | VM_EXEC | VM_READ:
> + return PAGE_READONLY;
> + case VM_SHARED | VM_EXEC | VM_WRITE:
> + case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
> + return PAGE_SHARED;
> + default:
> + BUILD_BUG();
> + }
> +}
> +#endif
> +EXPORT_SYMBOL(vm_get_page_prot);
> diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
> index ecbe948f4c1a..495ba0ea083c 100644
> --- a/arch/m68k/mm/motorola.c
> +++ b/arch/m68k/mm/motorola.c
> @@ -400,12 +400,9 @@ void __init paging_init(void)
>
> /* Fix the cache mode in the page descriptors for the 680[46]0. */
> if (CPU_IS_040_OR_060) {
> - int i;
> #ifndef mm_cachebits
> mm_cachebits = _PAGE_CACHE040;
> #endif
> - for (i = 0; i < 16; i++)
> - pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
> }
>
> min_addr = m68k_memory[0].addr;
> @@ -483,3 +480,48 @@ void __init paging_init(void)
> max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
> free_area_init(max_zone_pfn);
> }
> +
> +/*
> + * The m68k can't do page protection for execute, and considers that
> + * the same are read. Also, write permissions imply read permissions.
> + * This is the closest we can get..
> + */
> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
Good, this one is in arch/m68k/mm/motorola.c :-)
> +{
> + unsigned long cachebits = 0;
> +
> + if (CPU_IS_040_OR_060)
> + cachebits = _PAGE_CACHE040;
If you would use the non-"_C"-variants (e.g. PAGE_NONE instead of
PAGE_NONE_C) below, you would get the cachebits handling for free!
After that, the "_C" variants are no longer used, and can be removed.
Cfr. arch/m68k/include/asm/motorola_pgtable.h:
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED |
mm_cachebits)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED |
mm_cachebits)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
_PAGE_ACCESSED | mm_cachebits)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
_PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY |
_PAGE_ACCESSED | mm_cachebits)
/* Alternate definitions that are compile time constants, for
initializing protection_map. The cachebits are fixed later. */
#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
_PAGE_ACCESSED)
#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
_PAGE_ACCESSED)
BTW, this shows you left a reference in a comment to the now-gone
"protection_map". There are several more across the tree.
> +
> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> + case VM_NONE:
> + return __pgprot(pgprot_val(PAGE_NONE_C) | cachebits);
> + case VM_READ:
> + return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
> + case VM_WRITE:
> + case VM_WRITE | VM_READ:
> + return __pgprot(pgprot_val(PAGE_COPY_C) | cachebits);
> + case VM_EXEC:
> + case VM_EXEC | VM_READ:
> + return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
> + case VM_EXEC | VM_WRITE:
> + case VM_EXEC | VM_WRITE | VM_READ:
> + return __pgprot(pgprot_val(PAGE_COPY_C) | cachebits);
> + case VM_SHARED:
> + return __pgprot(pgprot_val(PAGE_NONE_C) | cachebits);
Same as the VM_NONE case. More to be merged below...
> + case VM_SHARED | VM_READ:
> + return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
> + case VM_SHARED | VM_WRITE:
> + case VM_SHARED | VM_WRITE | VM_READ:
> + return __pgprot(pgprot_val(PAGE_SHARED_C) | cachebits);
> + case VM_SHARED | VM_EXEC:
> + case VM_SHARED | VM_EXEC | VM_READ:
> + return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
> + case VM_SHARED | VM_EXEC | VM_WRITE:
> + case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
> + return __pgprot(pgprot_val(PAGE_SHARED_C) | cachebits);
> + default:
> + BUILD_BUG();
> + }
> +}
> +EXPORT_SYMBOL(vm_get_page_prot);
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
On 2/21/22 5:24 PM, Geert Uytterhoeven wrote:
> Hi Anshuman,
>
> On Mon, Feb 21, 2022 at 9:45 AM Anshuman Khandual
> <[email protected]> wrote:
>> This defines and exports a platform specific custom vm_get_page_prot() via
>> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
>> macros can be dropped which are no longer needed.
>>
>> Cc: Thomas Bogendoerfer <[email protected]>
>> Cc: [email protected]
>> Cc: [email protected]
>> Signed-off-by: Anshuman Khandual <[email protected]>
>
> Thanks for your patch!
>
>> --- a/arch/m68k/mm/init.c
>> +++ b/arch/m68k/mm/init.c
>> @@ -128,3 +128,107 @@ void __init mem_init(void)
>> memblock_free_all();
>> init_pointer_tables();
>> }
>> +
>> +#ifdef CONFIG_COLDFIRE
>> +/*
>> + * Page protections for initialising protection_map. See mm/mmap.c
>> + * for use. In general, the bit positions are xwr, and P-items are
>> + * private, the S-items are shared.
>> + */
>> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
>
> Wouldn't it make more sense to add this to arch/m68k/mm/mcfmmu.c?
Sure, will move (#ifdef CONFIG_COLDFIRE will not be required anymore).
>
>> +{
>> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
>> + case VM_NONE:
>> + return PAGE_NONE;
>> + case VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE);
>> + case VM_WRITE:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_WRITABLE);
>> + case VM_WRITE | VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE | CF_PAGE_WRITABLE);
>> + case VM_EXEC:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_EXEC);
>> + case VM_EXEC | VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE | CF_PAGE_EXEC);
>> + case VM_EXEC | VM_WRITE:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_WRITABLE | CF_PAGE_EXEC);
>> + case VM_EXEC | VM_WRITE | VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE | CF_PAGE_WRITABLE |
>> + CF_PAGE_EXEC);
>> + case VM_SHARED:
>> + return PAGE_NONE;
>> + case VM_SHARED | VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE);
>
> This is the same as the plain VM_READ case.
> Perhaps they can be merged?
IMHO, it is worth preserving the existing switch-case sequence, as vm_flags moves linearly from VM_NONE to (VM_SHARED | VM_EXEC | VM_WRITE | VM_READ). This proposal did not attempt to further optimize any common page prot values for various vm_flags combinations, even on other platforms.
>
>> + case VM_SHARED | VM_WRITE:
>> + return PAGE_SHARED;
>> + case VM_SHARED | VM_WRITE | VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE | CF_PAGE_SHARED);
>> + case VM_SHARED | VM_EXEC:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_EXEC);
>
> Same as plain VM_EXEC.
>
>> + case VM_SHARED | VM_EXEC | VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE | CF_PAGE_EXEC);
>
> Same as plain VM_EXEC | VM_READ.
>
>> + case VM_SHARED | VM_EXEC | VM_WRITE:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_SHARED | CF_PAGE_EXEC);
>> + case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
>> + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
>> + CF_PAGE_READABLE | CF_PAGE_SHARED |
>> + CF_PAGE_EXEC);
>> + default:
>> + BUILD_BUG();
>> + }
>> +}
>> +#endif
>> +
>> +#ifdef CONFIG_SUN3
>> +/*
>> + * Page protections for initialising protection_map. The sun3 has only two
>> + * protection settings, valid (implying read and execute) and writeable. These
>> + * are as close as we can get...
>> + */
>> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
>
> Wouldn't it make more sense to add this to arch/m68k/mm/sun3mmu.c?
Sure, will move (#ifdef CONFIG_SUN3 will not be required anymore).
>
>> +{
>> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
>> + case VM_NONE:
>> + return PAGE_NONE;
>> + case VM_READ:
>> + return PAGE_READONLY;
>> + case VM_WRITE:
>> + case VM_WRITE | VM_READ:
>
> So you did merge some of them...
Only when they follow the linear vm_flags sequence.
>
>> + return PAGE_COPY;
>> + case VM_EXEC:
>> + case VM_EXEC | VM_READ:
>> + return PAGE_READONLY;
>
> But not all? More below...
Right, because I did not want to shuffle up the linear vm_flags sequence.
>
>> + case VM_EXEC | VM_WRITE:
>> + case VM_EXEC | VM_WRITE | VM_READ:
>> + return PAGE_COPY;
>> + case VM_SHARED:
>> + return PAGE_NONE;
>> + case VM_SHARED | VM_READ:
>> + return PAGE_READONLY;
>> + case VM_SHARED | VM_WRITE:
>> + case VM_SHARED | VM_WRITE | VM_READ:
>> + return PAGE_SHARED;
>> + case VM_SHARED | VM_EXEC:
>> + case VM_SHARED | VM_EXEC | VM_READ:
>> + return PAGE_READONLY;
>> + case VM_SHARED | VM_EXEC | VM_WRITE:
>> + case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
>> + return PAGE_SHARED;
>> + default:
>> + BUILD_BUG();
>> + }
>> +}
>> +#endif
>> +EXPORT_SYMBOL(vm_get_page_prot);
>> diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
>> index ecbe948f4c1a..495ba0ea083c 100644
>> --- a/arch/m68k/mm/motorola.c
>> +++ b/arch/m68k/mm/motorola.c
>> @@ -400,12 +400,9 @@ void __init paging_init(void)
>>
>> /* Fix the cache mode in the page descriptors for the 680[46]0. */
>> if (CPU_IS_040_OR_060) {
>> - int i;
>> #ifndef mm_cachebits
>> mm_cachebits = _PAGE_CACHE040;
>> #endif
>> - for (i = 0; i < 16; i++)
>> - pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
>> }
>>
>> min_addr = m68k_memory[0].addr;
>> @@ -483,3 +480,48 @@ void __init paging_init(void)
>> max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
>> free_area_init(max_zone_pfn);
>> }
>> +
>> +/*
>> + * The m68k can't do page protection for execute, and considers that
>> + * the same are read. Also, write permissions imply read permissions.
>> + * This is the closest we can get..
>> + */
>> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
>
> Good, this one is in arch/m68k/mm/motorola.c :-)
>
>> +{
>> + unsigned long cachebits = 0;
>> +
>> + if (CPU_IS_040_OR_060)
>> + cachebits = _PAGE_CACHE040;
>
> If you would use the non-"_C"-variants (e.g. PAGE_NONE instead of
> PAGE_NONE_C) below, you would get the cachebits handling for free!
> After that, the "_C" variants are no longer used, and can be removed.
> Cfr. arch/m68k/include/asm/motorola_pgtable.h:
Right.
>
> #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED |
> mm_cachebits)
> #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED |
> mm_cachebits)
> #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
> _PAGE_ACCESSED | mm_cachebits)
> #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
> _PAGE_ACCESSED | mm_cachebits)
> #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY |
> _PAGE_ACCESSED | mm_cachebits)
>
> /* Alternate definitions that are compile time constants, for
> initializing protection_map. The cachebits are fixed later. */
> #define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
> #define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
> #define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
> _PAGE_ACCESSED)
> #define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY |
> _PAGE_ACCESSED)
Will drop all _C definitions and change switch case as mentioned above.
>
> BTW, this shows you left a reference in a comment to the now-gone
> "protection_map". There are several more across the tree.
Right, will remove them all.
>
>> +
>> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
>> + case VM_NONE:
>> + return __pgprot(pgprot_val(PAGE_NONE_C) | cachebits);
>> + case VM_READ:
>> + return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
>> + case VM_WRITE:
>> + case VM_WRITE | VM_READ:
>> + return __pgprot(pgprot_val(PAGE_COPY_C) | cachebits);
>> + case VM_EXEC:
>> + case VM_EXEC | VM_READ:
>> + return __pgprot(pgprot_val(PAGE_READONLY_C) | cachebits);
>> + case VM_EXEC | VM_WRITE:
>> + case VM_EXEC | VM_WRITE | VM_READ:
>> + return __pgprot(pgprot_val(PAGE_COPY_C) | cachebits);
>> + case VM_SHARED:
>> + return __pgprot(pgprot_val(PAGE_NONE_C) | cachebits);
>
> Same as the VM_NONE case. More to be merged below...
As explained earlier.
On 2/21/22 00:38, Anshuman Khandual wrote:
> This defines and exports a platform specific custom vm_get_page_prot() via
> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> macros can be dropped which are no longer needed.
>
> Cc: Dinh Nguyen <[email protected]>
> Cc: [email protected]
> Signed-off-by: Anshuman Khandual <[email protected]>
> Acked-by: Dinh Nguyen <[email protected]>
> ---
> arch/nios2/Kconfig | 1 +
> arch/nios2/include/asm/pgtable.h | 16 ------------
> arch/nios2/mm/init.c | 45 ++++++++++++++++++++++++++++++++
> 3 files changed, 46 insertions(+), 16 deletions(-)
>
> diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
> index 33fd06f5fa41..85a58a357a3b 100644
> --- a/arch/nios2/Kconfig
> +++ b/arch/nios2/Kconfig
> @@ -6,6 +6,7 @@ config NIOS2
> select ARCH_HAS_SYNC_DMA_FOR_CPU
> select ARCH_HAS_SYNC_DMA_FOR_DEVICE
> select ARCH_HAS_DMA_SET_UNCACHED
> + select ARCH_HAS_VM_GET_PAGE_PROT
> select ARCH_NO_SWAP
> select COMMON_CLK
> select TIMER_OF
> diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
> index 4a995fa628ee..2678dad58a63 100644
> --- a/arch/nios2/include/asm/pgtable.h
> +++ b/arch/nios2/include/asm/pgtable.h
> @@ -40,24 +40,8 @@ struct mm_struct;
> */
>
> /* Remove W bit on private pages for COW support */
> -#define __P000 MKP(0, 0, 0)
> -#define __P001 MKP(0, 0, 1)
> -#define __P010 MKP(0, 0, 0) /* COW */
> -#define __P011 MKP(0, 0, 1) /* COW */
> -#define __P100 MKP(1, 0, 0)
> -#define __P101 MKP(1, 0, 1)
> -#define __P110 MKP(1, 0, 0) /* COW */
> -#define __P111 MKP(1, 0, 1) /* COW */
>
> /* Shared pages can have exact HW mapping */
> -#define __S000 MKP(0, 0, 0)
> -#define __S001 MKP(0, 0, 1)
> -#define __S010 MKP(0, 1, 0)
> -#define __S011 MKP(0, 1, 1)
> -#define __S100 MKP(1, 0, 0)
> -#define __S101 MKP(1, 0, 1)
> -#define __S110 MKP(1, 1, 0)
> -#define __S111 MKP(1, 1, 1)
>
> /* Used all over the kernel */
> #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
> diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
> index 613fcaa5988a..311b2146a248 100644
> --- a/arch/nios2/mm/init.c
> +++ b/arch/nios2/mm/init.c
> @@ -124,3 +124,48 @@ const char *arch_vma_name(struct vm_area_struct *vma)
> {
> return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
> }
> +
> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
> +{
> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> + case VM_NONE:
> + return MKP(0, 0, 0);
> + case VM_READ:
> + return MKP(0, 0, 1);
> + /* COW */
> + case VM_WRITE:
> + return MKP(0, 0, 0);
> + /* COW */
> + case VM_WRITE | VM_READ:
> + return MKP(0, 0, 1);
> + case VM_EXEC:
> + return MKP(1, 0, 0);
> + case VM_EXEC | VM_READ:
> + return MKP(1, 0, 1);
> + /* COW */
> + case VM_EXEC | VM_WRITE:
> + return MKP(1, 0, 0);
> + /* COW */
> + case VM_EXEC | VM_WRITE | VM_READ:
> + return MKP(1, 0, 1);
> + case VM_SHARED:
> + return MKP(0, 0, 0);
> + case VM_SHARED | VM_READ:
> + return MKP(0, 0, 1);
> + case VM_SHARED | VM_WRITE:
> + return MKP(0, 1, 0);
> + case VM_SHARED | VM_WRITE | VM_READ:
> + return MKP(0, 1, 1);
> + case VM_SHARED | VM_EXEC:
> + return MKP(1, 0, 0);
> + case VM_SHARED | VM_EXEC | VM_READ:
> + return MKP(1, 0, 1);
> + case VM_SHARED | VM_EXEC | VM_WRITE:
> + return MKP(1, 1, 0);
> + case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
> + return MKP(1, 1, 1);
> + default:
> + BUILD_BUG();
> + }
> +}
> +EXPORT_SYMBOL(vm_get_page_prot);
Applied!
Thanks,
Dinh
On Mon, Feb 21, 2022 at 12:08:23PM +0530, Anshuman Khandual wrote:
> This defines and exports a platform specific custom vm_get_page_prot() via
> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> macros can be dropped which are no longer needed.
>
> Cc: Heiko Carstens <[email protected]>
> Cc: Vasily Gorbik <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Signed-off-by: Anshuman Khandual <[email protected]>
Acked-by: Alexander Gordeev <[email protected]>
> ---
> arch/s390/Kconfig | 1 +
> arch/s390/include/asm/pgtable.h | 17 -----------------
> arch/s390/mm/mmap.c | 33 +++++++++++++++++++++++++++++++++
> 3 files changed, 34 insertions(+), 17 deletions(-)
>
> diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
> index be9f39fd06df..cb1b487e8201 100644
> --- a/arch/s390/Kconfig
> +++ b/arch/s390/Kconfig
> @@ -78,6 +78,7 @@ config S390
> select ARCH_HAS_SYSCALL_WRAPPER
> select ARCH_HAS_UBSAN_SANITIZE_ALL
> select ARCH_HAS_VDSO_DATA
> + select ARCH_HAS_VM_GET_PAGE_PROT
> select ARCH_HAVE_NMI_SAFE_CMPXCHG
> select ARCH_INLINE_READ_LOCK
> select ARCH_INLINE_READ_LOCK_BH
> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
> index 008a6c856fa4..3893ef64b439 100644
> --- a/arch/s390/include/asm/pgtable.h
> +++ b/arch/s390/include/asm/pgtable.h
> @@ -422,23 +422,6 @@ static inline int is_module_addr(void *addr)
> * implies read permission.
> */
> /*xwr*/
> -#define __P000 PAGE_NONE
> -#define __P001 PAGE_RO
> -#define __P010 PAGE_RO
> -#define __P011 PAGE_RO
> -#define __P100 PAGE_RX
> -#define __P101 PAGE_RX
> -#define __P110 PAGE_RX
> -#define __P111 PAGE_RX
> -
> -#define __S000 PAGE_NONE
> -#define __S001 PAGE_RO
> -#define __S010 PAGE_RW
> -#define __S011 PAGE_RW
> -#define __S100 PAGE_RX
> -#define __S101 PAGE_RX
> -#define __S110 PAGE_RWX
> -#define __S111 PAGE_RWX
>
> /*
> * Segment entry (large page) protection definitions.
> diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
> index e54f928503c5..e99c198aa5de 100644
> --- a/arch/s390/mm/mmap.c
> +++ b/arch/s390/mm/mmap.c
> @@ -188,3 +188,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
> mm->get_unmapped_area = arch_get_unmapped_area_topdown;
> }
> }
> +
> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
> +{
> + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> + case VM_NONE:
> + return PAGE_NONE;
> + case VM_READ:
> + case VM_WRITE:
> + case VM_WRITE | VM_READ:
> + return PAGE_RO;
> + case VM_EXEC:
> + case VM_EXEC | VM_READ:
> + case VM_EXEC | VM_WRITE:
> + case VM_EXEC | VM_WRITE | VM_READ:
> + return PAGE_RX;
> + case VM_SHARED:
> + return PAGE_NONE;
> + case VM_SHARED | VM_READ:
> + return PAGE_RO;
> + case VM_SHARED | VM_WRITE:
> + case VM_SHARED | VM_WRITE | VM_READ:
> + return PAGE_RW;
> + case VM_SHARED | VM_EXEC:
> + case VM_SHARED | VM_EXEC | VM_READ:
> + return PAGE_RX;
> + case VM_SHARED | VM_EXEC | VM_WRITE:
> + case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
> + return PAGE_RWX;
> + default:
> + BUILD_BUG();
> + }
> +}
> +EXPORT_SYMBOL(vm_get_page_prot);
> --
> 2.25.1
>
On 2/25/22 2:32 PM, Geert Uytterhoeven wrote:
> Hi Anshuman, Andrew,
>
> On Mon, Feb 21, 2022 at 12:54 PM Geert Uytterhoeven
> <[email protected]> wrote:
>> On Mon, Feb 21, 2022 at 9:45 AM Anshuman Khandual
>> <[email protected]> wrote:
>>> This defines and exports a platform specific custom vm_get_page_prot() via
>>> subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
>>> macros can be dropped which are no longer needed.
>>>
>>> Cc: Thomas Bogendoerfer <[email protected]>
>>> Cc: [email protected]
>>> Cc: [email protected]
>>> Signed-off-by: Anshuman Khandual <[email protected]>
>>
>> Thanks for your patch!
>>
>>> --- a/arch/m68k/mm/init.c
>>> +++ b/arch/m68k/mm/init.c
>>> @@ -128,3 +128,107 @@ void __init mem_init(void)
>>> memblock_free_all();
>>> init_pointer_tables();
>>> }
>>> +
>>> +#ifdef CONFIG_COLDFIRE
>>> +/*
>>> + * Page protections for initialising protection_map. See mm/mmap.c
>>> + * for use. In general, the bit positions are xwr, and P-items are
>>> + * private, the S-items are shared.
>>> + */
>>> +pgprot_t vm_get_page_prot(unsigned long vm_flags)
>>
>> Wouldn't it make more sense to add this to arch/m68k/mm/mcfmmu.c?
>
> It's not just about sense, but also about correctness.
> The CF_PAGE_* definitions below exist only if CONFIG_MMU=y,
> thus causing breakage for cfnommu in today's linux-next.
> http://kisskb.ellerman.id.au/kisskb/buildresult/14701640/
As mentioned before, I will make all these necessary changes in the next
version, probably sometime early next week. I was waiting until now for
any other reviews.
Hi Anshuman, Andrew,
On Mon, Feb 21, 2022 at 12:54 PM Geert Uytterhoeven
<[email protected]> wrote:
> On Mon, Feb 21, 2022 at 9:45 AM Anshuman Khandual
> <[email protected]> wrote:
> > This defines and exports a platform specific custom vm_get_page_prot() via
> > subscribing ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all __SXXX and __PXXX
> > macros can be dropped which are no longer needed.
> >
> > Cc: Thomas Bogendoerfer <[email protected]>
> > Cc: [email protected]
> > Cc: [email protected]
> > Signed-off-by: Anshuman Khandual <[email protected]>
>
> Thanks for your patch!
>
> > --- a/arch/m68k/mm/init.c
> > +++ b/arch/m68k/mm/init.c
> > @@ -128,3 +128,107 @@ void __init mem_init(void)
> > memblock_free_all();
> > init_pointer_tables();
> > }
> > +
> > +#ifdef CONFIG_COLDFIRE
> > +/*
> > + * Page protections for initialising protection_map. See mm/mmap.c
> > + * for use. In general, the bit positions are xwr, and P-items are
> > + * private, the S-items are shared.
> > + */
> > +pgprot_t vm_get_page_prot(unsigned long vm_flags)
>
> Wouldn't it make more sense to add this to arch/m68k/mm/mcfmmu.c?
It's not just about sense, but also about correctness.
The CF_PAGE_* definitions below exist only if CONFIG_MMU=y,
thus causing breakage for cfnommu in today's linux-next.
http://kisskb.ellerman.id.au/kisskb/buildresult/14701640/
>
> > +{
> > + switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
> > + case VM_NONE:
> > + return PAGE_NONE;
> > + case VM_READ:
> > + return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> > + CF_PAGE_READABLE);
> > + BUILD_BUG();
> > + }
> > +}
> > +#endif
> > +EXPORT_SYMBOL(vm_get_page_prot);
Having this outside the #ifdef means we now get ...
> > --- a/arch/m68k/mm/motorola.c
> > +++ b/arch/m68k/mm/motorola.c
> > +}
> > +EXPORT_SYMBOL(vm_get_page_prot);
... two of them in normal m68k builds.
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
Hi Anshuman,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on hnaz-mm/master]
url: https://github.com/0day-ci/linux/commits/Anshuman-Khandual/mm-mmap-Drop-protection_map-and-platform-s-__SXXX-__PXXX-requirements/20220221-144133
base: https://github.com/hnaz/linux-mm master
config: m68k-randconfig-r033-20220221 (https://download.01.org/0day-ci/archive/20220221/[email protected]/config)
compiler: m68k-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/e75c29d8b212cfab904914acdd5a027fb15d2f16
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Anshuman-Khandual/mm-mmap-Drop-protection_map-and-platform-s-__SXXX-__PXXX-requirements/20220221-144133
git checkout e75c29d8b212cfab904914acdd5a027fb15d2f16
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=m68k SHELL=/bin/bash arch/m68k/mm/
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <[email protected]>
All errors (new ones prefixed by >>):
>> arch/m68k/mm/init.c:138:10: error: redefinition of 'vm_get_page_prot'
138 | pgprot_t vm_get_page_prot(unsigned long vm_flags)
| ^~~~~~~~~~~~~~~~
In file included from arch/m68k/mm/init.c:14:
include/linux/mm.h:2801:24: note: previous definition of 'vm_get_page_prot' with type 'pgprot_t(long unsigned int)'
2801 | static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
| ^~~~~~~~~~~~~~~~
In file included from arch/m68k/include/asm/thread_info.h:6,
from include/linux/thread_info.h:60,
from include/asm-generic/preempt.h:5,
from ./arch/m68k/include/generated/asm/preempt.h:1,
from include/linux/preempt.h:78,
from arch/m68k/include/asm/irqflags.h:6,
from include/linux/irqflags.h:16,
from arch/m68k/include/asm/atomic.h:6,
from include/linux/atomic.h:7,
from include/linux/mm_types_task.h:13,
from include/linux/mm_types.h:5,
from include/linux/buildid.h:5,
from include/linux/module.h:14,
from arch/m68k/mm/init.c:11:
arch/m68k/mm/init.c: In function 'vm_get_page_prot':
>> arch/m68k/mm/init.c:144:33: error: 'CF_PAGE_VALID' undeclared (first use in this function)
144 | return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
| ^~~~~~~~~~~~~
arch/m68k/include/asm/page.h:51:40: note: in definition of macro '__pgprot'
51 | #define __pgprot(x) ((pgprot_t) { (x) } )
| ^
arch/m68k/mm/init.c:144:33: note: each undeclared identifier is reported only once for each function it appears in
144 | return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
| ^~~~~~~~~~~~~
arch/m68k/include/asm/page.h:51:40: note: in definition of macro '__pgprot'
51 | #define __pgprot(x) ((pgprot_t) { (x) } )
| ^
>> arch/m68k/mm/init.c:144:49: error: 'CF_PAGE_ACCESSED' undeclared (first use in this function); did you mean 'FGP_ACCESSED'?
144 | return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
| ^~~~~~~~~~~~~~~~
arch/m68k/include/asm/page.h:51:40: note: in definition of macro '__pgprot'
51 | #define __pgprot(x) ((pgprot_t) { (x) } )
| ^
>> arch/m68k/mm/init.c:145:33: error: 'CF_PAGE_READABLE' undeclared (first use in this function); did you mean 'PAGE_READONLY'?
145 | CF_PAGE_READABLE);
| ^~~~~~~~~~~~~~~~
arch/m68k/include/asm/page.h:51:40: note: in definition of macro '__pgprot'
51 | #define __pgprot(x) ((pgprot_t) { (x) } )
| ^
>> arch/m68k/mm/init.c:148:33: error: 'CF_PAGE_WRITABLE' undeclared (first use in this function); did you mean 'NR_PAGETABLE'?
148 | CF_PAGE_WRITABLE);
| ^~~~~~~~~~~~~~~~
arch/m68k/include/asm/page.h:51:40: note: in definition of macro '__pgprot'
51 | #define __pgprot(x) ((pgprot_t) { (x) } )
| ^
>> arch/m68k/mm/init.c:154:33: error: 'CF_PAGE_EXEC' undeclared (first use in this function)
154 | CF_PAGE_EXEC);
| ^~~~~~~~~~~~
arch/m68k/include/asm/page.h:51:40: note: in definition of macro '__pgprot'
51 | #define __pgprot(x) ((pgprot_t) { (x) } )
| ^
>> arch/m68k/mm/init.c:174:52: error: 'CF_PAGE_SHARED' undeclared (first use in this function); did you mean 'PAGE_SHARED'?
174 | CF_PAGE_READABLE | CF_PAGE_SHARED);
| ^~~~~~~~~~~~~~
arch/m68k/include/asm/page.h:51:40: note: in definition of macro '__pgprot'
51 | #define __pgprot(x) ((pgprot_t) { (x) } )
| ^
vim +/vm_get_page_prot +138 arch/m68k/mm/init.c
> 11 #include <linux/module.h>
12 #include <linux/signal.h>
13 #include <linux/sched.h>
14 #include <linux/mm.h>
15 #include <linux/swap.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/types.h>
19 #include <linux/init.h>
20 #include <linux/memblock.h>
21 #include <linux/gfp.h>
22
23 #include <asm/setup.h>
24 #include <linux/uaccess.h>
25 #include <asm/page.h>
26 #include <asm/pgalloc.h>
27 #include <asm/traps.h>
28 #include <asm/machdep.h>
29 #include <asm/io.h>
30 #ifdef CONFIG_ATARI
31 #include <asm/atari_stram.h>
32 #endif
33 #include <asm/sections.h>
34 #include <asm/tlb.h>
35
36 /*
37 * ZERO_PAGE is a special page that is used for zero-initialized
38 * data and COW.
39 */
40 void *empty_zero_page;
41 EXPORT_SYMBOL(empty_zero_page);
42
43 #ifdef CONFIG_MMU
44
45 int m68k_virt_to_node_shift;
46
47 void __init m68k_setup_node(int node)
48 {
49 node_set_online(node);
50 }
51
52 #else /* CONFIG_MMU */
53
54 /*
55 * paging_init() continues the virtual memory environment setup which
56 * was begun by the code in arch/head.S.
57 * The parameters are pointers to where to stick the starting and ending
58 * addresses of available kernel virtual memory.
59 */
60 void __init paging_init(void)
61 {
62 /*
63 * Make sure start_mem is page aligned, otherwise bootmem and
64 * page_alloc get different views of the world.
65 */
66 unsigned long end_mem = memory_end & PAGE_MASK;
67 unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
68
69 high_memory = (void *) end_mem;
70
71 empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
72 if (!empty_zero_page)
73 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
74 __func__, PAGE_SIZE, PAGE_SIZE);
75 max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
76 free_area_init(max_zone_pfn);
77 }
78
79 #endif /* CONFIG_MMU */
80
81 void free_initmem(void)
82 {
83 #ifndef CONFIG_MMU_SUN3
84 free_initmem_default(-1);
85 #endif /* CONFIG_MMU_SUN3 */
86 }
87
88 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
89 #define VECTORS &vectors[0]
90 #else
91 #define VECTORS _ramvec
92 #endif
93
94 static inline void init_pointer_tables(void)
95 {
96 #if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
97 int i, j;
98
99 /* insert pointer tables allocated so far into the tablelist */
100 init_pointer_table(kernel_pg_dir, TABLE_PGD);
101 for (i = 0; i < PTRS_PER_PGD; i++) {
102 pud_t *pud = (pud_t *)&kernel_pg_dir[i];
103 pmd_t *pmd_dir;
104
105 if (!pud_present(*pud))
106 continue;
107
108 pmd_dir = (pmd_t *)pgd_page_vaddr(kernel_pg_dir[i]);
109 init_pointer_table(pmd_dir, TABLE_PMD);
110
111 for (j = 0; j < PTRS_PER_PMD; j++) {
112 pmd_t *pmd = &pmd_dir[j];
113 pte_t *pte_dir;
114
115 if (!pmd_present(*pmd))
116 continue;
117
118 pte_dir = (pte_t *)pmd_page_vaddr(*pmd);
119 init_pointer_table(pte_dir, TABLE_PTE);
120 }
121 }
122 #endif
123 }
124
125 void __init mem_init(void)
126 {
127 /* this will put all memory onto the freelists */
128 memblock_free_all();
129 init_pointer_tables();
130 }
131
132 #ifdef CONFIG_COLDFIRE
133 /*
134 * Page protections for initialising protection_map. See mm/mmap.c
135 * for use. In general, the bit positions are xwr, and P-items are
136 * private, the S-items are shared.
137 */
> 138 pgprot_t vm_get_page_prot(unsigned long vm_flags)
139 {
140 switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
141 case VM_NONE:
142 return PAGE_NONE;
143 case VM_READ:
> 144 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> 145 CF_PAGE_READABLE);
146 case VM_WRITE:
147 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> 148 CF_PAGE_WRITABLE);
149 case VM_WRITE | VM_READ:
150 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
151 CF_PAGE_READABLE | CF_PAGE_WRITABLE);
152 case VM_EXEC:
153 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> 154 CF_PAGE_EXEC);
155 case VM_EXEC | VM_READ:
156 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
157 CF_PAGE_READABLE | CF_PAGE_EXEC);
158 case VM_EXEC | VM_WRITE:
159 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
160 CF_PAGE_WRITABLE | CF_PAGE_EXEC);
161 case VM_EXEC | VM_WRITE | VM_READ:
162 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
163 CF_PAGE_READABLE | CF_PAGE_WRITABLE |
164 CF_PAGE_EXEC);
165 case VM_SHARED:
166 return PAGE_NONE;
167 case VM_SHARED | VM_READ:
168 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
169 CF_PAGE_READABLE);
170 case VM_SHARED | VM_WRITE:
171 return PAGE_SHARED;
172 case VM_SHARED | VM_WRITE | VM_READ:
173 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
> 174 CF_PAGE_READABLE | CF_PAGE_SHARED);
175 case VM_SHARED | VM_EXEC:
176 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
177 CF_PAGE_EXEC);
178 case VM_SHARED | VM_EXEC | VM_READ:
179 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
180 CF_PAGE_READABLE | CF_PAGE_EXEC);
181 case VM_SHARED | VM_EXEC | VM_WRITE:
182 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
183 CF_PAGE_SHARED | CF_PAGE_EXEC);
184 case VM_SHARED | VM_EXEC | VM_WRITE | VM_READ:
185 return __pgprot(CF_PAGE_VALID | CF_PAGE_ACCESSED |
186 CF_PAGE_READABLE | CF_PAGE_SHARED |
187 CF_PAGE_EXEC);
188 default:
189 BUILD_BUG();
190 }
191 }
192 #endif
193
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]