The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX,
and are generally useful primitives to have. This implementation is
designed to be completely generic across powerpc's many MMUs.
It's possible that this could be optimised to be faster for specific
MMUs, but the focus is on having a generic and safe implementation for
now.
This implementation does not handle cases where the caller is attempting
to change the mapping of the page it is executing from, or if another
CPU is concurrently using the page being altered. These cases likely
shouldn't happen, but a more complex implementation with MMU-specific code
could safely handle them, so that is left as a TODO for now.
Signed-off-by: Russell Currey <[email protected]>
Signed-off-by: Christophe Leroy <[email protected]>
---
v2:
- use integers instead of pointers for action
- drop action check, nobody should call change_memory_attr() directly.
Should it happen, the function will just do nothing.
- Renamed confusing 'pte_val' var to 'pte' as pte_val() is already a function.
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/set_memory.h | 32 ++++++++++++
arch/powerpc/mm/Makefile | 1 +
arch/powerpc/mm/pageattr.c | 74 +++++++++++++++++++++++++++
4 files changed, 108 insertions(+)
create mode 100644 arch/powerpc/include/asm/set_memory.h
create mode 100644 arch/powerpc/mm/pageattr.c
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8fdb36a841e..ae6a27d07406 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -129,6 +129,7 @@ config PPC
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
+ select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..c3621030a3fb
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO	0	/* clear write permission */
+#define SET_MEMORY_RW	1	/* grant write permission */
+#define SET_MEMORY_NX	2	/* clear execute permission */
+#define SET_MEMORY_X	3	/* grant execute permission */
+
+int change_memory_attr(unsigned long addr, int numpages, int action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+#endif
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 5e147986400d..d0a0bcbc9289 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
obj-$(CONFIG_PPC_PTDUMP) += ptdump/
obj-$(CONFIG_KASAN) += kasan/
+obj-$(CONFIG_ARCH_HAS_SET_MEMORY) += pageattr.o
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
new file mode 100644
index 000000000000..0cb5294732b7
--- /dev/null
+++ b/arch/powerpc/mm/pageattr.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * MMU-generic set_memory implementation for powerpc
+ *
+ * Copyright 2019, IBM Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/set_memory.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+/*
+ * Updates the attributes of a page in three steps:
+ *
+ * 1. invalidate the page table entry
+ * 2. flush the TLB
+ * 3. install the new entry with the updated attributes
+ *
+ * This is unsafe if the caller is attempting to change the mapping of the
+ * page it is executing from, or if another CPU is concurrently using the
+ * page being altered.
+ *
+ * TODO make the implementation resistant to this.
+ */
+static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+	int action = (int)(unsigned long)data;
+	pte_t pte;
+
+	spin_lock(&init_mm.page_table_lock);
+
+	/* invalidate the PTE so it's safe to modify */
+	pte = ptep_get_and_clear(&init_mm, addr, ptep);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	/* modify the PTE bits as desired, then apply */
+	switch (action) {
+	case SET_MEMORY_RO:
+		pte = pte_wrprotect(pte);
+		break;
+	case SET_MEMORY_RW:
+		pte = pte_mkwrite(pte);
+		break;
+	case SET_MEMORY_NX:
+		pte = pte_exprotect(pte);
+		break;
+	case SET_MEMORY_X:
+		pte = pte_mkexec(pte);
+		break;
+	default:
+		break;
+	}
+
+	set_pte_at(&init_mm, addr, ptep, pte);
+	spin_unlock(&init_mm.page_table_lock);
+
+	return 0;
+}
+
+int change_memory_attr(unsigned long addr, int numpages, int action)
+{
+	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+	unsigned long sz = (unsigned long)numpages * PAGE_SIZE;
+
+	if (!numpages)
+		return 0;
+
+	return apply_to_page_range(&init_mm, start, sz, change_page_attr, (void *)(unsigned long)action);
+}
--
2.25.0
From: Russell Currey <[email protected]>
skiroot_defconfig is the only powerpc defconfig with STRICT_KERNEL_RWX
enabled, and if you want memory protection for kernel text you'd want it
for modules too, so enable STRICT_MODULE_RWX there.
Acked-by: Joel Stanley <[email protected]>
Signed-off-by: Russell Currey <[email protected]>
---
v2: no change
Signed-off-by: Christophe Leroy <[email protected]>
---
arch/powerpc/configs/skiroot_defconfig | 1 +
1 file changed, 1 insertion(+)
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 069f67f12731..b74358c3ede8 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -31,6 +31,7 @@ CONFIG_PERF_EVENTS=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_JUMP_LABEL=y
CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_STRICT_MODULE_RWX=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SIG=y
--
2.25.0
Hi Christophe,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on powerpc/next]
[also build test ERROR on next-20200131]
[cannot apply to v5.5]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]
url: https://github.com/0day-ci/linux/commits/Christophe-Leroy/powerpc-mm-Implement-set_memory-routines/20200203-060234
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-ppc64_defconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 7.5.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=7.5.0 make.cross ARCH=powerpc
If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <[email protected]>
All errors (new ones prefixed by >>):
arch/powerpc/mm/pageattr.c: In function 'change_page_attr':
>> arch/powerpc/mm/pageattr.c:32:15: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast]
int action = (int)data;
^
arch/powerpc/mm/pageattr.c: In function 'change_memory_attr':
>> arch/powerpc/mm/pageattr.c:73:68: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
return apply_to_page_range(&init_mm, start, sz, change_page_attr, (void *)action);
^
cc1: all warnings being treated as errors
vim +32 arch/powerpc/mm/pageattr.c
15
16
17 /*
18 * Updates the attributes of a page in three steps:
19 *
20 * 1. invalidate the page table entry
21 * 2. flush the TLB
22 * 3. install the new entry with the updated attributes
23 *
24 * This is unsafe if the caller is attempting to change the mapping of the
25 * page it is executing from, or if another CPU is concurrently using the
26 * page being altered.
27 *
28 * TODO make the implementation resistant to this.
29 */
30 static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
31 {
> 32 int action = (int)data;
33 pte_t pte;
34
35 spin_lock(&init_mm.page_table_lock);
36
37 /* invalidate the PTE so it's safe to modify */
38 pte = ptep_get_and_clear(&init_mm, addr, ptep);
39 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
40
41 /* modify the PTE bits as desired, then apply */
42 switch (action) {
43 case SET_MEMORY_RO:
44 pte = pte_wrprotect(pte);
45 break;
46 case SET_MEMORY_RW:
47 pte = pte_mkwrite(pte);
48 break;
49 case SET_MEMORY_NX:
50 pte = pte_exprotect(pte);
51 break;
52 case SET_MEMORY_X:
53 pte = pte_mkexec(pte);
54 break;
55 default:
56 break;
57 }
58
59 set_pte_at(&init_mm, addr, ptep, pte);
60 spin_unlock(&init_mm.page_table_lock);
61
62 return 0;
63 }
64
65 int change_memory_attr(unsigned long addr, int numpages, int action)
66 {
67 unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
68 unsigned long sz = numpages * PAGE_SIZE;
69
70 if (!numpages)
71 return 0;
72
> 73 return apply_to_page_range(&init_mm, start, sz, change_page_attr, (void *)action);
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/hyperkitty/list/[email protected] Intel Corporation