This implements CONFIG_DEBUG_RODATA, along with ftrace support. It is
intended to be used on top of Rabin Vincent's patch "arm: ftrace: work
with CONFIG_DEBUG_SET_MODULE_RONX", so that ftrace will work with both
config options.
Thanks!
-Kees
Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions
into section-sized areas that can have different permissions. Performs
the NX permission changes during free_initmem, so that init memory can be
reclaimed.
This uses section size instead of PMD size to reduce memory lost to
padding on non-LPAE systems.
Based on work by Brad Spengler, Larry Bassel, and Laura Abbott.
Signed-off-by: Kees Cook <[email protected]>
---
arch/arm/kernel/vmlinux.lds.S | 17 +++++++
arch/arm/mm/Kconfig | 9 ++++
arch/arm/mm/init.c | 106 +++++++++++++++++++++++++++++++++++++++++
arch/arm/mm/mmu.c | 13 ++++-
4 files changed, 144 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7bcee5c9b604..08fa667ef2f1 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,9 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
#define PROC_INFO \
. = ALIGN(4); \
@@ -90,6 +93,11 @@ SECTIONS
_text = .;
HEAD_TEXT
}
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
+
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -145,7 +153,11 @@ SECTIONS
_etext = .; /* End of text and rodata section */
#ifndef CONFIG_XIP_KERNEL
+# ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+# else
. = ALIGN(PAGE_SIZE);
+# endif
__init_begin = .;
#endif
/*
@@ -220,7 +232,12 @@ SECTIONS
. = PAGE_OFFSET + TEXT_OFFSET;
#else
__init_end = .;
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+#else
. = ALIGN(THREAD_SIZE);
+#endif
__data_loc = .;
#endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index f5ad9ee70426..b484fb04cf6f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -957,3 +957,12 @@ config ARCH_SUPPORTS_BIG_ENDIAN
help
This option specifies the architecture can support big endian
operation.
+
+config ARM_KERNMEM_PERMS
+ bool "Restrict kernel memory permissions"
+ help
+ If this is set, kernel memory other than kernel text (and rodata)
+ will be made non-executable. The tradeoff is that each region is
+ padded to section-size (1MiB) boundaries (because their permissions
+ are different and splitting the 1M pages into 4K ones causes TLB
+ performance problems), wasting memory.
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2a77ba8796ae..66a7283583cd 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -31,6 +31,11 @@
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/system_info.h>
+#include <asm/cp15.h>
+#endif
+
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -623,11 +628,112 @@ void __init mem_init(void)
}
}
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+ unsigned long start;
+ unsigned long end;
+ pmdval_t mask;
+ pmdval_t prot;
+};
+
+struct section_perm nx_perms[] = {
+ /* Make page tables, etc before _stext RW (set NX). */
+ {
+ .start = PAGE_OFFSET,
+ .end = (unsigned long)_stext,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+ /* Make init RW (set NX). */
+ {
+ .start = (unsigned long)__init_begin,
+ .end = (unsigned long)_sdata,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+};
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm.
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+ pmdval_t prot)
+{
+ struct mm_struct *mm;
+ pmd_t *pmd;
+
+ mm = current->active_mm;
+ pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+ pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+ if (addr & SECTION_SIZE)
+ pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+ else
+ pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+ flush_pmd_entry(pmd);
+ local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+ unsigned int cr;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return false;
+
+ cr = get_cr();
+ if (!(cr & CR_XP))
+ return false;
+
+ return true;
+}
+
+#define set_section_perms(perms, field) { \
+ size_t i; \
+ unsigned long addr; \
+ \
+ if (!arch_has_strict_perms()) \
+ return; \
+ \
+ for (i = 0; i < ARRAY_SIZE(perms); i++) { \
+ if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
+ !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
+ pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+ perms[i].start, perms[i].end, \
+ SECTION_SIZE); \
+ continue; \
+ } \
+ \
+ for (addr = perms[i].start; \
+ addr < perms[i].end; \
+ addr += SECTION_SIZE) \
+ section_update(addr, perms[i].mask, \
+ perms[i].field); \
+ } \
+}
+
+static inline void fix_kernmem_perms(void)
+{
+ set_section_perms(nx_perms, prot);
+}
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
+#endif
+
+ fix_kernmem_perms();
+#ifdef CONFIG_HAVE_TCM
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b68c6b22e1c8..3e163071bdcb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1379,13 +1379,24 @@ static void __init map_lowmem(void)
if (start >= end)
break;
- if (end < kernel_x_start || start >= kernel_x_end) {
+ if (end < kernel_x_start) {
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
map.length = end - start;
map.type = MT_MEMORY_RWX;
create_mapping(&map);
+ } else if (start >= kernel_x_end) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+ map.type = MT_MEMORY_RW;
+#else
+ map.type = MT_MEMORY_RWX;
+#endif
+
+ create_mapping(&map);
} else {
/* This better cover the entire kernel */
if (start < kernel_x_start) {
--
1.7.9.5
This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata
read-only. Additionally, this splits rodata from text so that rodata can
also be NX, which may lead to wasted memory when aligning to SECTION_SIZE.
The read-only areas are made writable during ftrace updates. Additional
work is needed for kprobes and kexec, so the feature is temporarily
marked as unavailable in Kconfig when those options are selected.
Signed-off-by: Kees Cook <[email protected]>
---
arch/arm/include/asm/cacheflush.h | 9 ++++++++
arch/arm/kernel/ftrace.c | 17 ++++++++++++++
arch/arm/kernel/vmlinux.lds.S | 3 +++
arch/arm/mm/Kconfig | 12 ++++++++++
arch/arm/mm/init.c | 46 +++++++++++++++++++++++++++++++++++++
5 files changed, 87 insertions(+)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 8b8b61685a34..b6fea0a1a88b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,4 +487,13 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
#endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index af9a8a927a4e..ea446ae09c89 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -15,6 +15,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/opcodes.h>
@@ -35,6 +36,22 @@
#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
+static int __ftrace_modify_code(void *data)
+{
+ int *command = data;
+
+ set_kernel_text_rw();
+ ftrace_modify_all_code(*command);
+ set_kernel_text_ro();
+
+ return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+ stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return rec->arch.old_mcount ? OLD_NOP : NOP;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 08fa667ef2f1..ec79e7268e09 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -120,6 +120,9 @@ SECTIONS
ARM_CPU_KEEP(PROC_INFO)
}
+#ifdef CONFIG_DEBUG_RODATA
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index b484fb04cf6f..e767ab6e1c27 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -966,3 +966,15 @@ config ARM_KERNMEM_PERMS
padded to section-size (1MiB) boundaries (because their permissions
are different and splitting the 1M pages into 4K ones causes TLB
performance problems), wasting memory.
+
+config DEBUG_RODATA
+ bool "Make kernel text and rodata read-only"
+ depends on ARM_KERNMEM_PERMS && KEXEC=n && KPROBES=n
+ default y
+ help
+ If this is set, kernel text and rodata will be made read-only. This
+ is to help catch accidental or malicious attempts to change the
+ kernel's executable code. Additionally splits rodata from kernel
+ text so it can be made explicitly non-executable. This creates
+ another section-size padded region, so it can waste more memory
+ space while gaining the read-only protections.
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 66a7283583cd..74647a4f1af4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -634,6 +634,7 @@ struct section_perm {
unsigned long end;
pmdval_t mask;
pmdval_t prot;
+ pmdval_t clear;
};
struct section_perm nx_perms[] = {
@@ -651,8 +652,35 @@ struct section_perm nx_perms[] = {
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
+#ifdef CONFIG_DEBUG_RODATA
+ /* Make rodata NX (set RO in ro_perms below). */
+ {
+ .start = (unsigned long)__start_rodata,
+ .end = (unsigned long)__init_begin,
+ .mask = ~PMD_SECT_XN,
+ .prot = PMD_SECT_XN,
+ },
+#endif
};
+#ifdef CONFIG_DEBUG_RODATA
+struct section_perm ro_perms[] = {
+ /* Make kernel code and rodata RX (set RO). */
+ {
+ .start = (unsigned long)_stext,
+ .end = (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+ .mask = ~PMD_SECT_RDONLY,
+ .prot = PMD_SECT_RDONLY,
+#else
+ .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+ .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+ .clear = PMD_SECT_AP_WRITE,
+#endif
+ },
+};
+#endif
+
/*
* Updates section permissions only for the current mm (sections are
* copied into each mm). During startup, this is the init_mm.
@@ -721,6 +749,24 @@ static inline void fix_kernmem_perms(void)
{
set_section_perms(nx_perms, prot);
}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+ set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+ set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_ARM_KERNMEM_PERMS */
--
1.7.9.5
Hi Kees,
On Mon, Apr 07, 2014 at 08:15:10PM -0700, Kees Cook wrote:
> This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata
> read-only. Additionally, this splits rodata from text so that rodata can
> also be NX, which may lead to wasted memory when aligning to SECTION_SIZE.
>
> The read-only areas are made writable during ftrace updates. Additional
> work is needed for kprobes and kexec, so the feature is temporarily
> marked as unavailable in Kconfig when those options are selected.
>
> Signed-off-by: Kees Cook <[email protected]>
> ---
> arch/arm/include/asm/cacheflush.h | 9 ++++++++
> arch/arm/kernel/ftrace.c | 17 ++++++++++++++
> arch/arm/kernel/vmlinux.lds.S | 3 +++
> arch/arm/mm/Kconfig | 12 ++++++++++
> arch/arm/mm/init.c | 46 +++++++++++++++++++++++++++++++++++++
> 5 files changed, 87 insertions(+)
>
> diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
> index 8b8b61685a34..b6fea0a1a88b 100644
> --- a/arch/arm/include/asm/cacheflush.h
> +++ b/arch/arm/include/asm/cacheflush.h
> @@ -487,4 +487,13 @@ int set_memory_rw(unsigned long addr, int numpages);
> int set_memory_x(unsigned long addr, int numpages);
> int set_memory_nx(unsigned long addr, int numpages);
>
> +#ifdef CONFIG_DEBUG_RODATA
> +void mark_rodata_ro(void);
> +void set_kernel_text_rw(void);
> +void set_kernel_text_ro(void);
> +#else
> +static inline void set_kernel_text_rw(void) { }
> +static inline void set_kernel_text_ro(void) { }
> +#endif
> +
> #endif
> diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
> index af9a8a927a4e..ea446ae09c89 100644
> --- a/arch/arm/kernel/ftrace.c
> +++ b/arch/arm/kernel/ftrace.c
> @@ -15,6 +15,7 @@
> #include <linux/ftrace.h>
> #include <linux/uaccess.h>
> #include <linux/module.h>
> +#include <linux/stop_machine.h>
>
> #include <asm/cacheflush.h>
> #include <asm/opcodes.h>
> @@ -35,6 +36,22 @@
>
> #define OLD_NOP 0xe1a00000 /* mov r0, r0 */
>
> +static int __ftrace_modify_code(void *data)
> +{
> + int *command = data;
> +
> + set_kernel_text_rw();
> + ftrace_modify_all_code(*command);
> + set_kernel_text_ro();
> +
> + return 0;
> +}
Would another approach be to keep all the kernel .text ro then override
probe_kernel_write (which has a weak reference), to create a separate
temporary rw mapping to the specific page that needs to be modified?
That way you only worry about TLB and cache maintenance for a smaller
area. Also, your kernel .text VAs never actually become writable, so
you don't need to worry as much about unauthorised changes whilst your
guard is temporarily down.
(Though lots of small changes could probably make this more
expensive, and you will need to double check aliasing in pre-ARMv7).
Cheers,
--
Steve
On Mon, Apr 07, 2014 at 08:15:09PM -0700, Kees Cook wrote:
> Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions
> into section-sized areas that can have different permissions. Performs
> the NX permission changes during free_initmem, so that init memory can be
> reclaimed.
>
> This uses section size instead of PMD size to reduce memory lost to
> padding on non-LPAE systems.
>
> Based on work by Brad Spengler, Larry Bassel, and Laura Abbott.
>
> Signed-off-by: Kees Cook <[email protected]>
> ---
[ ... ]
> diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
> index 2a77ba8796ae..66a7283583cd 100644
> --- a/arch/arm/mm/init.c
> +++ b/arch/arm/mm/init.c
> @@ -31,6 +31,11 @@
> #include <asm/tlb.h>
> #include <asm/fixmap.h>
>
> +#ifdef CONFIG_ARM_KERNMEM_PERMS
> +#include <asm/system_info.h>
> +#include <asm/cp15.h>
> +#endif
> +
> #include <asm/mach/arch.h>
> #include <asm/mach/map.h>
>
> @@ -623,11 +628,112 @@ void __init mem_init(void)
> }
> }
>
> +#ifdef CONFIG_ARM_KERNMEM_PERMS
> +struct section_perm {
> + unsigned long start;
> + unsigned long end;
> + pmdval_t mask;
> + pmdval_t prot;
> +};
> +
> +struct section_perm nx_perms[] = {
> + /* Make page tables, etc before _stext RW (set NX). */
> + {
> + .start = PAGE_OFFSET,
> + .end = (unsigned long)_stext,
> + .mask = ~PMD_SECT_XN,
> + .prot = PMD_SECT_XN,
> + },
> + /* Make init RW (set NX). */
> + {
> + .start = (unsigned long)__init_begin,
> + .end = (unsigned long)_sdata,
> + .mask = ~PMD_SECT_XN,
> + .prot = PMD_SECT_XN,
> + },
> +};
> +
> +/*
> + * Updates section permissions only for the current mm (sections are
> + * copied into each mm). During startup, this is the init_mm.
> + */
> +static inline void section_update(unsigned long addr, pmdval_t mask,
> + pmdval_t prot)
> +{
> + struct mm_struct *mm;
> + pmd_t *pmd;
> +
> + mm = current->active_mm;
> + pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
> +
> +#ifdef CONFIG_ARM_LPAE
> + pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
> +#else
> + if (addr & SECTION_SIZE)
> + pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
> + else
> + pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
> +#endif
> + flush_pmd_entry(pmd);
> + local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
> +}
> +
> +/* Make sure extended page tables are in use. */
> +static inline bool arch_has_strict_perms(void)
> +{
> + unsigned int cr;
> +
> + if (cpu_architecture() < CPU_ARCH_ARMv6)
> + return false;
> +
> + cr = get_cr();
> + if (!(cr & CR_XP))
> + return false;
> +
> + return true;
> +}
> +
> +#define set_section_perms(perms, field) { \
> + size_t i; \
> + unsigned long addr; \
> + \
> + if (!arch_has_strict_perms()) \
> + return; \
> + \
> + for (i = 0; i < ARRAY_SIZE(perms); i++) { \
> + if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
> + !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
> + pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
> + perms[i].start, perms[i].end, \
> + SECTION_SIZE); \
> + continue; \
> + } \
> + \
> + for (addr = perms[i].start; \
> + addr < perms[i].end; \
> + addr += SECTION_SIZE) \
> + section_update(addr, perms[i].mask, \
> + perms[i].field); \
> + } \
> +}
> +
> +static inline void fix_kernmem_perms(void)
> +{
> + set_section_perms(nx_perms, prot);
> +}
> +#else
> +static inline void fix_kernmem_perms(void) { }
> +#endif /* CONFIG_ARM_KERNMEM_PERMS */
> +
> void free_initmem(void)
> {
> #ifdef CONFIG_HAVE_TCM
> extern char __tcm_start, __tcm_end;
> +#endif
> +
> + fix_kernmem_perms();
If it's practical to allow kprobes to modify the underlying .text
without changing the section mappings; then I think it would be lot
cleaner to put down read only .text sections from the beginning, and
not have any section modification code.
Cheers,
--
Steve
On Wed, Apr 9, 2014 at 2:02 AM, Steve Capper <[email protected]> wrote:
> Hi Kees,
>
> On Mon, Apr 07, 2014 at 08:15:10PM -0700, Kees Cook wrote:
>> This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata
>> read-only. Additionally, this splits rodata from text so that rodata can
>> also be NX, which may lead to wasted memory when aligning to SECTION_SIZE.
>>
>> The read-only areas are made writable during ftrace updates. Additional
>> work is needed for kprobes and kexec, so the feature is temporarily
>> marked as unavailable in Kconfig when those options are selected.
>>
>> Signed-off-by: Kees Cook <[email protected]>
>> ---
>> arch/arm/include/asm/cacheflush.h | 9 ++++++++
>> arch/arm/kernel/ftrace.c | 17 ++++++++++++++
>> arch/arm/kernel/vmlinux.lds.S | 3 +++
>> arch/arm/mm/Kconfig | 12 ++++++++++
>> arch/arm/mm/init.c | 46 +++++++++++++++++++++++++++++++++++++
>> 5 files changed, 87 insertions(+)
>>
>> diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
>> index 8b8b61685a34..b6fea0a1a88b 100644
>> --- a/arch/arm/include/asm/cacheflush.h
>> +++ b/arch/arm/include/asm/cacheflush.h
>> @@ -487,4 +487,13 @@ int set_memory_rw(unsigned long addr, int numpages);
>> int set_memory_x(unsigned long addr, int numpages);
>> int set_memory_nx(unsigned long addr, int numpages);
>>
>> +#ifdef CONFIG_DEBUG_RODATA
>> +void mark_rodata_ro(void);
>> +void set_kernel_text_rw(void);
>> +void set_kernel_text_ro(void);
>> +#else
>> +static inline void set_kernel_text_rw(void) { }
>> +static inline void set_kernel_text_ro(void) { }
>> +#endif
>> +
>> #endif
>> diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
>> index af9a8a927a4e..ea446ae09c89 100644
>> --- a/arch/arm/kernel/ftrace.c
>> +++ b/arch/arm/kernel/ftrace.c
>> @@ -15,6 +15,7 @@
>> #include <linux/ftrace.h>
>> #include <linux/uaccess.h>
>> #include <linux/module.h>
>> +#include <linux/stop_machine.h>
>>
>> #include <asm/cacheflush.h>
>> #include <asm/opcodes.h>
>> @@ -35,6 +36,22 @@
>>
>> #define OLD_NOP 0xe1a00000 /* mov r0, r0 */
>>
>> +static int __ftrace_modify_code(void *data)
>> +{
>> + int *command = data;
>> +
>> + set_kernel_text_rw();
>> + ftrace_modify_all_code(*command);
>> + set_kernel_text_ro();
>> +
>> + return 0;
>> +}
>
> Would another approach be to keep all the kernel .text ro then override
> probe_kernel_write (which has a weak reference), to create a separate
> temporary rw mapping to the specific page that needs to be modified?
>
> That way you only worry about TLB and cache maintenance for a smaller
> area. Also, your kernel .text VAs never actually become writable, so
> you don't need to worry as much about unauthorised changes whilst your
> guard is temporarily down.
>
> (Though lots of small changes could probably make this more
> expensive, and you will need to double check aliasing in pre-ARMv7).
As I understand it, early boot needs some of these areas RWX. Doing
the protection during init-free means we can avoid all that and still
allow the memory to get reclaimed. As to not doing section
re-mappings, I share the same concern about it being very expensive to
do lots of small changes. As such, I think this is the cleanest
approach that is still portable.
-Kees
--
Kees Cook
Chrome OS Security
On 4/9/2014 9:12 AM, Kees Cook wrote:
> On Wed, Apr 9, 2014 at 2:02 AM, Steve Capper <[email protected]> wrote:
>> Hi Kees,
>>
>> On Mon, Apr 07, 2014 at 08:15:10PM -0700, Kees Cook wrote:
>>> This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata
>>> read-only. Additionally, this splits rodata from text so that rodata can
>>> also be NX, which may lead to wasted memory when aligning to SECTION_SIZE.
>>>
>>> The read-only areas are made writable during ftrace updates. Additional
>>> work is needed for kprobes and kexec, so the feature is temporarily
>>> marked as unavailable in Kconfig when those options are selected.
>>>
>>> Signed-off-by: Kees Cook <[email protected]>
>>> ---
>>> arch/arm/include/asm/cacheflush.h | 9 ++++++++
>>> arch/arm/kernel/ftrace.c | 17 ++++++++++++++
>>> arch/arm/kernel/vmlinux.lds.S | 3 +++
>>> arch/arm/mm/Kconfig | 12 ++++++++++
>>> arch/arm/mm/init.c | 46 +++++++++++++++++++++++++++++++++++++
>>> 5 files changed, 87 insertions(+)
>>>
>>> diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
>>> index 8b8b61685a34..b6fea0a1a88b 100644
>>> --- a/arch/arm/include/asm/cacheflush.h
>>> +++ b/arch/arm/include/asm/cacheflush.h
>>> @@ -487,4 +487,13 @@ int set_memory_rw(unsigned long addr, int numpages);
>>> int set_memory_x(unsigned long addr, int numpages);
>>> int set_memory_nx(unsigned long addr, int numpages);
>>>
>>> +#ifdef CONFIG_DEBUG_RODATA
>>> +void mark_rodata_ro(void);
>>> +void set_kernel_text_rw(void);
>>> +void set_kernel_text_ro(void);
>>> +#else
>>> +static inline void set_kernel_text_rw(void) { }
>>> +static inline void set_kernel_text_ro(void) { }
>>> +#endif
>>> +
>>> #endif
>>> diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
>>> index af9a8a927a4e..ea446ae09c89 100644
>>> --- a/arch/arm/kernel/ftrace.c
>>> +++ b/arch/arm/kernel/ftrace.c
>>> @@ -15,6 +15,7 @@
>>> #include <linux/ftrace.h>
>>> #include <linux/uaccess.h>
>>> #include <linux/module.h>
>>> +#include <linux/stop_machine.h>
>>>
>>> #include <asm/cacheflush.h>
>>> #include <asm/opcodes.h>
>>> @@ -35,6 +36,22 @@
>>>
>>> #define OLD_NOP 0xe1a00000 /* mov r0, r0 */
>>>
>>> +static int __ftrace_modify_code(void *data)
>>> +{
>>> + int *command = data;
>>> +
>>> + set_kernel_text_rw();
>>> + ftrace_modify_all_code(*command);
>>> + set_kernel_text_ro();
>>> +
>>> + return 0;
>>> +}
>>
>> Would another approach be to keep all the kernel .text ro then override
>> probe_kernel_write (which has a weak reference), to create a separate
>> temporary rw mapping to the specific page that needs to be modified?
>>
>> That way you only worry about TLB and cache maintenance for a smaller
>> area. Also, your kernel .text VAs never actually become writable, so
>> you don't need to worry as much about unauthorised changes whilst your
>> guard is temporarily down.
>>
>> (Though lots of small changes could probably make this more
>> expensive, and you will need to double check aliasing in pre-ARMv7).
>
> As I understand it, early boot needs some of these areas RWX. Doing
> the protection during init-free means we can avoid all that and still
> allow the memory to get reclaimed. As to not doing section
> re-mappings, I share the same concern about it being very expensive to
> do lots of small changes. As such, I think this is the cleanest
> approach that is still portable.
>
FWIW, our out of tree patches set up the permissions at map_lowmem time
and we've never run into any issue with incorrect RWX permissions to
the best of my knowledge.
Just for comparison, how many small changes would need to happen for an
ftrace use case? Would these changes be happening on a hot path?
> -Kees
>
Thanks,
Laura
--
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
hosted by The Linux Foundation
On Wed, Apr 9, 2014 at 12:52 PM, Laura Abbott <[email protected]> wrote:
> On 4/9/2014 9:12 AM, Kees Cook wrote:
>> On Wed, Apr 9, 2014 at 2:02 AM, Steve Capper <[email protected]> wrote:
>>> Hi Kees,
>>>
>>> On Mon, Apr 07, 2014 at 08:15:10PM -0700, Kees Cook wrote:
>>>> This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata
>>>> read-only. Additionally, this splits rodata from text so that rodata can
>>>> also be NX, which may lead to wasted memory when aligning to SECTION_SIZE.
>>>>
>>>> The read-only areas are made writable during ftrace updates. Additional
>>>> work is needed for kprobes and kexec, so the feature is temporarily
>>>> marked as unavailable in Kconfig when those options are selected.
>>>>
>>>> Signed-off-by: Kees Cook <[email protected]>
>>>> ---
>>>> arch/arm/include/asm/cacheflush.h | 9 ++++++++
>>>> arch/arm/kernel/ftrace.c | 17 ++++++++++++++
>>>> arch/arm/kernel/vmlinux.lds.S | 3 +++
>>>> arch/arm/mm/Kconfig | 12 ++++++++++
>>>> arch/arm/mm/init.c | 46 +++++++++++++++++++++++++++++++++++++
>>>> 5 files changed, 87 insertions(+)
>>>>
>>>> diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
>>>> index 8b8b61685a34..b6fea0a1a88b 100644
>>>> --- a/arch/arm/include/asm/cacheflush.h
>>>> +++ b/arch/arm/include/asm/cacheflush.h
>>>> @@ -487,4 +487,13 @@ int set_memory_rw(unsigned long addr, int numpages);
>>>> int set_memory_x(unsigned long addr, int numpages);
>>>> int set_memory_nx(unsigned long addr, int numpages);
>>>>
>>>> +#ifdef CONFIG_DEBUG_RODATA
>>>> +void mark_rodata_ro(void);
>>>> +void set_kernel_text_rw(void);
>>>> +void set_kernel_text_ro(void);
>>>> +#else
>>>> +static inline void set_kernel_text_rw(void) { }
>>>> +static inline void set_kernel_text_ro(void) { }
>>>> +#endif
>>>> +
>>>> #endif
>>>> diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
>>>> index af9a8a927a4e..ea446ae09c89 100644
>>>> --- a/arch/arm/kernel/ftrace.c
>>>> +++ b/arch/arm/kernel/ftrace.c
>>>> @@ -15,6 +15,7 @@
>>>> #include <linux/ftrace.h>
>>>> #include <linux/uaccess.h>
>>>> #include <linux/module.h>
>>>> +#include <linux/stop_machine.h>
>>>>
>>>> #include <asm/cacheflush.h>
>>>> #include <asm/opcodes.h>
>>>> @@ -35,6 +36,22 @@
>>>>
>>>> #define OLD_NOP 0xe1a00000 /* mov r0, r0 */
>>>>
>>>> +static int __ftrace_modify_code(void *data)
>>>> +{
>>>> + int *command = data;
>>>> +
>>>> + set_kernel_text_rw();
>>>> + ftrace_modify_all_code(*command);
>>>> + set_kernel_text_ro();
>>>> +
>>>> + return 0;
>>>> +}
>>>
>>> Would another approach be to keep all the kernel .text ro then override
>>> probe_kernel_write (which has a weak reference), to create a separate
>>> temporary rw mapping to the specific page that needs to be modified?
>>>
>>> That way you only worry about TLB and cache maintenance for a smaller
>>> area. Also, your kernel .text VAs never actually become writable, so
>>> you don't need to worry as much about unauthorised changes whilst your
>>> guard is temporarily down.
>>>
>>> (Though lots of small changes could probably make this more
>>> expensive, and you will need to double check aliasing in pre-ARMv7).
>>
>> As I understand it, early boot needs some of these areas RWX. Doing
>> the protection during init-free means we can avoid all that and still
>> allow the memory to get reclaimed. As to not doing section
>> re-mappings, I share the same concern about it being very expensive to
>> do lots of small changes. As such, I think this is the cleanest
>> approach that is still portable.
>>
>
> FWIW, our out of tree patches set up the permissions at map_lowmem time
> and we've never run into any issue with incorrect RWX permissions to
> the best of my knowledge.
I thought there were problems with not being able to free init mem in this case?
> Just for comparison, how many small changes would need to happen for an
> ftrace use case? Would these changes be happening on a hot path?
I'm not familiar with the internals, but it seemed like it was fixing
up a lot of entry points.
-Kees
>
>> -Kees
>>
>
> Thanks,
> Laura
> --
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
> hosted by The Linux Foundation
--
Kees Cook
Chrome OS Security