Wire up the code introduced in v5.2 to manage the permissions
of executable vmalloc regions (and their linear aliases) more
strictly.
One of the things that came up in the internal discussion is
whether non-x86 architectures have any benefit at all from the
lazy vunmap feature, and whether it would perhaps be better to
implement eager vunmap instead.
Cc: Nadav Amit <[email protected]>
Cc: Rick Edgecombe <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Cc: James Morse <[email protected]>
Ard Biesheuvel (4):
arm64: module: create module allocations without exec permissions
arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
arm64: bpf: do not allocate executable memory
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/cacheflush.h | 3 ++
arch/arm64/kernel/module.c | 4 +-
arch/arm64/kernel/probes/kprobes.c | 4 +-
arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
arch/arm64/net/bpf_jit_comp.c | 2 +-
mm/vmalloc.c | 11 -----
7 files changed, 50 insertions(+), 23 deletions(-)
--
2.17.1
Now that the core code manages the executable permissions of code
regions of modules explicitly, it is no longer necessary to create
the module vmalloc regions with RWX permissions, and we can create
them with RW- permissions instead, which is preferred from a
security perspective.
Signed-off-by: Ard Biesheuvel <[email protected]>
---
arch/arm64/kernel/module.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 2e4e3915b4d0..88f0ed31d9aa 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -41,7 +41,7 @@ void *module_alloc(unsigned long size)
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + MODULES_VSIZE,
- gfp_mask, PAGE_KERNEL_EXEC, 0,
+ gfp_mask, PAGE_KERNEL, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
@@ -57,7 +57,7 @@ void *module_alloc(unsigned long size)
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + SZ_4G, GFP_KERNEL,
- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) {
--
2.17.1
Wire up the special helper functions to manipulate aliases of vmalloc
regions in the linear map.
Signed-off-by: Ard Biesheuvel <[email protected]>
---
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/cacheflush.h | 3 ++
arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
mm/vmalloc.c | 11 -----
4 files changed, 44 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ca9c175fb949..4ab32180eabd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -26,6 +26,7 @@ config ARM64
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SETUP_DMA_OPS
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 19844211a4e6..b9ee5510067f 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -187,4 +187,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
int set_memory_valid(unsigned long addr, int numpages, int enable);
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
#endif
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 6cd645edcf35..9c6b9039ec8f 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -159,17 +159,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
__pgprot(PTE_VALID));
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ struct page_change_data data = {
+ .set_mask = __pgprot(0),
+ .clear_mask = __pgprot(PTE_VALID),
+ };
+
+ if (!rodata_full)
+ return 0;
+
+ return apply_to_page_range(&init_mm,
+ (unsigned long)page_address(page),
+ PAGE_SIZE, change_page_range, &data);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ struct page_change_data data = {
+ .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
+ .clear_mask = __pgprot(PTE_RDONLY),
+ };
+
+ if (!rodata_full)
+ return 0;
+
+ return apply_to_page_range(&init_mm,
+ (unsigned long)page_address(page),
+ PAGE_SIZE, change_page_range, &data);
+}
+
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
+ if (!debug_pagealloc_enabled() && !rodata_full)
+ return;
+
set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
-#ifdef CONFIG_HIBERNATION
+
/*
- * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
- * is used to determine if a linear map page has been marked as not-valid by
- * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
- * This is based on kern_addr_valid(), which almost does what we need.
+ * This function is used to determine if a linear map page has been marked as
+ * not-valid. Walk the page table and check the PTE_VALID bit. This is based
+ * on kern_addr_valid(), which almost does what we need.
*
* Because this is only called on the kernel linear map, p?d_sect() implies
* p?d_present(). When debug_pagealloc is enabled, sections mappings are
@@ -183,6 +214,9 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
+ if (!debug_pagealloc_enabled() && !rodata_full)
+ return true;
+
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
@@ -204,5 +238,3 @@ bool kernel_page_present(struct page *page)
ptep = pte_offset_kernel(pmdp, addr);
return pte_valid(READ_ONCE(*ptep));
}
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 233af6936c93..1135dd8f2665 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2128,17 +2128,6 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
int i;
- /*
- * The below block can be removed when all architectures that have
- * direct map permissions also have set_direct_map_() implementations.
- * This is concerned with resetting the direct map any an vm alias with
- * execute permissions, without leaving a RW+X window.
- */
- if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
- set_memory_nx(addr, area->nr_pages);
- set_memory_rw(addr, area->nr_pages);
- }
-
remove_vm_area(area->addr);
/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
--
2.17.1
In order to avoid transient inconsistencies where freed code pages
are remapped writable while stale TLB entries still exist on other
cores, mark the kprobes text pages with the VM_FLUSH_RESET_PERMS
attribute. This instructs the core vmalloc code not to defer the
TLB flush when this region is unmapped and returned to the page
allocator.
Signed-off-by: Ard Biesheuvel <[email protected]>
---
arch/arm64/kernel/probes/kprobes.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2509fcb6d404..036cfbf9682a 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -131,8 +131,10 @@ void *alloc_insn_page(void)
void *page;
page = vmalloc_exec(PAGE_SIZE);
- if (page)
+ if (page) {
set_memory_ro((unsigned long)page, 1);
+ set_vm_flush_reset_perms(page);
+ }
return page;
}
--
2.17.1
The BPF code now takes care of mapping the code pages executable
after mapping them read-only, to ensure that no RWX mapped regions
are needed, even transiently. This means we can drop the executable
permissions from the mapping at allocation time.
Signed-off-by: Ard Biesheuvel <[email protected]>
---
arch/arm64/net/bpf_jit_comp.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index df845cee438e..aef4ff467222 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -981,7 +981,7 @@ void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
BPF_JIT_REGION_END, GFP_KERNEL,
- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
--
2.17.1
On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
> Now that the core code manages the executable permissions of code
> regions of modules explicitly, it is no longer necessary to create
I guess the permission transitions for the various module sections happen
through module_enable_[ro|nx]() after allocating via module_alloc().
> the module vmalloc regions with RWX permissions, and we can create
> them with RW- permissions instead, which is preferred from a
> security perspective.
Makes sense. Will this be followed in all architectures now?
>
> Signed-off-by: Ard Biesheuvel <[email protected]>
> ---
> arch/arm64/kernel/module.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> index 2e4e3915b4d0..88f0ed31d9aa 100644
> --- a/arch/arm64/kernel/module.c
> +++ b/arch/arm64/kernel/module.c
> @@ -41,7 +41,7 @@ void *module_alloc(unsigned long size)
>
> p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
> module_alloc_base + MODULES_VSIZE,
> - gfp_mask, PAGE_KERNEL_EXEC, 0,
> + gfp_mask, PAGE_KERNEL, 0,
> NUMA_NO_NODE, __builtin_return_address(0));
>
> if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
> @@ -57,7 +57,7 @@ void *module_alloc(unsigned long size)
> */
> p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
> module_alloc_base + SZ_4G, GFP_KERNEL,
> - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
> + PAGE_KERNEL, 0, NUMA_NO_NODE,
> __builtin_return_address(0));
>
> if (p && (kasan_module_alloc(p, size) < 0)) {
>
Which just makes sure that PTE_PXN never gets dropped while creating
these mappings.
On 5/28/19 7:35 AM, Anshuman Khandual wrote:
>
>
> On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
>> Now that the core code manages the executable permissions of code
>> regions of modules explicitly, it is no longer necessary to create
>
> I guess the permission transition for various module sections happen
> through module_enable_[ro|nx]() after allocating via module_alloc().
>
Indeed.
>> the module vmalloc regions with RWX permissions, and we can create
>> them with RW- permissions instead, which is preferred from a
>> security perspective.
>
> Makes sense. Will this be followed in all architectures now ?
>
I am not sure if every architecture implements module_enable_[ro|nx](),
but if they do, they should probably apply this change as well.
>>
>> Signed-off-by: Ard Biesheuvel <[email protected]>
>> ---
>> arch/arm64/kernel/module.c | 4 ++--
>> 1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
>> index 2e4e3915b4d0..88f0ed31d9aa 100644
>> --- a/arch/arm64/kernel/module.c
>> +++ b/arch/arm64/kernel/module.c
>> @@ -41,7 +41,7 @@ void *module_alloc(unsigned long size)
>>
>> p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
>> module_alloc_base + MODULES_VSIZE,
>> - gfp_mask, PAGE_KERNEL_EXEC, 0,
>> + gfp_mask, PAGE_KERNEL, 0,
>> NUMA_NO_NODE, __builtin_return_address(0));
>>
>> if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
>> @@ -57,7 +57,7 @@ void *module_alloc(unsigned long size)
>> */
>> p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
>> module_alloc_base + SZ_4G, GFP_KERNEL,
>> - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
>> + PAGE_KERNEL, 0, NUMA_NO_NODE,
>> __builtin_return_address(0));
>>
>> if (p && (kasan_module_alloc(p, size) < 0)) {
>>
>
> Which just makes sure that PTE_PXN never gets dropped while creating
> these mappings.
>
Not sure what you mean. Is there a question here?
On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
> Wire up the special helper functions to manipulate aliases of vmalloc
> regions in the linear map.
IMHO the commit message here could be a bit more descriptive, given the
amount of change this patch brings in.
>
> Signed-off-by: Ard Biesheuvel <[email protected]>
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/include/asm/cacheflush.h | 3 ++
> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
> mm/vmalloc.c | 11 -----
> 4 files changed, 44 insertions(+), 19 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index ca9c175fb949..4ab32180eabd 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -26,6 +26,7 @@ config ARM64
> select ARCH_HAS_MEMBARRIER_SYNC_CORE
> select ARCH_HAS_PTE_SPECIAL
> select ARCH_HAS_SETUP_DMA_OPS
> + select ARCH_HAS_SET_DIRECT_MAP
> select ARCH_HAS_SET_MEMORY
> select ARCH_HAS_STRICT_KERNEL_RWX
> select ARCH_HAS_STRICT_MODULE_RWX
> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
> index 19844211a4e6..b9ee5510067f 100644
> --- a/arch/arm64/include/asm/cacheflush.h
> +++ b/arch/arm64/include/asm/cacheflush.h
> @@ -187,4 +187,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
>
> int set_memory_valid(unsigned long addr, int numpages, int enable);
>
> +int set_direct_map_invalid_noflush(struct page *page);
> +int set_direct_map_default_noflush(struct page *page);
> +
> #endif
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 6cd645edcf35..9c6b9039ec8f 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -159,17 +159,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
> __pgprot(PTE_VALID));
> }
>
> -#ifdef CONFIG_DEBUG_PAGEALLOC
> +int set_direct_map_invalid_noflush(struct page *page)
> +{
> + struct page_change_data data = {
> + .set_mask = __pgprot(0),
> + .clear_mask = __pgprot(PTE_VALID),
> + };
> +
> + if (!rodata_full)
> + return 0;
Why does rodata_full need to be probed here? Should we not still require the
following transition even if rodata_full is not enabled? Just wondering whether
we can use the VM_FLUSH_RESET_PERMS feature without these required transitions.
/*
* Set direct map to something invalid so that it won't be cached if
* there are any accesses after the TLB flush, then flush the TLB and
* reset the direct map permissions to the default.
*/
set_area_direct_map(area, set_direct_map_invalid_noflush);
_vm_unmap_aliases(start, end, 1);
set_area_direct_map(area, set_direct_map_default_noflush);
> +
> + return apply_to_page_range(&init_mm,
> + (unsigned long)page_address(page),
> + PAGE_SIZE, change_page_range, &data);
> +}
> +
> +int set_direct_map_default_noflush(struct page *page)
> +{
> + struct page_change_data data = {
> + .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
> + .clear_mask = __pgprot(PTE_RDONLY),
Replace __pgprot(PTE_VALID | PTE_WRITE) with PAGE_KERNEL instead !
> + };
> +
> + if (!rodata_full)
> + return 0;
> +
> + return apply_to_page_range(&init_mm,
> + (unsigned long)page_address(page),
> + PAGE_SIZE, change_page_range, &data);
> +}
> +
IIUC set_direct_map_invalid_noflush() and set_direct_map_default_noflush()
should set *appropriate* permissions as seen fit from platform perspective
to implement this transition.
In here set_direct_map_invalid_noflush() makes the entry invalid preventing
further MMU walks (hence new TLB entries). set_direct_map_default_noflush()
makes it a valid write entry. Though it looks similar to PAGE_KERNEL which
is the default permission for linear mapping on arm64 via __map_memblock().
Should not PAGE_KERNEL be used explicitly as suggested above.
> void __kernel_map_pages(struct page *page, int numpages, int enable)
> {
> + if (!debug_pagealloc_enabled() && !rodata_full)
> + return;
> +
I guess this is not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP here and should
be a fix or an enhancement to the CONFIG_DEBUG_PAGEALLOC implementation. Just
curious, is the !rodata_full check here to ensure that the linear mapping does
not have block or contig mappings and is backed by regular pages only?
> set_memory_valid((unsigned long)page_address(page), numpages, enable);
> }
> -#ifdef CONFIG_HIBERNATION
> +
> /*
> - * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
> - * is used to determine if a linear map page has been marked as not-valid by
> - * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
> - * This is based on kern_addr_valid(), which almost does what we need.
> + * This function is used to determine if a linear map page has been marked as
> + * not-valid. Walk the page table and check the PTE_VALID bit. This is based
> + * on kern_addr_valid(), which almost does what we need.
> *
> * Because this is only called on the kernel linear map, p?d_sect() implies
> * p?d_present(). When debug_pagealloc is enabled, sections mappings are
> @@ -183,6 +214,9 @@ bool kernel_page_present(struct page *page)
> pte_t *ptep;
> unsigned long addr = (unsigned long)page_address(page);
>
> + if (!debug_pagealloc_enabled() && !rodata_full)
> + return true;
> +
Ditto, not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP.
On 5/28/19 10:20 AM, Anshuman Khandual wrote:
>
>
> On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
>> In order to avoid transient inconsistencies where freed code pages
>> are remapped writable while stale TLB entries still exist on other
>> cores, mark the kprobes text pages with the VM_FLUSH_RESET_PERMS
>> attribute. This instructs the core vmalloc code not to defer the
>> TLB flush when this region is unmapped and returned to the page
>> allocator.
>
> Makes sense.
>
>>
>> Signed-off-by: Ard Biesheuvel <[email protected]>
>> ---
>> arch/arm64/kernel/probes/kprobes.c | 4 +++-
>> 1 file changed, 3 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
>> index 2509fcb6d404..036cfbf9682a 100644
>> --- a/arch/arm64/kernel/probes/kprobes.c
>> +++ b/arch/arm64/kernel/probes/kprobes.c
>> @@ -131,8 +131,10 @@ void *alloc_insn_page(void)
>> void *page;
>>
>> page = vmalloc_exec(PAGE_SIZE);
>> - if (page)
>> + if (page) {
>> set_memory_ro((unsigned long)page, 1);
>> + set_vm_flush_reset_perms(page);
>> + }
>
> Looks good. It seems there might be more users who would like to set
> VM_FLUSH_RESET_PERMS right after their allocation for the same reason.
> Hence would not it help to have a variant like vmalloc_exec_reset() or
> such which will tag vm_struct->flags with VM_FLUSH_RESET_PERMS right
> after it's allocation without requiring the caller to do the same.
>
If there is a sufficient number of similar users, then of course, it
makes sense to factor this out. However, the kprobes code is slightly
unusual in the sense that it allocates memory and immediately remaps it
read-only, and relies on code patching to populate this allocation. I am
not expecting to see this pattern in other places.
On 05/28/2019 01:50 PM, Ard Biesheuvel wrote:
> On 5/28/19 10:10 AM, Anshuman Khandual wrote:
>>
>>
>> On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
>>> Wire up the special helper functions to manipulate aliases of vmalloc
>>> regions in the linear map.
>>
>> IMHO the commit message here could be bit more descriptive because of the
>> amount of changes this patch brings in.
>>
>>>
>>> Signed-off-by: Ard Biesheuvel <[email protected]>
>>> ---
>>> arch/arm64/Kconfig | 1 +
>>> arch/arm64/include/asm/cacheflush.h | 3 ++
>>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
>>> mm/vmalloc.c | 11 -----
>>> 4 files changed, 44 insertions(+), 19 deletions(-)
>>>
>>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>>> index ca9c175fb949..4ab32180eabd 100644
>>> --- a/arch/arm64/Kconfig
>>> +++ b/arch/arm64/Kconfig
>>> @@ -26,6 +26,7 @@ config ARM64
>>> select ARCH_HAS_MEMBARRIER_SYNC_CORE
>>> select ARCH_HAS_PTE_SPECIAL
>>> select ARCH_HAS_SETUP_DMA_OPS
>>> + select ARCH_HAS_SET_DIRECT_MAP
>>> select ARCH_HAS_SET_MEMORY
>>> select ARCH_HAS_STRICT_KERNEL_RWX
>>> select ARCH_HAS_STRICT_MODULE_RWX
>>> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
>>> index 19844211a4e6..b9ee5510067f 100644
>>> --- a/arch/arm64/include/asm/cacheflush.h
>>> +++ b/arch/arm64/include/asm/cacheflush.h
>>> @@ -187,4 +187,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
>>> int set_memory_valid(unsigned long addr, int numpages, int enable);
>>> +int set_direct_map_invalid_noflush(struct page *page);
>>> +int set_direct_map_default_noflush(struct page *page);
>>> +
>>> #endif
>>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>>> index 6cd645edcf35..9c6b9039ec8f 100644
>>> --- a/arch/arm64/mm/pageattr.c
>>> +++ b/arch/arm64/mm/pageattr.c
>>> @@ -159,17 +159,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
>>> __pgprot(PTE_VALID));
>>> }
>>> -#ifdef CONFIG_DEBUG_PAGEALLOC
>>> +int set_direct_map_invalid_noflush(struct page *page)
>>> +{
>>> + struct page_change_data data = {
>>> + .set_mask = __pgprot(0),
>>> + .clear_mask = __pgprot(PTE_VALID),
>>> + };
>>> +
>>> + if (!rodata_full)
>>> + return 0;
>>
>> Why rodata_full needs to be probed here ? Should not we still require the following
>> transition even if rodata_full is not enabled. Just wondering whether we can use
>> VM_FLUSH_RESET_PERMS feature without these required transitions.
>>
>> /*
>> * Set direct map to something invalid so that it won't be cached if
>> * there are any accesses after the TLB flush, then flush the TLB and
>> * reset the direct map permissions to the default.
>> */
>> set_area_direct_map(area, set_direct_map_invalid_noflush);
>> _vm_unmap_aliases(start, end, 1);
>> set_area_direct_map(area, set_direct_map_default_noflush);
>>
>> > +
>
> How would that work? With rodata_full disabled, the linear region is not mapped down to pages, and so there is no way we can manipulate linear aliases at page granularity.
Then should we not check debug_pagealloc_enabled() as well, as is done for
__kernel_map_pages() later in the patch?
>
>>> + return apply_to_page_range(&init_mm,
>>> + (unsigned long)page_address(page),
>>> + PAGE_SIZE, change_page_range, &data);
>>> +}
>>> +
>>> +int set_direct_map_default_noflush(struct page *page)
>>> +{
>>> + struct page_change_data data = {
>>> + .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
>>> + .clear_mask = __pgprot(PTE_RDONLY),
>>
>> Replace __pgprot(PTE_VALID | PTE_WRITE) with PAGE_KERNEL instead !
>>
>
> This is a delta mask, so no need to pull in the PTE_MAYBE_NG or other attributes that we know we haven't changed.
>
>>> + };
>>> +
>>> + if (!rodata_full)
>>> + return 0;
>>> +
>>> + return apply_to_page_range(&init_mm,
>>> + (unsigned long)page_address(page),
>>> + PAGE_SIZE, change_page_range, &data);
>>> +}
>>> +
>>
>> IIUC set_direct_map_invalid_noflush() and set_direct_map_default_noflush()
>> should set *appropriate* permissions as seen fit from platform perspective
>> to implement this transition.
>>
>> In here set_direct_map_invalid_noflush() makes the entry invalid preventing
>> further MMU walks (hence new TLB entries). set_direct_map_default_noflush()
>> makes it a valid write entry. Though it looks similar to PAGE_KERNEL which
>> is the default permission for linear mapping on arm64 via __map_memblock().
>> Should not PAGE_KERNEL be used explicitly as suggested above.
>>
>
> No. We should restore the attributes that we cleared when manipulating the linear aliases. There isn't a lot of code that does that, so I don't see the need to use PAGE_KERNEL here.
Fair enough.
>
>>> void __kernel_map_pages(struct page *page, int numpages, int enable)
>>> {
>>> + if (!debug_pagealloc_enabled() && !rodata_full)
>>> + return;
>>> +
>>
>> I guess this is not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP here and should
>> be a fix or an enhancement to CONFIG_DEBUG_PAGEALLOC implementation. Just
>> curious, !rodata_full check here to ensure that linear mapping does not have
>> block or contig mappings and should be backed by regular pages only ?
>>
>
> It is related. CONFIG_ARCH_HAS_SET_DIRECT_MAP introduces references to __kernel_map_pages() in generic code, so enabling CONFIG_ARCH_HAS_SET_DIRECT_MAP should make __kernel_map_pages() available unconditionally as well.
Ahh, got it.
>
>>> set_memory_valid((unsigned long)page_address(page), numpages, enable);
>>> }
>>> -#ifdef CONFIG_HIBERNATION
>>> +
>>> /*
>>> - * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
>>> - * is used to determine if a linear map page has been marked as not-valid by
>>> - * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
>>> - * This is based on kern_addr_valid(), which almost does what we need.
>>> + * This function is used to determine if a linear map page has been marked as
>>> + * not-valid. Walk the page table and check the PTE_VALID bit. This is based
>>> + * on kern_addr_valid(), which almost does what we need.
>>> *
>>> * Because this is only called on the kernel linear map, p?d_sect() implies
>>> * p?d_present(). When debug_pagealloc is enabled, sections mappings are
>>> @@ -183,6 +214,9 @@ bool kernel_page_present(struct page *page)
>>> pte_t *ptep;
>>> unsigned long addr = (unsigned long)page_address(page);
>>> + if (!debug_pagealloc_enabled() && !rodata_full)
>>> + return true;
>>> +
>>
>> Ditto, not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP.
>>
>
> It is related.
Never mind.
On 5/28/19 10:41 AM, Anshuman Khandual wrote:
>
>
> On 05/28/2019 01:50 PM, Ard Biesheuvel wrote:
>> On 5/28/19 10:10 AM, Anshuman Khandual wrote:
>>>
>>>
>>> On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
>>>> Wire up the special helper functions to manipulate aliases of vmalloc
>>>> regions in the linear map.
>>>
>>> IMHO the commit message here could be bit more descriptive because of the
>>> amount of changes this patch brings in.
>>>
>>>>
>>>> Signed-off-by: Ard Biesheuvel <[email protected]>
>>>> ---
>>>> arch/arm64/Kconfig | 1 +
>>>> arch/arm64/include/asm/cacheflush.h | 3 ++
>>>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
>>>> mm/vmalloc.c | 11 -----
>>>> 4 files changed, 44 insertions(+), 19 deletions(-)
>>>>
>>>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>>>> index ca9c175fb949..4ab32180eabd 100644
>>>> --- a/arch/arm64/Kconfig
>>>> +++ b/arch/arm64/Kconfig
>>>> @@ -26,6 +26,7 @@ config ARM64
>>>> select ARCH_HAS_MEMBARRIER_SYNC_CORE
>>>> select ARCH_HAS_PTE_SPECIAL
>>>> select ARCH_HAS_SETUP_DMA_OPS
>>>> + select ARCH_HAS_SET_DIRECT_MAP
>>>> select ARCH_HAS_SET_MEMORY
>>>> select ARCH_HAS_STRICT_KERNEL_RWX
>>>> select ARCH_HAS_STRICT_MODULE_RWX
>>>> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
>>>> index 19844211a4e6..b9ee5510067f 100644
>>>> --- a/arch/arm64/include/asm/cacheflush.h
>>>> +++ b/arch/arm64/include/asm/cacheflush.h
>>>> @@ -187,4 +187,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
>>>> int set_memory_valid(unsigned long addr, int numpages, int enable);
>>>> +int set_direct_map_invalid_noflush(struct page *page);
>>>> +int set_direct_map_default_noflush(struct page *page);
>>>> +
>>>> #endif
>>>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>>>> index 6cd645edcf35..9c6b9039ec8f 100644
>>>> --- a/arch/arm64/mm/pageattr.c
>>>> +++ b/arch/arm64/mm/pageattr.c
>>>> @@ -159,17 +159,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
>>>> __pgprot(PTE_VALID));
>>>> }
>>>> -#ifdef CONFIG_DEBUG_PAGEALLOC
>>>> +int set_direct_map_invalid_noflush(struct page *page)
>>>> +{
>>>> + struct page_change_data data = {
>>>> + .set_mask = __pgprot(0),
>>>> + .clear_mask = __pgprot(PTE_VALID),
>>>> + };
>>>> +
>>>> + if (!rodata_full)
>>>> + return 0;
>>>
>>> Why rodata_full needs to be probed here ? Should not we still require the following
>>> transition even if rodata_full is not enabled. Just wondering whether we can use
>>> VM_FLUSH_RESET_PERMS feature without these required transitions.
>>>
>>> /*
>>> * Set direct map to something invalid so that it won't be cached if
>>> * there are any accesses after the TLB flush, then flush the TLB and
>>> * reset the direct map permissions to the default.
>>> */
>>> set_area_direct_map(area, set_direct_map_invalid_noflush);
>>> _vm_unmap_aliases(start, end, 1);
>>> set_area_direct_map(area, set_direct_map_default_noflush);
>>>
>>> > +
>>
>> How would that work? With rodata_full disabled, the linear region is not mapped down to pages, and so there is no way we can manipulate linear aliases at page granularity.
>
> Then should not we check debug_pagealloc_enabled() as well as in case
> for __kernel_map_pages() later in the patch.
>
Yes, what we could do technically is to permit the direct_map_xx() calls
to proceed if debug_pagealloc_enabled() returns true, since we are
guaranteed to have a page granular mapping of the entire linear region.
However, that means we will be manipulating the linear aliases of
vmalloc mappings even if the user has requested us not to, i.e., by
passing rodata=on rather than rodata=full, so I don't think we should be
doing that.
>>
>>>> + return apply_to_page_range(&init_mm,
>>>> + (unsigned long)page_address(page),
>>>> + PAGE_SIZE, change_page_range, &data);
>>>> +}
>>>> +
>>>> +int set_direct_map_default_noflush(struct page *page)
>>>> +{
>>>> + struct page_change_data data = {
>>>> + .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
>>>> + .clear_mask = __pgprot(PTE_RDONLY),
>>>
>>> Replace __pgprot(PTE_VALID | PTE_WRITE) with PAGE_KERNEL instead !
>>>
>>
>> This is a delta mask, so no need to pull in the PTE_MAYBE_NG or other attributes that we know we haven't changed.
>>
>>>> + };
>>>> +
>>>> + if (!rodata_full)
>>>> + return 0;
>>>> +
>>>> + return apply_to_page_range(&init_mm,
>>>> + (unsigned long)page_address(page),
>>>> + PAGE_SIZE, change_page_range, &data);
>>>> +}
>>>> +
>>>
>>> IIUC set_direct_map_invalid_noflush() and set_direct_map_default_noflush()
>>> should set *appropriate* permissions as seen fit from platform perspective
>>> to implement this transition.
>>>
>>> In here set_direct_map_invalid_noflush() makes the entry invalid preventing
>>> further MMU walks (hence new TLB entries). set_direct_map_default_noflush()
>>> makes it a valid write entry. Though it looks similar to PAGE_KERNEL which
>>> is the default permission for linear mapping on arm64 via __map_memblock().
>>> Should not PAGE_KERNEL be used explicitly as suggested above.
>>>
>>
>> No. We should restore the attributes that we cleared when manipulating the linear aliases. There isn't a lot of code that does that, so I don't see the need to use PAGE_KERNEL here.
>
> Fair enough.
>
>>
>>>> void __kernel_map_pages(struct page *page, int numpages, int enable)
>>>> {
>>>> + if (!debug_pagealloc_enabled() && !rodata_full)
>>>> + return;
>>>> +
>>>
>>> I guess this is not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP here and should
>>> be a fix or an enhancement to CONFIG_DEBUG_PAGEALLOC implementation. Just
>>> curious, !rodata_full check here to ensure that linear mapping does not have
>>> block or contig mappings and should be backed by regular pages only ?
>>>
>>
>> It is related. CONFIG_ARCH_HAS_SET_DIRECT_MAP introduces references to __kernel_map_pages() in generic code, so enabling CONFIG_ARCH_HAS_SET_DIRECT_MAP should make __kernel_map_pages() available unconditionally as well.
>
> Ahh, got it.
>
>>
>>>> set_memory_valid((unsigned long)page_address(page), numpages, enable);
>>>> }
>>>> -#ifdef CONFIG_HIBERNATION
>>>> +
>>>> /*
>>>> - * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
>>>> - * is used to determine if a linear map page has been marked as not-valid by
>>>> - * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
>>>> - * This is based on kern_addr_valid(), which almost does what we need.
>>>> + * This function is used to determine if a linear map page has been marked as
>>>> + * not-valid. Walk the page table and check the PTE_VALID bit. This is based
>>>> + * on kern_addr_valid(), which almost does what we need.
>>>> *
>>>> * Because this is only called on the kernel linear map, p?d_sect() implies
>>>> * p?d_present(). When debug_pagealloc is enabled, sections mappings are
>>>> @@ -183,6 +214,9 @@ bool kernel_page_present(struct page *page)
>>>> pte_t *ptep;
>>>> unsigned long addr = (unsigned long)page_address(page);
>>>> + if (!debug_pagealloc_enabled() && !rodata_full)
>>>> + return true;
>>>> +
>>>
>>> Ditto, not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP.
>>>
>>
>> It is related.
>
> Never mind.
>
On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
> In order to avoid transient inconsistencies where freed code pages
> are remapped writable while stale TLB entries still exist on other
> cores, mark the kprobes text pages with the VM_FLUSH_RESET_PERMS
> attribute. This instructs the core vmalloc code not to defer the
> TLB flush when this region is unmapped and returned to the page
> allocator.
Makes sense.
>
> Signed-off-by: Ard Biesheuvel <[email protected]>
> ---
> arch/arm64/kernel/probes/kprobes.c | 4 +++-
> 1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
> index 2509fcb6d404..036cfbf9682a 100644
> --- a/arch/arm64/kernel/probes/kprobes.c
> +++ b/arch/arm64/kernel/probes/kprobes.c
> @@ -131,8 +131,10 @@ void *alloc_insn_page(void)
> void *page;
>
> page = vmalloc_exec(PAGE_SIZE);
> - if (page)
> + if (page) {
> set_memory_ro((unsigned long)page, 1);
> + set_vm_flush_reset_perms(page);
> + }
Looks good. It seems there might be more users who would like to set
VM_FLUSH_RESET_PERMS right after their allocation for the same reason.
Hence, would it not help to have a variant like vmalloc_exec_reset() or
such, which will tag vm_struct->flags with VM_FLUSH_RESET_PERMS right
after its allocation, without requiring the caller to do the same.
On 5/28/19 10:10 AM, Anshuman Khandual wrote:
>
>
> On 05/23/2019 03:52 PM, Ard Biesheuvel wrote:
>> Wire up the special helper functions to manipulate aliases of vmalloc
>> regions in the linear map.
>
> IMHO the commit message here could be a bit more descriptive because of the
> amount of changes this patch brings in.
>
>>
>> Signed-off-by: Ard Biesheuvel <[email protected]>
>> ---
>> arch/arm64/Kconfig | 1 +
>> arch/arm64/include/asm/cacheflush.h | 3 ++
>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
>> mm/vmalloc.c | 11 -----
>> 4 files changed, 44 insertions(+), 19 deletions(-)
>>
>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>> index ca9c175fb949..4ab32180eabd 100644
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
>> @@ -26,6 +26,7 @@ config ARM64
>> select ARCH_HAS_MEMBARRIER_SYNC_CORE
>> select ARCH_HAS_PTE_SPECIAL
>> select ARCH_HAS_SETUP_DMA_OPS
>> + select ARCH_HAS_SET_DIRECT_MAP
>> select ARCH_HAS_SET_MEMORY
>> select ARCH_HAS_STRICT_KERNEL_RWX
>> select ARCH_HAS_STRICT_MODULE_RWX
>> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
>> index 19844211a4e6..b9ee5510067f 100644
>> --- a/arch/arm64/include/asm/cacheflush.h
>> +++ b/arch/arm64/include/asm/cacheflush.h
>> @@ -187,4 +187,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
>>
>> int set_memory_valid(unsigned long addr, int numpages, int enable);
>>
>> +int set_direct_map_invalid_noflush(struct page *page);
>> +int set_direct_map_default_noflush(struct page *page);
>> +
>> #endif
>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>> index 6cd645edcf35..9c6b9039ec8f 100644
>> --- a/arch/arm64/mm/pageattr.c
>> +++ b/arch/arm64/mm/pageattr.c
>> @@ -159,17 +159,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
>> __pgprot(PTE_VALID));
>> }
>>
>> -#ifdef CONFIG_DEBUG_PAGEALLOC
>> +int set_direct_map_invalid_noflush(struct page *page)
>> +{
>> + struct page_change_data data = {
>> + .set_mask = __pgprot(0),
>> + .clear_mask = __pgprot(PTE_VALID),
>> + };
>> +
>> + if (!rodata_full)
>> + return 0;
>
> Why rodata_full needs to be probed here ? Should not we still require the following
> transition even if rodata_full is not enabled. Just wondering whether we can use
> VM_FLUSH_RESET_PERMS feature without these required transitions.
>
> /*
> * Set direct map to something invalid so that it won't be cached if
> * there are any accesses after the TLB flush, then flush the TLB and
> * reset the direct map permissions to the default.
> */
> set_area_direct_map(area, set_direct_map_invalid_noflush);
> _vm_unmap_aliases(start, end, 1);
> set_area_direct_map(area, set_direct_map_default_noflush);
>
> > +
How would that work? With rodata_full disabled, the linear region is not
mapped down to pages, and so there is no way we can manipulate linear
aliases at page granularity.
>> + return apply_to_page_range(&init_mm,
>> + (unsigned long)page_address(page),
>> + PAGE_SIZE, change_page_range, &data);
>> +}
>> +
>> +int set_direct_map_default_noflush(struct page *page)
>> +{
>> + struct page_change_data data = {
>> + .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
>> + .clear_mask = __pgprot(PTE_RDONLY),
>
> Replace __pgprot(PTE_VALID | PTE_WRITE) with PAGE_KERNEL instead !
>
This is a delta mask, so no need to pull in the PTE_MAYBE_NG or other
attributes that we know we haven't changed.
>> + };
>> +
>> + if (!rodata_full)
>> + return 0;
>> +
>> + return apply_to_page_range(&init_mm,
>> + (unsigned long)page_address(page),
>> + PAGE_SIZE, change_page_range, &data);
>> +}
>> +
>
> IIUC set_direct_map_invalid_noflush() and set_direct_map_default_noflush()
> should set *appropriate* permissions as seen fit from platform perspective
> to implement this transition.
>
> In here set_direct_map_invalid_noflush() makes the entry invalid preventing
> further MMU walks (hence new TLB entries). set_direct_map_default_noflush()
> makes it a valid write entry. Though it looks similar to PAGE_KERNEL which
> is the default permission for linear mapping on arm64 via __map_memblock().
> Should not PAGE_KERNEL be used explicitly as suggested above.
>
No. We should restore the attributes that we cleared when manipulating
the linear aliases. There isn't a lot of code that does that, so I don't
see the need to use PAGE_KERNEL here.
>> void __kernel_map_pages(struct page *page, int numpages, int enable)
>> {
>> + if (!debug_pagealloc_enabled() && !rodata_full)
>> + return;
>> +
>
> I guess this is not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP here and should
> be a fix or an enhancement to CONFIG_DEBUG_PAGEALLOC implementation. Just
> curious, !rodata_full check here to ensure that linear mapping does not have
> block or contig mappings and should be backed by regular pages only ?
>
It is related. CONFIG_ARCH_HAS_SET_DIRECT_MAP introduces references to
__kernel_map_pages() in generic code, so enabling
CONFIG_ARCH_HAS_SET_DIRECT_MAP should make __kernel_map_pages()
available unconditionally as well.
>> set_memory_valid((unsigned long)page_address(page), numpages, enable);
>> }
>> -#ifdef CONFIG_HIBERNATION
>> +
>> /*
>> - * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
>> - * is used to determine if a linear map page has been marked as not-valid by
>> - * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
>> - * This is based on kern_addr_valid(), which almost does what we need.
>> + * This function is used to determine if a linear map page has been marked as
>> + * not-valid. Walk the page table and check the PTE_VALID bit. This is based
>> + * on kern_addr_valid(), which almost does what we need.
>> *
>> * Because this is only called on the kernel linear map, p?d_sect() implies
>> * p?d_present(). When debug_pagealloc is enabled, sections mappings are
>> @@ -183,6 +214,9 @@ bool kernel_page_present(struct page *page)
>> pte_t *ptep;
>> unsigned long addr = (unsigned long)page_address(page);
>>
>> + if (!debug_pagealloc_enabled() && !rodata_full)
>> + return true;
>> +
>
> Ditto, not related to CONFIG_ARCH_HAS_SET_DIRECT_MAP.
>
It is related.
On Thu, May 23, 2019 at 11:22:52AM +0100, Ard Biesheuvel wrote:
> Wire up the code introduced in v5.2 to manage the permissions
> of executable vmalloc regions (and their linear aliases) more
> strictly.
>
> One of the things that came up in the internal discussion is
> whether non-x86 architectures have any benefit at all from the
> lazy vunmap feature, and whether it would perhaps be better to
> implement eager vunmap instead.
>
> Cc: Nadav Amit <[email protected]>
> Cc: Rick Edgecombe <[email protected]>
> Cc: Peter Zijlstra <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Will Deacon <[email protected]>
> Cc: Masami Hiramatsu <[email protected]>
> Cc: James Morse <[email protected]>
>
> Ard Biesheuvel (4):
> arm64: module: create module allocations without exec permissions
> arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
> arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
> arm64: bpf: do not allocate executable memory
>
> arch/arm64/Kconfig | 1 +
> arch/arm64/include/asm/cacheflush.h | 3 ++
> arch/arm64/kernel/module.c | 4 +-
> arch/arm64/kernel/probes/kprobes.c | 4 +-
> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
> arch/arm64/net/bpf_jit_comp.c | 2 +-
> mm/vmalloc.c | 11 -----
> 7 files changed, 50 insertions(+), 23 deletions(-)
Thanks, this all looks good to me. I can get pick this up for 5.2 if
Rick's fixes [1] land soon enough.
Cheers,
Will
[1] https://lore.kernel.org/lkml/[email protected]/T/#u
On 5/28/19 12:04 PM, Will Deacon wrote:
> On Thu, May 23, 2019 at 11:22:52AM +0100, Ard Biesheuvel wrote:
>> Wire up the code introduced in v5.2 to manage the permissions
>> of executable vmalloc regions (and their linear aliases) more
>> strictly.
>>
>> One of the things that came up in the internal discussion is
>> whether non-x86 architectures have any benefit at all from the
>> lazy vunmap feature, and whether it would perhaps be better to
>> implement eager vunmap instead.
>>
>> Cc: Nadav Amit <[email protected]>
>> Cc: Rick Edgecombe <[email protected]>
>> Cc: Peter Zijlstra <[email protected]>
>> Cc: Andrew Morton <[email protected]>
>> Cc: Will Deacon <[email protected]>
>> Cc: Masami Hiramatsu <[email protected]>
>> Cc: James Morse <[email protected]>
>>
>> Ard Biesheuvel (4):
>> arm64: module: create module allocations without exec permissions
>> arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
>> arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
>> arm64: bpf: do not allocate executable memory
>>
>> arch/arm64/Kconfig | 1 +
>> arch/arm64/include/asm/cacheflush.h | 3 ++
>> arch/arm64/kernel/module.c | 4 +-
>> arch/arm64/kernel/probes/kprobes.c | 4 +-
>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
>> arch/arm64/net/bpf_jit_comp.c | 2 +-
>> mm/vmalloc.c | 11 -----
>> 7 files changed, 50 insertions(+), 23 deletions(-)
>
> Thanks, this all looks good to me. I can get pick this up for 5.2 if
> Rick's fixes [1] land soon enough.
>
Note that you'll get a trivial conflict in the hunk against mm/vmalloc.c.
On Tue, May 28, 2019 at 11:04:20AM +0100, Will Deacon wrote:
> On Thu, May 23, 2019 at 11:22:52AM +0100, Ard Biesheuvel wrote:
> > Wire up the code introduced in v5.2 to manage the permissions
> > of executable vmalloc regions (and their linear aliases) more
> > strictly.
> >
> > One of the things that came up in the internal discussion is
> > whether non-x86 architectures have any benefit at all from the
> > lazy vunmap feature, and whether it would perhaps be better to
> > implement eager vunmap instead.
> >
> > Cc: Nadav Amit <[email protected]>
> > Cc: Rick Edgecombe <[email protected]>
> > Cc: Peter Zijlstra <[email protected]>
> > Cc: Andrew Morton <[email protected]>
> > Cc: Will Deacon <[email protected]>
> > Cc: Masami Hiramatsu <[email protected]>
> > Cc: James Morse <[email protected]>
> >
> > Ard Biesheuvel (4):
> > arm64: module: create module allocations without exec permissions
> > arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
> > arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
> > arm64: bpf: do not allocate executable memory
> >
> > arch/arm64/Kconfig | 1 +
> > arch/arm64/include/asm/cacheflush.h | 3 ++
> > arch/arm64/kernel/module.c | 4 +-
> > arch/arm64/kernel/probes/kprobes.c | 4 +-
> > arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
> > arch/arm64/net/bpf_jit_comp.c | 2 +-
> > mm/vmalloc.c | 11 -----
> > 7 files changed, 50 insertions(+), 23 deletions(-)
>
> Thanks, this all looks good to me. I can get pick this up for 5.2 if
> Rick's fixes [1] land soon enough.
Bah, I missed these landing in -rc5 and I think it's a bit too late for
us to take this for 5.2 now, particularly with our limited ability to
fix any late regressions that might arise.
In which case, Catalin, please can you take these for 5.3? You might run
into some testing failures with for-next/core due to the late arrival of
Rick's fixes, but linux-next should be alright and I don't think you'll
get any conflicts.
Acked-by: Will Deacon <[email protected]>
Ard: are you ok with that?
Thanks,
Will
On 6/24/19 1:16 PM, Will Deacon wrote:
> On Tue, May 28, 2019 at 11:04:20AM +0100, Will Deacon wrote:
>> On Thu, May 23, 2019 at 11:22:52AM +0100, Ard Biesheuvel wrote:
>>> Wire up the code introduced in v5.2 to manage the permissions
>>> of executable vmalloc regions (and their linear aliases) more
>>> strictly.
>>>
>>> One of the things that came up in the internal discussion is
>>> whether non-x86 architectures have any benefit at all from the
>>> lazy vunmap feature, and whether it would perhaps be better to
>>> implement eager vunmap instead.
>>>
>>> Cc: Nadav Amit <[email protected]>
>>> Cc: Rick Edgecombe <[email protected]>
>>> Cc: Peter Zijlstra <[email protected]>
>>> Cc: Andrew Morton <[email protected]>
>>> Cc: Will Deacon <[email protected]>
>>> Cc: Masami Hiramatsu <[email protected]>
>>> Cc: James Morse <[email protected]>
>>>
>>> Ard Biesheuvel (4):
>>> arm64: module: create module allocations without exec permissions
>>> arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
>>> arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
>>> arm64: bpf: do not allocate executable memory
>>>
>>> arch/arm64/Kconfig | 1 +
>>> arch/arm64/include/asm/cacheflush.h | 3 ++
>>> arch/arm64/kernel/module.c | 4 +-
>>> arch/arm64/kernel/probes/kprobes.c | 4 +-
>>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
>>> arch/arm64/net/bpf_jit_comp.c | 2 +-
>>> mm/vmalloc.c | 11 -----
>>> 7 files changed, 50 insertions(+), 23 deletions(-)
>>
>> Thanks, this all looks good to me. I can get pick this up for 5.2 if
>> Rick's fixes [1] land soon enough.
>
> Bah, I missed these landing in -rc5 and I think it's a bit too late for
> us to take this for 5.2. now particularly with our limited ability to
> fix any late regressions that might arise.
>
> In which case, Catalin, please can you take these for 5.3? You might run
> into some testing failures with for-next/core due to the late of Rick's
> fixes, but linux-next should be alright and I don't think you'll get any
> conflicts.
>
> Acked-by: Will Deacon <[email protected]>
>
> Ard: are you ok with that?
>
That is fine, although I won't be around to pick up the pieces by the
time the merge window opens. Also, I'd like to follow up on the lazy
vunmap thing for non-x86, but perhaps we can talk about this at plumbers?
(+ Catalin)
On Mon, 24 Jun 2019 at 13:23, Ard Biesheuvel <[email protected]> wrote:
>
> On 6/24/19 1:16 PM, Will Deacon wrote:
> > On Tue, May 28, 2019 at 11:04:20AM +0100, Will Deacon wrote:
> >> On Thu, May 23, 2019 at 11:22:52AM +0100, Ard Biesheuvel wrote:
> >>> Wire up the code introduced in v5.2 to manage the permissions
> >>> of executable vmalloc regions (and their linear aliases) more
> >>> strictly.
> >>>
> >>> One of the things that came up in the internal discussion is
> >>> whether non-x86 architectures have any benefit at all from the
> >>> lazy vunmap feature, and whether it would perhaps be better to
> >>> implement eager vunmap instead.
> >>>
> >>> Cc: Nadav Amit <[email protected]>
> >>> Cc: Rick Edgecombe <[email protected]>
> >>> Cc: Peter Zijlstra <[email protected]>
> >>> Cc: Andrew Morton <[email protected]>
> >>> Cc: Will Deacon <[email protected]>
> >>> Cc: Masami Hiramatsu <[email protected]>
> >>> Cc: James Morse <[email protected]>
> >>>
> >>> Ard Biesheuvel (4):
> >>> arm64: module: create module allocations without exec permissions
> >>> arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
> >>> arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
> >>> arm64: bpf: do not allocate executable memory
> >>>
> >>> arch/arm64/Kconfig | 1 +
> >>> arch/arm64/include/asm/cacheflush.h | 3 ++
> >>> arch/arm64/kernel/module.c | 4 +-
> >>> arch/arm64/kernel/probes/kprobes.c | 4 +-
> >>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
> >>> arch/arm64/net/bpf_jit_comp.c | 2 +-
> >>> mm/vmalloc.c | 11 -----
> >>> 7 files changed, 50 insertions(+), 23 deletions(-)
> >>
> >> Thanks, this all looks good to me. I can get pick this up for 5.2 if
> >> Rick's fixes [1] land soon enough.
> >
> > Bah, I missed these landing in -rc5 and I think it's a bit too late for
> > us to take this for 5.2. now particularly with our limited ability to
> > fix any late regressions that might arise.
> >
> > In which case, Catalin, please can you take these for 5.3? You might run
> > into some testing failures with for-next/core due to the late of Rick's
> > fixes, but linux-next should be alright and I don't think you'll get any
> > conflicts.
> >
> > Acked-by: Will Deacon <[email protected]>
> >
> > Ard: are you ok with that?
> >
>
> That is fine, although I won't be around to pick up the pieces by the
> time the merge window opens. Also, I'd like to follow up on the lazy
> vunmap thing for non-x86, but perhaps we can talk about this at plumbers?
>
Actually, you will run into a couple of conflicts. Let me know if you
want me to respin (although they still won't apply cleanly to both
for-next/core and -next)
On Mon, Jun 24, 2019 at 04:29:39PM +0200, Ard Biesheuvel wrote:
> On Mon, 24 Jun 2019 at 13:23, Ard Biesheuvel <[email protected]> wrote:
> > On 6/24/19 1:16 PM, Will Deacon wrote:
> > > On Tue, May 28, 2019 at 11:04:20AM +0100, Will Deacon wrote:
> > >> On Thu, May 23, 2019 at 11:22:52AM +0100, Ard Biesheuvel wrote:
> > >>> Ard Biesheuvel (4):
> > >>> arm64: module: create module allocations without exec permissions
> > >>> arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
> > >>> arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
> > >>> arm64: bpf: do not allocate executable memory
> > >>>
> > >>> arch/arm64/Kconfig | 1 +
> > >>> arch/arm64/include/asm/cacheflush.h | 3 ++
> > >>> arch/arm64/kernel/module.c | 4 +-
> > >>> arch/arm64/kernel/probes/kprobes.c | 4 +-
> > >>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
> > >>> arch/arm64/net/bpf_jit_comp.c | 2 +-
> > >>> mm/vmalloc.c | 11 -----
> > >>> 7 files changed, 50 insertions(+), 23 deletions(-)
> > >>
> > >> Thanks, this all looks good to me. I can get pick this up for 5.2 if
> > >> Rick's fixes [1] land soon enough.
> > >
> > > Bah, I missed these landing in -rc5 and I think it's a bit too late for
> > > us to take this for 5.2. now particularly with our limited ability to
> > > fix any late regressions that might arise.
> > >
> > > In which case, Catalin, please can you take these for 5.3? You might run
> > > into some testing failures with for-next/core due to the late of Rick's
> > > fixes, but linux-next should be alright and I don't think you'll get any
> > > conflicts.
> > >
> > > Acked-by: Will Deacon <[email protected]>
> > >
> > > Ard: are you ok with that?
> >
> > That is fine, although I won't be around to pick up the pieces by the
> > time the merge window opens. Also, I'd like to follow up on the lazy
> > vunmap thing for non-x86, but perhaps we can talk about this at plumbers?
>
> Actually, you will run into a couple of conflicts. Let me know if you
> want me to respin (although they still won't apply cleanly to both
> for-next/core and -next)
I queued them in for-next/core (and fixed a minor conflict). Thanks.
--
Catalin
On Mon, 24 Jun 2019 at 19:14, Catalin Marinas <[email protected]> wrote:
>
> On Mon, Jun 24, 2019 at 04:29:39PM +0200, Ard Biesheuvel wrote:
> > On Mon, 24 Jun 2019 at 13:23, Ard Biesheuvel <[email protected]> wrote:
> > > On 6/24/19 1:16 PM, Will Deacon wrote:
> > > > On Tue, May 28, 2019 at 11:04:20AM +0100, Will Deacon wrote:
> > > >> On Thu, May 23, 2019 at 11:22:52AM +0100, Ard Biesheuvel wrote:
> > > >>> Ard Biesheuvel (4):
> > > >>> arm64: module: create module allocations without exec permissions
> > > >>> arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
> > > >>> arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
> > > >>> arm64: bpf: do not allocate executable memory
> > > >>>
> > > >>> arch/arm64/Kconfig | 1 +
> > > >>> arch/arm64/include/asm/cacheflush.h | 3 ++
> > > >>> arch/arm64/kernel/module.c | 4 +-
> > > >>> arch/arm64/kernel/probes/kprobes.c | 4 +-
> > > >>> arch/arm64/mm/pageattr.c | 48 ++++++++++++++++----
> > > >>> arch/arm64/net/bpf_jit_comp.c | 2 +-
> > > >>> mm/vmalloc.c | 11 -----
> > > >>> 7 files changed, 50 insertions(+), 23 deletions(-)
> > > >>
> > > >> Thanks, this all looks good to me. I can get pick this up for 5.2 if
> > > >> Rick's fixes [1] land soon enough.
> > > >
> > > > Bah, I missed these landing in -rc5 and I think it's a bit too late for
> > > > us to take this for 5.2. now particularly with our limited ability to
> > > > fix any late regressions that might arise.
> > > >
> > > > In which case, Catalin, please can you take these for 5.3? You might run
> > > > into some testing failures with for-next/core due to the late of Rick's
> > > > fixes, but linux-next should be alright and I don't think you'll get any
> > > > conflicts.
> > > >
> > > > Acked-by: Will Deacon <[email protected]>
> > > >
> > > > Ard: are you ok with that?
> > >
> > > That is fine, although I won't be around to pick up the pieces by the
> > > time the merge window opens. Also, I'd like to follow up on the lazy
> > > vunmap thing for non-x86, but perhaps we can talk about this at plumbers?
> >
> > Actually, you will run into a couple of conflicts. Let me know if you
> > want me to respin (although they still won't apply cleanly to both
> > for-next/core and -next)
>
> I queued them in for-next/core (and fixed a minor conflict). Thanks.
>
OK, in that case, you will get a conflict in -next on the hunk against
mm/vmalloc.c in the second patch. Just FYI ...