2019-08-30 21:50:37

by Masayoshi Mizuma

Subject: [PATCH v3 4/5] x86/mm/KASLR: Cleanup calculation for direct mapping size

From: Masayoshi Mizuma <[email protected]>

Clean up the calculation of the direct mapping size by factoring it
out into a new helper, calc_direct_mapping_size().

Signed-off-by: Baoquan He <[email protected]>
Signed-off-by: Masayoshi Mizuma <[email protected]>
---
arch/x86/mm/kaslr.c | 50 +++++++++++++++++++++++++++++++--------------
1 file changed, 35 insertions(+), 15 deletions(-)

diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index dc6182eec..8e5f3642e 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -70,15 +70,45 @@ static inline bool kaslr_memory_enabled(void)
return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

+/*
+ * Even though a huge virtual address space is reserved for the direct
+ * mapping of physical memory (e.g. 64TB in 4-level paging mode), few
+ * systems have enough physical memory to use it all, and most have
+ * less than 1TB. So with KASLR enabled, we adapt the size of the
+ * direct mapping area to the size of the actual physical memory plus
+ * the configured padding, CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING.
+ * The rest of the region is handed over to memory randomization.
+ */
+static inline unsigned long calc_direct_mapping_size(void)
+{
+ unsigned long size_tb, memory_tb;
+
+ /*
+ * Size the physical memory mapping to what is actually available,
+ * plus padding if needed (especially for memory hotplug support).
+ */
+ memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
+ CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
+
+ size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
+
+ /*
+ * Adapt physical memory region size based on available memory
+ */
+ if (memory_tb < size_tb)
+ size_tb = memory_tb;
+
+ return size_tb;
+}
+
/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
- size_t i;
- unsigned long vaddr_start, vaddr;
- unsigned long rand, memory_tb;
- struct rnd_state rand_state;
+ unsigned long vaddr_start, vaddr, rand;
unsigned long remain_entropy;
unsigned long vmemmap_size;
+ struct rnd_state rand_state;
+ size_t i;

vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
vaddr = vaddr_start;
@@ -95,20 +125,10 @@ void __init kernel_randomize_memory(void)
if (!kaslr_memory_enabled())
return;

- kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
+ kaslr_regions[0].size_tb = calc_direct_mapping_size();
kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;

- /*
- * Update Physical memory mapping to available and
- * add padding if needed (especially for memory hotplug support).
- */
BUG_ON(kaslr_regions[0].base != &page_offset_base);
- memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
- CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
-
- /* Adapt phyiscal memory region size based on available memory */
- if (memory_tb < kaslr_regions[0].size_tb)
- kaslr_regions[0].size_tb = memory_tb;

/*
* Calculate the vmemmap region size in TBs, aligned to a TB
--
2.18.1
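
To make the arithmetic above concrete, here is a minimal standalone
sketch of the same calculation that can be compiled and run outside the
kernel (it assumes a 64-bit host, like the x86_64 code it mirrors). The
constants mirror 4-level paging (MAX_PHYSMEM_BITS = 46, TB_SHIFT = 40,
so the reserved region is 64TB); the inputs, a machine with 1.5TB of
RAM and a padding of 10TB, are illustrative assumptions rather than
values taken from the patch.

#include <stdio.h>

#define PAGE_SHIFT		12
#define TB_SHIFT		40
#define MAX_PHYSMEM_BITS	46	/* 4-level paging: 2^46 bytes = 64TB */
#define PADDING_TB		10	/* stands in for CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* 1.5TB of RAM expressed as a page frame count, like max_pfn */
	unsigned long max_pfn = (3UL << (TB_SHIFT - 1)) >> PAGE_SHIFT;
	unsigned long size_tb, memory_tb;

	/* Installed memory rounded up to whole TBs, plus the padding */
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		    PADDING_TB;

	/* The full reserved virtual space: 2^(46 - 40) = 64TB */
	size_tb = 1UL << (MAX_PHYSMEM_BITS - TB_SHIFT);

	/* Shrink the direct mapping to what this machine can use */
	if (memory_tb < size_tb)
		size_tb = memory_tb;

	/* Prints 12 (ceil(1.5) + 10); the other 52TB joins randomization */
	printf("direct mapping size: %luTB\n", size_tb);
	return 0;
}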


2019-09-05 14:17:17

by Baoquan He

Subject: Re: [PATCH v3 4/5] x86/mm/KASLR: Cleanup calculation for direct mapping size

On 08/30/19 at 05:47pm, Masayoshi Mizuma wrote:
> From: Masayoshi Mizuma <[email protected]>
>
> Clean up the calculation of the direct mapping size by factoring it
> out into a new helper, calc_direct_mapping_size().
>
> Signed-off-by: Baoquan He <[email protected]>
> Signed-off-by: Masayoshi Mizuma <[email protected]>
> ---
> arch/x86/mm/kaslr.c | 50 +++++++++++++++++++++++++++++++--------------
> 1 file changed, 35 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
> index dc6182eec..8e5f3642e 100644
> --- a/arch/x86/mm/kaslr.c
> +++ b/arch/x86/mm/kaslr.c
> @@ -70,15 +70,45 @@ static inline bool kaslr_memory_enabled(void)
> return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
> }
>
> +/*
> + * Even though a huge virtual address space is reserved for the direct
> + * mapping of physical memory (e.g. 64TB in 4-level paging mode), few
> + * systems have enough physical memory to use it all, and most have
> + * less than 1TB. So with KASLR enabled, we adapt the size of the
> + * direct mapping area to the size of the actual physical memory plus
> + * the configured padding, CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING.
> + * The rest of the region is handed over to memory randomization.
> + */
> +static inline unsigned long calc_direct_mapping_size(void)

I think patches 4 and 5 can be merged: just keep one
calc_direct_mapping_size() to do the mapping size calculation for the
direct mapping section; it's not that complicated. Adding
phys_memmap_size() makes it a little redundant, in my opinion.

Thanks
Baoquan

> +{
> + unsigned long size_tb, memory_tb;
> +
> + /*
> + * Size the physical memory mapping to what is actually available,
> + * plus padding if needed (especially for memory hotplug support).
> + */
> + memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
> + CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
> +
> + size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
> +
> + /*
> + * Adapt physical memory region size based on available memory
> + */
> + if (memory_tb < size_tb)
> + size_tb = memory_tb;
> +
> + return size_tb;
> +}
> +
> /* Initialize base and padding for each memory region randomized with KASLR */
> void __init kernel_randomize_memory(void)
> {
> - size_t i;
> - unsigned long vaddr_start, vaddr;
> - unsigned long rand, memory_tb;
> - struct rnd_state rand_state;
> + unsigned long vaddr_start, vaddr, rand;
> unsigned long remain_entropy;
> unsigned long vmemmap_size;
> + struct rnd_state rand_state;
> + size_t i;
>
> vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
> vaddr = vaddr_start;
> @@ -95,20 +125,10 @@ void __init kernel_randomize_memory(void)
> if (!kaslr_memory_enabled())
> return;
>
> - kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
> + kaslr_regions[0].size_tb = calc_direct_mapping_size();
> kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
>
> - /*
> - * Update Physical memory mapping to available and
> - * add padding if needed (especially for memory hotplug support).
> - */
> BUG_ON(kaslr_regions[0].base != &page_offset_base);
> - memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
> - CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
> -
> - /* Adapt phyiscal memory region size based on available memory */
> - if (memory_tb < kaslr_regions[0].size_tb)
> - kaslr_regions[0].size_tb = memory_tb;
>
> /*
> * Calculate the vmemmap region size in TBs, aligned to a TB
> --
> 2.18.1
>

2019-10-29 19:44:42

by Masayoshi Mizuma

Subject: Re: [PATCH v3 4/5] x86/mm/KASLR: Cleanup calculation for direct mapping size

On Thu, Sep 05, 2019 at 09:54:51PM +0800, Baoquan He wrote:
> On 08/30/19 at 05:47pm, Masayoshi Mizuma wrote:
> > From: Masayoshi Mizuma <[email protected]>
> >
> > Clean up the calculation of the direct mapping size by factoring it
> > out into a new helper, calc_direct_mapping_size().
> >
> > Signed-off-by: Baoquan He <[email protected]>
> > Signed-off-by: Masayoshi Mizuma <[email protected]>
> > ---
> > arch/x86/mm/kaslr.c | 50 +++++++++++++++++++++++++++++++--------------
> > 1 file changed, 35 insertions(+), 15 deletions(-)
> >
> > diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
> > index dc6182eec..8e5f3642e 100644
> > --- a/arch/x86/mm/kaslr.c
> > +++ b/arch/x86/mm/kaslr.c
> > @@ -70,15 +70,45 @@ static inline bool kaslr_memory_enabled(void)
> > return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
> > }
> >
> > +/*
> > + * Even though a huge virtual address space is reserved for the direct
> > + * mapping of physical memory (e.g. 64TB in 4-level paging mode), few
> > + * systems have enough physical memory to use it all, and most have
> > + * less than 1TB. So with KASLR enabled, we adapt the size of the
> > + * direct mapping area to the size of the actual physical memory plus
> > + * the configured padding, CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING.
> > + * The rest of the region is handed over to memory randomization.
> > + */
> > +static inline unsigned long calc_direct_mapping_size(void)
>
> I think patches 4 and 5 can be merged: just keep one
> calc_direct_mapping_size() to do the mapping size calculation for the
> direct mapping section; it's not that complicated. Adding
> phys_memmap_size() makes it a little redundant, in my opinion.

Thanks, I'll merge patches 4 and 5.

- Masa

>
> Thanks
> Baoquan
>
> > +{
> > + unsigned long size_tb, memory_tb;
> > +
> > + /*
> > + * Size the physical memory mapping to what is actually available,
> > + * plus padding if needed (especially for memory hotplug support).
> > + */
> > + memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
> > + CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
> > +
> > + size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
> > +
> > + /*
> > + * Adapt physical memory region size based on available memory
> > + */
> > + if (memory_tb < size_tb)
> > + size_tb = memory_tb;
> > +
> > + return size_tb;
> > +}
> > +
> > /* Initialize base and padding for each memory region randomized with KASLR */
> > void __init kernel_randomize_memory(void)
> > {
> > - size_t i;
> > - unsigned long vaddr_start, vaddr;
> > - unsigned long rand, memory_tb;
> > - struct rnd_state rand_state;
> > + unsigned long vaddr_start, vaddr, rand;
> > unsigned long remain_entropy;
> > unsigned long vmemmap_size;
> > + struct rnd_state rand_state;
> > + size_t i;
> >
> > vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
> > vaddr = vaddr_start;
> > @@ -95,20 +125,10 @@ void __init kernel_randomize_memory(void)
> > if (!kaslr_memory_enabled())
> > return;
> >
> > - kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
> > + kaslr_regions[0].size_tb = calc_direct_mapping_size();
> > kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
> >
> > - /*
> > - * Update Physical memory mapping to available and
> > - * add padding if needed (especially for memory hotplug support).
> > - */
> > BUG_ON(kaslr_regions[0].base != &page_offset_base);
> > - memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
> > - CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
> > -
> > - /* Adapt phyiscal memory region size based on available memory */
> > - if (memory_tb < kaslr_regions[0].size_tb)
> > - kaslr_regions[0].size_tb = memory_tb;
> >
> > /*
> > * Calculate the vmemmap region size in TBs, aligned to a TB
> > --
> > 2.18.1
> >