2019-05-05 08:56:07

by Nadav Amit

Subject: [PATCH] x86/mm: Initialize pgd cache during mm initialization

Poking-mm initialization might require duplicating the PGD during early
boot. Initialize the PGD cache earlier to prevent boot failures.

Cc: Stephen Rothwell <[email protected]>
Cc: Rick Edgecombe <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Fixes: 4fc19708b165 ("x86/alternatives: Initialize temporary mm for patching")
Reported-by: kernel test robot <[email protected]>
Signed-off-by: Nadav Amit <[email protected]>
---
 arch/x86/include/asm/pgtable.h |  1 +
 arch/x86/mm/pgtable.c          | 10 ++++++----
 init/main.c                    |  1 +
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 6b6bfdfe83aa..9635662e1163 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1027,6 +1027,7 @@ static inline int pgd_none(pgd_t pgd)
 
 extern int direct_gbpages;
 void init_mem_mapping(void);
+void pgd_cache_init(void);
 void early_alloc_pgt_buf(void);
 extern void memblock_find_dma_reserve(void);
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 3dbf440d4114..1f67b1e15bf6 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -373,14 +373,14 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
 
 static struct kmem_cache *pgd_cache;
 
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
 {
 	/*
 	 * When PAE kernel is running as a Xen domain, it does not use
 	 * shared kernel pmd. And this requires a whole page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return 0;
+		return;
 
 	/*
 	 * when PAE kernel is not running as a Xen domain, it uses
@@ -390,9 +390,7 @@ static int __init pgd_cache_init(void)
 	 */
 	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
 				      SLAB_PANIC, NULL);
-	return 0;
 }
-core_initcall(pgd_cache_init);
 
 static inline pgd_t *_pgd_alloc(void)
 {
@@ -420,6 +418,10 @@ static inline void _pgd_free(pgd_t *pgd)
 }
 #else
 
+void __init pgd_cache_init(void)
+{
+}
+
 static inline pgd_t *_pgd_alloc(void)
 {
 	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
diff --git a/init/main.c b/init/main.c
index 949eed8015ec..7fac4ac2fede 100644
--- a/init/main.c
+++ b/init/main.c
@@ -537,6 +537,7 @@ static void __init mm_init(void)
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
 	pti_init();
+	pgd_cache_init();
 }
 
 void __init __weak arch_call_rest_init(void)
--
2.17.1
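
The failure being fixed is an ordering one: pgd_cache_init() used to run as a
core_initcall(), i.e. from do_initcalls() after start_kernel() has finished,
while poking_init() runs inside start_kernel() and allocates a PGD for the
temporary patching mm. On a 32-bit PAE kernel (not running as a Xen PV domain)
that allocation comes from pgd_cache, which at that point was still NULL. The
user-space sketch below only illustrates the ordering; pgd_cache_init_standin()
and pgd_alloc_for_poking() are made-up stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's pgd_cache: NULL until the cache is set up. */
static void *pgd_cache;

/* Stand-in for pgd_cache_init(): was a core_initcall(), now runs from mm_init(). */
static void pgd_cache_init_standin(void)
{
	pgd_cache = malloc(32);		/* stands in for kmem_cache_create() */
}

/* Stand-in for the PAE _pgd_alloc() path that needs the cache. */
static void *pgd_alloc_for_poking(void)
{
	if (!pgd_cache) {
		fprintf(stderr, "pgd_cache used before it was initialized\n");
		exit(1);		/* the reported boot failure */
	}
	return pgd_cache;		/* stands in for kmem_cache_alloc() */
}

int main(void)
{
	/*
	 * Old order: poking_init() ran from start_kernel(), core initcalls
	 * only later from kernel_init() -> do_initcalls(), so the allocation
	 * below would have found pgd_cache still NULL.
	 *
	 * New order (this patch): mm_init() initializes the cache first.
	 */
	pgd_cache_init_standin();
	pgd_alloc_for_poking();
	puts("PGD allocated from an initialized cache");
	return 0;
}

Moving pgd_cache_init() into mm_init(), as the hunk above does, guarantees the
cache exists before poking_init() asks for a PGD.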


Subject: [tip:x86/mm] x86/mm: Initialize PGD cache during mm initialization

Commit-ID: ef5f22b4e5caf7e5ac12b28d4c9566c95d709ba5
Gitweb: https://git.kernel.org/tip/ef5f22b4e5caf7e5ac12b28d4c9566c95d709ba5
Author: Nadav Amit <[email protected]>
AuthorDate: Sat, 4 May 2019 18:11:24 -0700
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 5 May 2019 12:43:13 +0200

x86/mm: Initialize PGD cache during mm initialization

Poking-mm initialization might require duplicating the PGD during early
boot. Initialize the PGD cache earlier to prevent boot failures.

Reported-by: kernel test robot <[email protected]>
Signed-off-by: Nadav Amit <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rick Edgecombe <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: 4fc19708b165 ("x86/alternatives: Initialize temporary mm for patching")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 arch/x86/include/asm/pgtable.h |  1 +
 arch/x86/mm/pgtable.c          | 10 ++++++----
 init/main.c                    |  1 +
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 702db5904753..d488b3053330 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1010,6 +1010,7 @@ static inline int pgd_none(pgd_t pgd)
 
 extern int direct_gbpages;
 void init_mem_mapping(void);
+void pgd_cache_init(void);
 void early_alloc_pgt_buf(void);
 extern void memblock_find_dma_reserve(void);
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7bd01709a091..c8177045b7d4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -373,14 +373,14 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
 
 static struct kmem_cache *pgd_cache;
 
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
 {
 	/*
 	 * When PAE kernel is running as a Xen domain, it does not use
 	 * shared kernel pmd. And this requires a whole page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return 0;
+		return;
 
 	/*
 	 * when PAE kernel is not running as a Xen domain, it uses
@@ -390,9 +390,7 @@ static int __init pgd_cache_init(void)
 	 */
 	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
 				      SLAB_PANIC, NULL);
-	return 0;
 }
-core_initcall(pgd_cache_init);
 
 static inline pgd_t *_pgd_alloc(void)
 {
@@ -420,6 +418,10 @@ static inline void _pgd_free(pgd_t *pgd)
 }
 #else
 
+void __init pgd_cache_init(void)
+{
+}
+
 static inline pgd_t *_pgd_alloc(void)
 {
 	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
diff --git a/init/main.c b/init/main.c
index 95dd9406ee31..1d1cb8f10cad 100644
--- a/init/main.c
+++ b/init/main.c
@@ -537,6 +537,7 @@ static void __init mm_init(void)
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
 	pti_init();
+	pgd_cache_init();
 }
 
 void __init __weak arch_call_rest_init(void)
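
The 32-byte slab in the hunk above exists because of the PAE layout: the
top-level table (what Linux calls the pgd on 32-bit PAE) has only 4 entries of
8 bytes each, so dedicating a full 4096-byte page to it would waste nearly all
of the page. A trivial sketch of that arithmetic, for illustration only:

#include <stdio.h>

int main(void)
{
	/* PAE: bits 31:30 of the virtual address index the top-level table. */
	unsigned int pae_pgd_entries = 4;				/* 2 index bits -> 4 entries */
	unsigned int pae_entry_size = sizeof(unsigned long long);	/* 64-bit entries */

	/* Prints "PAE PGD size: 32 bytes", matching the PGD_SIZE used for pgd_cache. */
	printf("PAE PGD size: %u bytes\n", pae_pgd_entries * pae_entry_size);
	return 0;
}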

Subject: [tip:x86/mm] x86/mm: Initialize PGD cache during mm initialization

Commit-ID: caa841360134f863987f2d4f77b8dc2fbb7596f8
Gitweb: https://git.kernel.org/tip/caa841360134f863987f2d4f77b8dc2fbb7596f8
Author: Nadav Amit <[email protected]>
AuthorDate: Sat, 4 May 2019 18:11:24 -0700
Committer: Ingo Molnar <[email protected]>
CommitDate: Sun, 5 May 2019 20:32:46 +0200

x86/mm: Initialize PGD cache during mm initialization

Poking-mm initialization might require duplicating the PGD during early
boot. Initialize the PGD cache earlier to prevent boot failures.

Reported-by: kernel test robot <[email protected]>
Signed-off-by: Nadav Amit <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rick Edgecombe <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: 4fc19708b165 ("x86/alternatives: Initialize temporary mm for patching")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 arch/x86/mm/pgtable.c         | 10 ++++++----
 include/asm-generic/pgtable.h |  2 ++
 init/main.c                   |  3 +++
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7bd01709a091..c8177045b7d4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -373,14 +373,14 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
 
 static struct kmem_cache *pgd_cache;
 
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
 {
 	/*
 	 * When PAE kernel is running as a Xen domain, it does not use
 	 * shared kernel pmd. And this requires a whole page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return 0;
+		return;
 
 	/*
 	 * when PAE kernel is not running as a Xen domain, it uses
@@ -390,9 +390,7 @@ static int __init pgd_cache_init(void)
 	 */
 	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
 				      SLAB_PANIC, NULL);
-	return 0;
 }
-core_initcall(pgd_cache_init);
 
 static inline pgd_t *_pgd_alloc(void)
 {
@@ -420,6 +418,10 @@ static inline void _pgd_free(pgd_t *pgd)
 }
 #else
 
+void __init pgd_cache_init(void)
+{
+}
+
 static inline pgd_t *_pgd_alloc(void)
 {
 	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index fa782fba51ee..75d9d68a6de7 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1126,6 +1126,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 static inline void init_espfix_bsp(void) { }
 #endif
 
+extern void __init pgd_cache_init(void);
+
 #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
 static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
 {
diff --git a/init/main.c b/init/main.c
index 95dd9406ee31..9dc2f3b4f753 100644
--- a/init/main.c
+++ b/init/main.c
@@ -506,6 +506,8 @@ void __init __weak mem_encrypt_init(void) { }
 
 void __init __weak poking_init(void) { }
 
+void __init __weak pgd_cache_init(void) { }
+
 bool initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
 
@@ -537,6 +539,7 @@ static void __init mm_init(void)
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
 	pti_init();
+	pgd_cache_init();
 }
 
 void __init __weak arch_call_rest_init(void)
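
Compared with the earlier tip commit, this version wires the hook up through
include/asm-generic/pgtable.h plus a __weak no-op in init/main.c rather than
an x86-only header, so every other architecture keeps linking without having
to provide its own pgd_cache_init(). A minimal user-space sketch of that
weak-override pattern follows (GCC/Clang attribute syntax; the file names and
function bodies are illustrative, not kernel code):

/* generic.c: the fallback, analogous to the __weak stub added to init/main.c. */
#include <stdio.h>

/* Weak definition: the linker keeps it only when no strong definition exists. */
__attribute__((weak)) void pgd_cache_init(void)
{
	puts("generic no-op pgd_cache_init()");
}

int main(void)
{
	pgd_cache_init();
	return 0;
}

/*
 * arch.c: an optional strong override, analogous to the x86 implementation:
 *
 *	#include <stdio.h>
 *
 *	void pgd_cache_init(void)
 *	{
 *		puts("arch-specific pgd_cache_init()");
 *	}
 *
 * "cc generic.c" alone runs the weak no-op; "cc generic.c arch.c" links the
 * strong definition instead, which is how the kernel picks the x86 version.
 */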