Page table check performs extra verifications at the time when new pages
become accessible from userspace, by checking their page table entries
(PTEs, PMDs, etc.) as they are added to the page tables. It is currently
supported on x86_64[1].

This patchset does some simple refactoring to make it easier for new
architectures to support the feature, and then adds support for it on
arm64 and riscv.
[1]https://lore.kernel.org/lkml/[email protected]/
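
At a high level, each architecture wires the checker into its page table
helpers: the "set" helpers call the page_table_check_*_set() hooks before
writing the new entry, and the get-and-clear helpers pass the old entry to
the page_table_check_*_clear() hooks. A simplified sketch of the PTE-level
pattern, using the names from the existing x86 support and from the patches
below (__set_pte_at()/__ptep_get_and_clear() are the arch-internal helpers
each patch introduces):

  static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte)
  {
          page_table_check_pte_set(mm, addr, ptep, pte);
          __set_pte_at(mm, addr, ptep, pte);      /* arch-specific write */
  }

  static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                         unsigned long addr, pte_t *ptep)
  {
          pte_t pte = __ptep_get_and_clear(mm, addr, ptep); /* arch-specific */

          page_table_check_pte_clear(mm, addr, pte);
          return pte;
  }

The same pattern is applied at the PMD and PUD levels.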
v1 -> v2:
1. Fix arm64's pte/pmd/pud_user_accessible_page() according to the
   suggestions from Catalin.
2. Also fix riscv's pte/pmd/pud_user_accessible_page().
Kefeng Wang (2):
mm: page_table_check: move pxx_user_accessible_page into x86
arm64: mm: add support for page table check
Tong Tiangen (2):
mm: page_table_check: add hooks to public helpers
riscv: mm: add support for page table check
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/pgtable.h | 65 ++++++++++++++++++++++++---
arch/riscv/Kconfig | 1 +
arch/riscv/include/asm/pgtable.h | 77 +++++++++++++++++++++++++++++---
arch/x86/include/asm/pgtable.h | 29 +++++++-----
include/linux/pgtable.h | 27 +++++++----
mm/page_table_check.c | 25 ++++-------
7 files changed, 179 insertions(+), 46 deletions(-)
--
2.18.0.huawei.25
From: Kefeng Wang <[email protected]>
As done in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
check"), add the necessary page table check hooks into the routines that
modify user page tables.
Signed-off-by: Kefeng Wang <[email protected]>
Signed-off-by: Tong Tiangen <[email protected]>
---
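Note (editorial, duplicating one hunk from the diff below for clarity): on
arm64 an entry is treated as user accessible when it is present and either
accessible to EL0 for data (PTE_USER set) or executable by EL0 (PTE_UXN
clear), so execute-only user mappings are covered as well; this appears to
be the v2 fix mentioned in the cover letter:

  static inline bool pte_user_accessible_page(pte_t pte)
  {
          /* present && (readable/writable by user || executable by user) */
          return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
  }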
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/pgtable.h | 65 +++++++++++++++++++++++++++++---
2 files changed, 61 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 962c84952c98..1880dd0a8a11 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -91,6 +91,7 @@ config ARM64
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 94e147e5456c..30ed787b5df3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -33,6 +33,7 @@
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
+#include <linux/page_table_check.h>
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
@@ -96,6 +97,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
+#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
@@ -312,7 +314,7 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
__func__, pte_val(old_pte), pte_val(pte));
}
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
@@ -343,6 +345,13 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte(ptep, pte);
}
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ page_table_check_pte_set(mm, addr, ptep, pte);
+ return __set_pte_at(mm, addr, ptep, pte);
+}
+
/*
* Huge pte definitions.
*/
@@ -438,6 +447,8 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
+#define pmd_user(pmd) pte_user(pmd_pte(pmd))
+#define pmd_user_exec(pmd) pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd) pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
@@ -485,8 +496,19 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
-#define set_pud_at(mm, addr, pudp, pud) set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ page_table_check_pmd_set(mm, addr, pmdp, pmd);
+ return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+}
+
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
+{
+ page_table_check_pud_set(mm, addr, pudp, pud);
+ return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+}
#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
@@ -627,6 +649,24 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) pud_sect(pud)
#define pud_valid(pud) pte_valid(pud_pte(pud))
+#define pud_user(pud) pte_user(pud_pte(pud))
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+ return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+ return pmd_present(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+ return pud_present(pud) && pud_user(pud);
+}
+#endif
static inline void set_pud(pud_t *pudp, pud_t pud)
{
@@ -856,11 +896,21 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ return __pte(xchg_relaxed(&pte_val(*ptep), 0));
+}
+
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- return __pte(xchg_relaxed(&pte_val(*ptep), 0));
+ pte_t pte = __ptep_get_and_clear(mm, address, ptep);
+
+ page_table_check_pte_clear(mm, address, pte);
+
+ return pte;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -868,7 +918,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+ pmd_t pmd = pte_pmd(__ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+
+ page_table_check_pmd_clear(mm, address, pmd);
+
+ return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -902,6 +956,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
+ page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif
--
2.18.0.huawei.25
From: Kefeng Wang <[email protected]>
The pxx_user_accessible_page() helpers check architecture-specific PTE bits,
so move them into x86's pgtable.h. Also add default PMD_PAGE_SIZE and
PUD_PAGE_SIZE definitions, in preparation for supporting the page table check
feature on new architectures.
Signed-off-by: Kefeng Wang <[email protected]>
---
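Note (editorial, not part of the diff below): the PMD_PAGE_SIZE/PUD_PAGE_SIZE
defaults are needed because the checker accounts a huge mapping as a run of
base pages, roughly like this (simplified; the exact call sites are in
mm/page_table_check.c):

  /* number of base pages covered by one huge PMD / PUD entry */
  unsigned long pmd_pgcnt = PMD_PAGE_SIZE >> PAGE_SHIFT;
  unsigned long pud_pgcnt = PUD_PAGE_SIZE >> PAGE_SHIFT;

Only x86 defines PMD_PAGE_SIZE/PUD_PAGE_SIZE; with the fallbacks, other
architectures use PMD_SIZE/PUD_SIZE, which have the same values on x86.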
arch/x86/include/asm/pgtable.h | 19 +++++++++++++++++++
mm/page_table_check.c | 25 ++++++++-----------------
2 files changed, 27 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 62ab07e24aef..8cd6514e3052 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1430,6 +1430,25 @@ static inline bool arch_faults_on_old_pte(void)
return false;
}
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+ return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
+ (pmd_val(pmd) & _PAGE_USER);
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+ return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
+ (pud_val(pud) & _PAGE_USER);
+}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_H */
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 2458281bff89..145f059d1c4d 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -10,6 +10,14 @@
#undef pr_fmt
#define pr_fmt(fmt) "page_table_check: " fmt
+#ifndef PMD_PAGE_SIZE
+#define PMD_PAGE_SIZE PMD_SIZE
+#endif
+
+#ifndef PUD_PAGE_SIZE
+#define PUD_PAGE_SIZE PUD_SIZE
+#endif
+
struct page_table_check {
atomic_t anon_map_count;
atomic_t file_map_count;
@@ -52,23 +60,6 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
return (void *)(page_ext) + page_table_check_ops.offset;
}
-static inline bool pte_user_accessible_page(pte_t pte)
-{
- return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
-}
-
-static inline bool pmd_user_accessible_page(pmd_t pmd)
-{
- return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
- (pmd_val(pmd) & _PAGE_USER);
-}
-
-static inline bool pud_user_accessible_page(pud_t pud)
-{
- return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
- (pud_val(pud) & _PAGE_USER);
-}
-
/*
* An enty is removed from the page table, decrement the counters for that page
* verify that it is of correct type and counters do not become negative.
--
2.18.0.huawei.25
Move ptep_clear() into include/linux/pgtable.h and add page table check
hooks to some generic helpers, in preparation for supporting the page table
check feature on new architectures. With CONFIG_PAGE_TABLE_CHECK enabled,
ptep_clear() goes through ptep_get_and_clear() so that the old entry reaches
the page_table_check_pte_clear() hook.
Signed-off-by: Tong Tiangen <[email protected]>
---
arch/x86/include/asm/pgtable.h | 10 ----------
include/linux/pgtable.h | 27 +++++++++++++++++++--------
2 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 8cd6514e3052..8c85f2eabbaa 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1077,16 +1077,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
return pte;
}
-#define __HAVE_ARCH_PTEP_CLEAR
-static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
- ptep_get_and_clear(mm, addr, ptep);
- else
- pte_clear(mm, addr, ptep);
-}
-
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index f4f4077b97aa..d27fd0ed84a9 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
+#include <linux/page_table_check.h>
#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
@@ -259,14 +260,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
-#ifndef __HAVE_ARCH_PTEP_CLEAR
-static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- pte_clear(mm, addr, ptep);
-}
-#endif
-
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -274,10 +267,23 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = *ptep;
pte_clear(mm, address, ptep);
+ page_table_check_pte_clear(mm, address, pte);
return pte;
}
#endif
+#ifndef __HAVE_ARCH_PTEP_CLEAR
+static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+#ifdef CONFIG_PAGE_TABLE_CHECK
+ ptep_get_and_clear(mm, addr, ptep);
+#else
+ pte_clear(mm, addr, ptep);
+#endif
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET
static inline pte_t ptep_get(pte_t *ptep)
{
@@ -347,7 +353,10 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
+
pmd_clear(pmdp);
+ page_table_check_pmd_clear(mm, address, pmd);
+
return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
@@ -359,6 +368,8 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
+ page_table_check_pud_clear(mm, address, pud);
+
return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
--
2.18.0.huawei.25
As done in commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
check"), add the necessary page table check hooks into the routines that
modify user page tables.
Signed-off-by: Tong Tiangen <[email protected]>
---
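Note (editorial, not part of the diff below): besides wiring up the hooks,
this patch introduces a few helpers riscv did not have yet because the
generic checker relies on them: pte_user()/pmd_user()/pud_user() test the
RISC-V "U" permission bit (_PAGE_USER), and pud_pfn() is added, presumably so
the checker can account PUD-sized user mappings. The classification itself,
taken from the hunks below:

  static inline bool pte_user_accessible_page(pte_t pte)
  {
          return pte_present(pte) && pte_user(pte); /* present and U bit set */
  }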
arch/riscv/Kconfig | 1 +
arch/riscv/include/asm/pgtable.h | 77 +++++++++++++++++++++++++++++---
2 files changed, 72 insertions(+), 6 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d140e066f758..710d591fb272 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -37,6 +37,7 @@ config RISCV
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGETLBFS if MMU
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 046b44225623..6f22d9580658 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -114,6 +114,8 @@
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
+#include <linux/page_table_check.h>
+
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
uintptr_t __a = (uintptr_t)(addr); \
@@ -315,6 +317,11 @@ static inline int pte_exec(pte_t pte)
return pte_val(pte) & _PAGE_EXEC;
}
+static inline int pte_user(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_USER;
+}
+
static inline int pte_huge(pte_t pte)
{
return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
@@ -446,7 +453,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
void flush_icache_pte(pte_t pte);
-static inline void set_pte_at(struct mm_struct *mm,
+static inline void __set_pte_at(struct mm_struct *mm,
unsigned long addr, pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
@@ -455,10 +462,17 @@ static inline void set_pte_at(struct mm_struct *mm,
set_pte(ptep, pteval);
}
+static inline void set_pte_at(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+ page_table_check_pte_set(mm, addr, ptep, pteval);
+ __set_pte_at(mm, addr, ptep, pteval);
+}
+
static inline void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- set_pte_at(mm, addr, ptep, __pte(0));
+ __set_pte_at(mm, addr, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
@@ -475,11 +489,21 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
return true;
}
+static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+}
+
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+ pte_t pte = __ptep_get_and_clear(mm, address, ptep);
+
+ page_table_check_pte_clear(mm, address, pte);
+
+ return pte;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -546,6 +570,13 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}
+#define __pud_to_phys(pud) (pud_val(pud) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
+}
+
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
@@ -567,6 +598,11 @@ static inline int pmd_young(pmd_t pmd)
return pte_young(pmd_pte(pmd));
}
+static inline int pmd_user(pmd_t pmd)
+{
+ return pte_user(pmd_pte(pmd));
+}
+
static inline pmd_t pmd_mkold(pmd_t pmd)
{
return pte_pmd(pte_mkold(pmd_pte(pmd)));
@@ -600,15 +636,39 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+ page_table_check_pmd_set(mm, addr, pmdp, pmd);
+ return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+}
+
+static inline int pud_user(pud_t pud)
+{
+ return pte_user(pud_pte(pud));
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+ page_table_check_pud_set(mm, addr, pudp, pud);
+ return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+}
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+ return pte_present(pte) && pte_user(pte);
}
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+ return pmd_leaf(pmd) && pmd_user(pmd);
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+ return pud_leaf(pud) && pud_user(pud);
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
@@ -634,7 +694,11 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+ pmd_t pmd = pte_pmd(__ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+
+ page_table_check_pmd_clear(mm, address, pmd);
+
+ return pmd;
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -648,6 +712,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
+ page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
--
2.18.0.huawei.25
On 2022/3/24 10:12, Pasha Tatashin wrote:
> On Wed, Mar 23, 2022 at 10:07 PM Tong Tiangen <[email protected]> wrote:
>>
>>
>>
>> On 2022/3/24 1:42, Pasha Tatashin wrote:
>>> On Tue, Mar 22, 2022 at 10:25 AM Tong Tiangen <[email protected]> wrote:
>>>>
>>>> Move ptep_clear() to the include/linux/pgtable.h and add page table check
>>>> relate hooks to some helpers, it's prepare for support page table check
>>>> feature on new architecture.
>>>>
>>>> Signed-off-by: Tong Tiangen <[email protected]>
>>>> ---
>>>> arch/x86/include/asm/pgtable.h | 10 ----------
>>>> include/linux/pgtable.h | 27 +++++++++++++++++++--------
>>>> 2 files changed, 19 insertions(+), 18 deletions(-)
>>>>
>>>> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
>>>> index 8cd6514e3052..8c85f2eabbaa 100644
>>>> --- a/arch/x86/include/asm/pgtable.h
>>>> +++ b/arch/x86/include/asm/pgtable.h
>>>> @@ -1077,16 +1077,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
>>>> return pte;
>>>> }
>>>>
>>>> -#define __HAVE_ARCH_PTEP_CLEAR
>>>> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
>>>> - pte_t *ptep)
>>>> -{
>>>> - if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
>>>> - ptep_get_and_clear(mm, addr, ptep);
>>>> - else
>>>> - pte_clear(mm, addr, ptep);
>>>> -}
>>>> -
>>>> #define __HAVE_ARCH_PTEP_SET_WRPROTECT
>>>> static inline void ptep_set_wrprotect(struct mm_struct *mm,
>>>> unsigned long addr, pte_t *ptep)
>>>> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
>>>> index f4f4077b97aa..d27fd0ed84a9 100644
>>>> --- a/include/linux/pgtable.h
>>>> +++ b/include/linux/pgtable.h
>>>> @@ -12,6 +12,7 @@
>>>> #include <linux/bug.h>
>>>> #include <linux/errno.h>
>>>> #include <asm-generic/pgtable_uffd.h>
>>>> +#include <linux/page_table_check.h>
>>>>
>>>> #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
>>>> defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
>>>> @@ -259,14 +260,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
>>>> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>>>> #endif
>>>>
>>>> -#ifndef __HAVE_ARCH_PTEP_CLEAR
>>>> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
>>>> - pte_t *ptep)
>>>> -{
>>>> - pte_clear(mm, addr, ptep);
>>>> -}
>>>> -#endif
>>>> -
>>>> #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
>>>> static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
>>>> unsigned long address,
>>>> @@ -274,10 +267,23 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
>>>> {
>>>> pte_t pte = *ptep;
>>>> pte_clear(mm, address, ptep);
>>>> + page_table_check_pte_clear(mm, address, pte);
>>>> return pte;
>>>> }
>>>> #endif
>>>>
>>>> +#ifndef __HAVE_ARCH_PTEP_CLEAR
>>>> +static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
>>>> + pte_t *ptep)
>>>> +{
>>>> +#ifdef CONFIG_PAGE_TABLE_CHECK
>>>> + ptep_get_and_clear(mm, addr, ptep);
>>>> +#else
>>>> + pte_clear(mm, addr, ptep);
>>>> +#endif
>>>
>>> I have a preference to use if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
>>> instead of #ifdef. The end result is the same. Otherwise it looks
>>> good.
>>>
>>> Thanks,
>>> Pasha
>>> .
>>
>> I had a little hesitation about making this change: in theory, adding an
>> if here may affect performance a little in some scenarios. However, the
>> impact on the whole call path should be small.
>
> I do not think so, the compiler should optimize out IS_ENABLED() when
> not enabled, no?
>
You are right. Quoting
https://www.kernel.org/doc/Documentation/process/coding-style.rst:

    "The compiler will constant-fold the conditional away, and include or
    exclude the block of code just as with an #ifdef, so this will **not add
    any runtime overhead**."

Thanks :)
>>
>> I will send v3 using if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK)).
>>
>> Thanks.
>> Tong
>> .
> .
On Tue, Mar 22, 2022 at 10:26 AM Tong Tiangen <[email protected]> wrote:
>
> As commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
> check"), add some necessary page table check hooks into routines that
> modify user page tables.
>
> Signed-off-by: Tong Tiangen <[email protected]>
Reviewed-by: Pasha Tatashin <[email protected]>
Thanks,
Pasha
On Tue, Mar 22, 2022 at 10:25 AM Tong Tiangen <[email protected]> wrote:
>
> From: Kefeng Wang <[email protected]>
>
> The pxx_user_accessible_page() check the PTE bit, it's
> architecture-specific code, move them into x86's pgtable.h,
> also add default PMD/PUD_PAGE_SIZE definition, it's prepare
> for support page table check feature on new architecture.
>
> Signed-off-by: Kefeng Wang <[email protected]>
Acked-by: Pasha Tatashin <[email protected]>
Thank you for this work!
On Tue, Mar 22, 2022 at 10:25 AM Tong Tiangen <[email protected]> wrote:
>
> From: Kefeng Wang <[email protected]>
>
> As commit d283d422c6c4 ("x86: mm: add x86_64 support for page table
> check"), add some necessary page table check hooks into routines that
> modify user page tables.
>
> Signed-off-by: Kefeng Wang <[email protected]>
> Signed-off-by: Tong Tiangen <[email protected]>
Reviewed-by: Pasha Tatashin <[email protected]>
On Wed, Mar 23, 2022 at 10:07 PM Tong Tiangen <[email protected]> wrote:
>
>
>
> On 2022/3/24 1:42, Pasha Tatashin wrote:
> > On Tue, Mar 22, 2022 at 10:25 AM Tong Tiangen <[email protected]> wrote:
> >>
> >> Move ptep_clear() to the include/linux/pgtable.h and add page table check
> >> relate hooks to some helpers, it's prepare for support page table check
> >> feature on new architecture.
> >>
> >> Signed-off-by: Tong Tiangen <[email protected]>
> >> ---
> >> arch/x86/include/asm/pgtable.h | 10 ----------
> >> include/linux/pgtable.h | 27 +++++++++++++++++++--------
> >> 2 files changed, 19 insertions(+), 18 deletions(-)
> >>
> >> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> >> index 8cd6514e3052..8c85f2eabbaa 100644
> >> --- a/arch/x86/include/asm/pgtable.h
> >> +++ b/arch/x86/include/asm/pgtable.h
> >> @@ -1077,16 +1077,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
> >> return pte;
> >> }
> >>
> >> -#define __HAVE_ARCH_PTEP_CLEAR
> >> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> >> - pte_t *ptep)
> >> -{
> >> - if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
> >> - ptep_get_and_clear(mm, addr, ptep);
> >> - else
> >> - pte_clear(mm, addr, ptep);
> >> -}
> >> -
> >> #define __HAVE_ARCH_PTEP_SET_WRPROTECT
> >> static inline void ptep_set_wrprotect(struct mm_struct *mm,
> >> unsigned long addr, pte_t *ptep)
> >> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> >> index f4f4077b97aa..d27fd0ed84a9 100644
> >> --- a/include/linux/pgtable.h
> >> +++ b/include/linux/pgtable.h
> >> @@ -12,6 +12,7 @@
> >> #include <linux/bug.h>
> >> #include <linux/errno.h>
> >> #include <asm-generic/pgtable_uffd.h>
> >> +#include <linux/page_table_check.h>
> >>
> >> #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
> >> defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
> >> @@ -259,14 +260,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
> >> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> >> #endif
> >>
> >> -#ifndef __HAVE_ARCH_PTEP_CLEAR
> >> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> >> - pte_t *ptep)
> >> -{
> >> - pte_clear(mm, addr, ptep);
> >> -}
> >> -#endif
> >> -
> >> #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
> >> static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
> >> unsigned long address,
> >> @@ -274,10 +267,23 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
> >> {
> >> pte_t pte = *ptep;
> >> pte_clear(mm, address, ptep);
> >> + page_table_check_pte_clear(mm, address, pte);
> >> return pte;
> >> }
> >> #endif
> >>
> >> +#ifndef __HAVE_ARCH_PTEP_CLEAR
> >> +static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> >> + pte_t *ptep)
> >> +{
> >> +#ifdef CONFIG_PAGE_TABLE_CHECK
> >> + ptep_get_and_clear(mm, addr, ptep);
> >> +#else
> >> + pte_clear(mm, addr, ptep);
> >> +#endif
> >
> > I have a preference to use if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
> > instead of #ifdef. The end result is the same. Otherwise it looks
> > good.
> >
> > Thanks,
> > Pasha
> > .
>
> I had a little hesitation about making this change: in theory, adding an
> if here may affect performance a little in some scenarios. However, the
> impact on the whole call path should be small.
I do not think so, the compiler should optimize out IS_ENABLED() when
not enabled, no?
>
> I will send v3 using if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK)).
>
> Thanks.
> Tong
> .
On Tue, Mar 22, 2022 at 10:25 AM Tong Tiangen <[email protected]> wrote:
>
> Move ptep_clear() to the include/linux/pgtable.h and add page table check
> relate hooks to some helpers, it's prepare for support page table check
> feature on new architecture.
>
> Signed-off-by: Tong Tiangen <[email protected]>
> ---
> arch/x86/include/asm/pgtable.h | 10 ----------
> include/linux/pgtable.h | 27 +++++++++++++++++++--------
> 2 files changed, 19 insertions(+), 18 deletions(-)
>
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index 8cd6514e3052..8c85f2eabbaa 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -1077,16 +1077,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
> return pte;
> }
>
> -#define __HAVE_ARCH_PTEP_CLEAR
> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> - pte_t *ptep)
> -{
> - if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
> - ptep_get_and_clear(mm, addr, ptep);
> - else
> - pte_clear(mm, addr, ptep);
> -}
> -
> #define __HAVE_ARCH_PTEP_SET_WRPROTECT
> static inline void ptep_set_wrprotect(struct mm_struct *mm,
> unsigned long addr, pte_t *ptep)
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index f4f4077b97aa..d27fd0ed84a9 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -12,6 +12,7 @@
> #include <linux/bug.h>
> #include <linux/errno.h>
> #include <asm-generic/pgtable_uffd.h>
> +#include <linux/page_table_check.h>
>
> #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
> defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
> @@ -259,14 +260,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> #endif
>
> -#ifndef __HAVE_ARCH_PTEP_CLEAR
> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> - pte_t *ptep)
> -{
> - pte_clear(mm, addr, ptep);
> -}
> -#endif
> -
> #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
> static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
> unsigned long address,
> @@ -274,10 +267,23 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
> {
> pte_t pte = *ptep;
> pte_clear(mm, address, ptep);
> + page_table_check_pte_clear(mm, address, pte);
> return pte;
> }
> #endif
>
> +#ifndef __HAVE_ARCH_PTEP_CLEAR
> +static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
> + pte_t *ptep)
> +{
> +#ifdef CONFIG_PAGE_TABLE_CHECK
> + ptep_get_and_clear(mm, addr, ptep);
> +#else
> + pte_clear(mm, addr, ptep);
> +#endif
I have a preference to use if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
instead of #ifdef. The end result is the same. Otherwise it looks
good.
Thanks,
Pasha
On 2022/3/24 1:42, Pasha Tatashin wrote:
> On Tue, Mar 22, 2022 at 10:25 AM Tong Tiangen <[email protected]> wrote:
>>
>> Move ptep_clear() to the include/linux/pgtable.h and add page table check
>> relate hooks to some helpers, it's prepare for support page table check
>> feature on new architecture.
>>
>> Signed-off-by: Tong Tiangen <[email protected]>
>> ---
>> arch/x86/include/asm/pgtable.h | 10 ----------
>> include/linux/pgtable.h | 27 +++++++++++++++++++--------
>> 2 files changed, 19 insertions(+), 18 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
>> index 8cd6514e3052..8c85f2eabbaa 100644
>> --- a/arch/x86/include/asm/pgtable.h
>> +++ b/arch/x86/include/asm/pgtable.h
>> @@ -1077,16 +1077,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
>> return pte;
>> }
>>
>> -#define __HAVE_ARCH_PTEP_CLEAR
>> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
>> - pte_t *ptep)
>> -{
>> - if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
>> - ptep_get_and_clear(mm, addr, ptep);
>> - else
>> - pte_clear(mm, addr, ptep);
>> -}
>> -
>> #define __HAVE_ARCH_PTEP_SET_WRPROTECT
>> static inline void ptep_set_wrprotect(struct mm_struct *mm,
>> unsigned long addr, pte_t *ptep)
>> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
>> index f4f4077b97aa..d27fd0ed84a9 100644
>> --- a/include/linux/pgtable.h
>> +++ b/include/linux/pgtable.h
>> @@ -12,6 +12,7 @@
>> #include <linux/bug.h>
>> #include <linux/errno.h>
>> #include <asm-generic/pgtable_uffd.h>
>> +#include <linux/page_table_check.h>
>>
>> #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
>> defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
>> @@ -259,14 +260,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
>> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>> #endif
>>
>> -#ifndef __HAVE_ARCH_PTEP_CLEAR
>> -static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
>> - pte_t *ptep)
>> -{
>> - pte_clear(mm, addr, ptep);
>> -}
>> -#endif
>> -
>> #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
>> static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
>> unsigned long address,
>> @@ -274,10 +267,23 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
>> {
>> pte_t pte = *ptep;
>> pte_clear(mm, address, ptep);
>> + page_table_check_pte_clear(mm, address, pte);
>> return pte;
>> }
>> #endif
>>
>> +#ifndef __HAVE_ARCH_PTEP_CLEAR
>> +static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
>> + pte_t *ptep)
>> +{
>> +#ifdef CONFIG_PAGE_TABLE_CHECK
>> + ptep_get_and_clear(mm, addr, ptep);
>> +#else
>> + pte_clear(mm, addr, ptep);
>> +#endif
>
> I have a preference to use if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
> instead of #ifdef. The end result is the same. Otherwise it looks
> good.
>
> Thanks,
> Pasha
> .
I had a little hesitation about making this change: in theory, adding an if
here may affect performance a little in some scenarios. However, the
impact on the whole call path should be small.
I will send v3 using if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK)).
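
Presumably the generic helper will then look like the x86 version removed
earlier in this series, i.e. something like:

  static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep)
  {
          if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
                  ptep_get_and_clear(mm, addr, ptep);
          else
                  pte_clear(mm, addr, ptep);
  }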
Thanks.
Tong
.