2020-02-07 14:28:34

by Sergey Dyasli

Subject: [PATCH v3 0/4] basic KASAN support for Xen PV domains

This series makes it possible to boot and run Xen PV kernels (Dom0 and
DomU) with CONFIG_KASAN=y. It has been used internally for some time
now, with good results in finding memory-corruption issues in the Dom0
kernel.

Only outline instrumentation is supported at the moment.
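
For context, outline instrumentation means the compiler emits a call
into the KASAN runtime before each memory access instead of inlining
the shadow-memory check. As a rough sketch (the __asan_load4() call
shown here is inserted by the compiler, never written by hand), a
4-byte load is conceptually compiled as:

	int read_val(int *p)
	{
		__asan_load4((unsigned long)p);	/* runtime shadow check */
		return *p;			/* the original access */
	}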

Sergey Dyasli (2):
kasan: introduce set_pmd_early_shadow()
x86/xen: add basic KASAN support for PV kernel

Ross Lagerwall (2):
xen: teach KASAN about grant tables
xen/netback: fix grant copy across page boundary

 arch/x86/mm/kasan_init_64.c       | 10 +++++-
 arch/x86/xen/Makefile             |  7 ++++
 arch/x86/xen/enlighten_pv.c       |  3 ++
 arch/x86/xen/mmu_pv.c             | 43 ++++++++++++++++++++++
 drivers/net/xen-netback/common.h  |  2 +-
 drivers/net/xen-netback/netback.c | 60 +++++++++++++++++++++++++------
 drivers/xen/Makefile              |  2 ++
 drivers/xen/grant-table.c         |  5 ++-
 include/linux/kasan.h             |  2 ++
 include/xen/xen-ops.h             | 10 ++++++
 lib/Kconfig.kasan                 |  3 +-
 mm/kasan/init.c                   | 32 ++++++++++++-----
 12 files changed, 156 insertions(+), 23 deletions(-)
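
To try the series, a sketch of the relevant kernel config fragment
(option names taken from lib/Kconfig.kasan and arch/x86/xen/Kconfig;
adjust for your tree):

	CONFIG_XEN=y
	CONFIG_XEN_PV=y
	CONFIG_KASAN=y
	CONFIG_KASAN_OUTLINE=y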

--
2.17.1


2020-02-07 14:28:41

by Sergey Dyasli

Subject: [PATCH v3 1/4] kasan: introduce set_pmd_early_shadow()

It is incorrect to call pmd_populate_kernel() multiple times for the
same page table from inside Xen PV domains: Xen pins a PTE page when
it is first hooked into the page-table hierarchy and rejects any
attempt to pin it again. Xen notices this during
kasan_populate_early_shadow():

(XEN) mm.c:3222:d155v0 mfn 3704b already pinned

This happens for kasan_early_shadow_pte when USE_SPLIT_PTE_PTLOCKS is
enabled, since that single PTE page is populated into many PMD
entries. Fix this by introducing set_pmd_early_shadow(), which calls
pmd_populate_kernel() only once and uses set_pmd() afterwards.
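
For context, on x86 pmd_populate_kernel() is more than a plain entry
write: it first invokes the paravirt PTE-allocation hook, which Xen PV
uses to pin the page. Simplified from arch/x86/include/asm/pgalloc.h
(exact form varies by kernel version):

	static inline void pmd_populate_kernel(struct mm_struct *mm,
					       pmd_t *pmd, pte_t *pte)
	{
		/* Xen PV hooks this and pins the PTE page. */
		paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
	}

This is why set_pmd_early_shadow() calls pmd_populate_kernel() exactly
once, so the shared early-shadow PTE page is pinned exactly once, and
writes every later PMD entry with the bare set_pmd().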

Signed-off-by: Sergey Dyasli <[email protected]>
---
v2 --> v3: no changes

v1 --> v2:
- Fix compilation without CONFIG_XEN_PV
- Slightly updated description

RFC --> v1:
- New patch
---
mm/kasan/init.c | 32 ++++++++++++++++++++++++--------
1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index ce45c491ebcd..7791fe0a7704 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -81,6 +81,26 @@ static inline bool kasan_early_shadow_page_entry(pte_t pte)
 	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
 }
 
+#ifdef CONFIG_XEN_PV
+static inline void set_pmd_early_shadow(pmd_t *pmd)
+{
+	static bool pmd_populated = false;
+	pte_t *early_shadow = lm_alias(kasan_early_shadow_pte);
+
+	if (likely(pmd_populated)) {
+		set_pmd(pmd, __pmd(__pa(early_shadow) | _PAGE_TABLE));
+	} else {
+		pmd_populate_kernel(&init_mm, pmd, early_shadow);
+		pmd_populated = true;
+	}
+}
+#else
+static inline void set_pmd_early_shadow(pmd_t *pmd)
+{
+	pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_early_shadow_pte));
+}
+#endif /* ifdef CONFIG_XEN_PV */
+
 static __init void *early_alloc(size_t size, int node)
 {
 	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
@@ -120,8 +140,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
 		next = pmd_addr_end(addr, end);
 
 		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
-			pmd_populate_kernel(&init_mm, pmd,
-					lm_alias(kasan_early_shadow_pte));
+			set_pmd_early_shadow(pmd);
 			continue;
 		}
 
@@ -157,8 +176,7 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
 			pud_populate(&init_mm, pud,
 					lm_alias(kasan_early_shadow_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd,
-					lm_alias(kasan_early_shadow_pte));
+			set_pmd_early_shadow(pmd);
 			continue;
 		}
 
@@ -198,8 +216,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 			pud_populate(&init_mm, pud,
 					lm_alias(kasan_early_shadow_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd,
-					lm_alias(kasan_early_shadow_pte));
+			set_pmd_early_shadow(pmd);
 			continue;
 		}
 
@@ -271,8 +288,7 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
 			pud_populate(&init_mm, pud,
 					lm_alias(kasan_early_shadow_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd,
-					lm_alias(kasan_early_shadow_pte));
+			set_pmd_early_shadow(pmd);
 			continue;
 		}
--
2.17.1