2021-03-13 08:51:17

by Alexandre Ghiti

[permalink] [raw]
Subject: [PATCH v3 2/2] riscv: Cleanup KASAN_VMALLOC support

When KASAN vmalloc region is populated, there is no userspace process and
the page table in use is swapper_pg_dir, so there is no need to read
SATP. Then we can use the same scheme used by kasan_populate_p*d
functions to go through the page table, which harmonizes the code.

In addition, make use of set_pgd that goes through all unused page table
levels, contrary to p*d_populate functions, which makes this function work
whatever the number of page table levels.

Signed-off-by: Alexandre Ghiti <[email protected]>
Reviewed-by: Palmer Dabbelt <[email protected]>
---
arch/riscv/mm/kasan_init.c | 59 ++++++++++++--------------------------
1 file changed, 18 insertions(+), 41 deletions(-)

diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 57bf4ae09361..c16178918239 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -11,18 +11,6 @@
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

-static __init void *early_alloc(size_t size, int node)
-{
- void *ptr = memblock_alloc_try_nid(size, size,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
-
- if (!ptr)
- panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
- __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
-
- return ptr;
-}
-
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
{
@@ -155,38 +143,27 @@ static void __init kasan_populate(void *start, void *end)
memset(start, KASAN_SHADOW_INIT, end - start);
}

-void __init kasan_shallow_populate(void *start, void *end)
+static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
- unsigned long vaddr = (unsigned long)start & PAGE_MASK;
- unsigned long vend = PAGE_ALIGN((unsigned long)end);
- unsigned long pfn;
- int index;
+ unsigned long next;
void *p;
- pud_t *pud_dir, *pud_k;
- pgd_t *pgd_dir, *pgd_k;
- p4d_t *p4d_dir, *p4d_k;
-
- while (vaddr < vend) {
- index = pgd_index(vaddr);
- pfn = csr_read(CSR_SATP) & SATP_PPN;
- pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
- pgd_k = init_mm.pgd + index;
- pgd_dir = pgd_offset_k(vaddr);
- set_pgd(pgd_dir, *pgd_k);
-
- p4d_dir = p4d_offset(pgd_dir, vaddr);
- p4d_k = p4d_offset(pgd_k, vaddr);
-
- vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
- pud_dir = pud_offset(p4d_dir, vaddr);
- pud_k = pud_offset(p4d_k, vaddr);
-
- if (pud_present(*pud_dir)) {
- p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
- pud_populate(&init_mm, pud_dir, p);
+ pgd_t *pgd_k = pgd_offset_k(vaddr);
+
+ do {
+ next = pgd_addr_end(vaddr, end);
+ if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+ p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
- vaddr += PAGE_SIZE;
- }
+ } while (pgd_k++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_shallow_populate(void *start, void *end)
+{
+ unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+ unsigned long vend = PAGE_ALIGN((unsigned long)end);
+
+ kasan_shallow_populate_pgd(vaddr, vend);

local_flush_tlb_all();
}
--
2.20.1


2021-03-30 05:08:01

by Palmer Dabbelt

[permalink] [raw]
Subject: Re: [PATCH v3 2/2] riscv: Cleanup KASAN_VMALLOC support

On Sat, 13 Mar 2021 00:45:05 PST (-0800), [email protected] wrote:
> When KASAN vmalloc region is populated, there is no userspace process and
> the page table in use is swapper_pg_dir, so there is no need to read
> SATP. Then we can use the same scheme used by kasan_populate_p*d
> functions to go through the page table, which harmonizes the code.
>
> In addition, make use of set_pgd that goes through all unused page table
> levels, contrary to p*d_populate functions, which makes this function work
> whatever the number of page table levels.
>
> Signed-off-by: Alexandre Ghiti <[email protected]>
> Reviewed-by: Palmer Dabbelt <[email protected]>
> ---
> arch/riscv/mm/kasan_init.c | 59 ++++++++++++--------------------------
> 1 file changed, 18 insertions(+), 41 deletions(-)
>
> diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
> index 57bf4ae09361..c16178918239 100644
> --- a/arch/riscv/mm/kasan_init.c
> +++ b/arch/riscv/mm/kasan_init.c
> @@ -11,18 +11,6 @@
> #include <asm/fixmap.h>
> #include <asm/pgalloc.h>
>
> -static __init void *early_alloc(size_t size, int node)
> -{
> - void *ptr = memblock_alloc_try_nid(size, size,
> - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
> -
> - if (!ptr)
> - panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
> - __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
> -
> - return ptr;
> -}
> -
> extern pgd_t early_pg_dir[PTRS_PER_PGD];
> asmlinkage void __init kasan_early_init(void)
> {
> @@ -155,38 +143,27 @@ static void __init kasan_populate(void *start, void *end)
> memset(start, KASAN_SHADOW_INIT, end - start);
> }
>
> -void __init kasan_shallow_populate(void *start, void *end)
> +static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
> {
> - unsigned long vaddr = (unsigned long)start & PAGE_MASK;
> - unsigned long vend = PAGE_ALIGN((unsigned long)end);
> - unsigned long pfn;
> - int index;
> + unsigned long next;
> void *p;
> - pud_t *pud_dir, *pud_k;
> - pgd_t *pgd_dir, *pgd_k;
> - p4d_t *p4d_dir, *p4d_k;
> -
> - while (vaddr < vend) {
> - index = pgd_index(vaddr);
> - pfn = csr_read(CSR_SATP) & SATP_PPN;
> - pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
> - pgd_k = init_mm.pgd + index;
> - pgd_dir = pgd_offset_k(vaddr);
> - set_pgd(pgd_dir, *pgd_k);
> -
> - p4d_dir = p4d_offset(pgd_dir, vaddr);
> - p4d_k = p4d_offset(pgd_k, vaddr);
> -
> - vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
> - pud_dir = pud_offset(p4d_dir, vaddr);
> - pud_k = pud_offset(p4d_k, vaddr);
> -
> - if (pud_present(*pud_dir)) {
> - p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
> - pud_populate(&init_mm, pud_dir, p);
> + pgd_t *pgd_k = pgd_offset_k(vaddr);
> +
> + do {
> + next = pgd_addr_end(vaddr, end);
> + if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
> + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
> }
> - vaddr += PAGE_SIZE;
> - }
> + } while (pgd_k++, vaddr = next, vaddr != end);
> +}
> +
> +static void __init kasan_shallow_populate(void *start, void *end)
> +{
> + unsigned long vaddr = (unsigned long)start & PAGE_MASK;
> + unsigned long vend = PAGE_ALIGN((unsigned long)end);
> +
> + kasan_shallow_populate_pgd(vaddr, vend);
>
> local_flush_tlb_all();
> }

Thanks, this is on for-next.

2021-03-30 09:51:55

by Geert Uytterhoeven

[permalink] [raw]
Subject: Re: [PATCH v3 2/2] riscv: Cleanup KASAN_VMALLOC support

Hi Palmer,

On Tue, Mar 30, 2021 at 7:08 AM Palmer Dabbelt <[email protected]> wrote:
> On Sat, 13 Mar 2021 00:45:05 PST (-0800), [email protected] wrote:
> > When KASAN vmalloc region is populated, there is no userspace process and
> > the page table in use is swapper_pg_dir, so there is no need to read
> > SATP. Then we can use the same scheme used by kasan_populate_p*d
> > functions to go through the page table, which harmonizes the code.
> >
> > In addition, make use of set_pgd that goes through all unused page table
> > levels, contrary to p*d_populate functions, which makes this function work
> > whatever the number of page table levels.
> >
> > Signed-off-by: Alexandre Ghiti <[email protected]>
> > Reviewed-by: Palmer Dabbelt <[email protected]>
> > ---
> > arch/riscv/mm/kasan_init.c | 59 ++++++++++++--------------------------
> > 1 file changed, 18 insertions(+), 41 deletions(-)
> >
> > diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
> > index 57bf4ae09361..c16178918239 100644
> > --- a/arch/riscv/mm/kasan_init.c
> > +++ b/arch/riscv/mm/kasan_init.c
> > @@ -11,18 +11,6 @@
> > #include <asm/fixmap.h>
> > #include <asm/pgalloc.h>
> >
> > -static __init void *early_alloc(size_t size, int node)
> > -{
> > - void *ptr = memblock_alloc_try_nid(size, size,
> > - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
> > -
> > - if (!ptr)
> > - panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
> > - __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
> > -
> > - return ptr;
> > -}
> > -
> > extern pgd_t early_pg_dir[PTRS_PER_PGD];
> > asmlinkage void __init kasan_early_init(void)
> > {
> > @@ -155,38 +143,27 @@ static void __init kasan_populate(void *start, void *end)
> > memset(start, KASAN_SHADOW_INIT, end - start);
> > }
> >
> > -void __init kasan_shallow_populate(void *start, void *end)
> > +static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
> > {
> > - unsigned long vaddr = (unsigned long)start & PAGE_MASK;
> > - unsigned long vend = PAGE_ALIGN((unsigned long)end);
> > - unsigned long pfn;
> > - int index;
> > + unsigned long next;
> > void *p;
> > - pud_t *pud_dir, *pud_k;
> > - pgd_t *pgd_dir, *pgd_k;
> > - p4d_t *p4d_dir, *p4d_k;
> > -
> > - while (vaddr < vend) {
> > - index = pgd_index(vaddr);
> > - pfn = csr_read(CSR_SATP) & SATP_PPN;
> > - pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
> > - pgd_k = init_mm.pgd + index;
> > - pgd_dir = pgd_offset_k(vaddr);
> > - set_pgd(pgd_dir, *pgd_k);
> > -
> > - p4d_dir = p4d_offset(pgd_dir, vaddr);
> > - p4d_k = p4d_offset(pgd_k, vaddr);
> > -
> > - vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
> > - pud_dir = pud_offset(p4d_dir, vaddr);
> > - pud_k = pud_offset(p4d_k, vaddr);
> > -
> > - if (pud_present(*pud_dir)) {
> > - p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
> > - pud_populate(&init_mm, pud_dir, p);
> > + pgd_t *pgd_k = pgd_offset_k(vaddr);
> > +
> > + do {
> > + next = pgd_addr_end(vaddr, end);
> > + if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
> > + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> > + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
> > }
> > - vaddr += PAGE_SIZE;
> > - }
> > + } while (pgd_k++, vaddr = next, vaddr != end);
> > +}
> > +
> > +static void __init kasan_shallow_populate(void *start, void *end)
> > +{
> > + unsigned long vaddr = (unsigned long)start & PAGE_MASK;
> > + unsigned long vend = PAGE_ALIGN((unsigned long)end);
> > +
> > + kasan_shallow_populate_pgd(vaddr, vend);
> >
> > local_flush_tlb_all();
> > }
>
> Thanks, this is on for-next.

Your for-next does not include your fixes branch, hence they now conflict,
and for-next lacks the local_flush_tlb_all().

Gr{oetje,eeting}s,

Geert

--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- [email protected]

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds

2021-04-15 06:17:12

by Palmer Dabbelt

[permalink] [raw]
Subject: Re: [PATCH v3 2/2] riscv: Cleanup KASAN_VMALLOC support

On Tue, 30 Mar 2021 02:47:30 PDT (-0700), [email protected] wrote:
> Hi Palmer,
>
> On Tue, Mar 30, 2021 at 7:08 AM Palmer Dabbelt <[email protected]> wrote:
>> On Sat, 13 Mar 2021 00:45:05 PST (-0800), [email protected] wrote:
>> > When KASAN vmalloc region is populated, there is no userspace process and
>> > the page table in use is swapper_pg_dir, so there is no need to read
>> > SATP. Then we can use the same scheme used by kasan_populate_p*d
>> > functions to go through the page table, which harmonizes the code.
>> >
>> > In addition, make use of set_pgd that goes through all unused page table
>> > levels, contrary to p*d_populate functions, which makes this function work
>> > whatever the number of page table levels.
>> >
>> > Signed-off-by: Alexandre Ghiti <[email protected]>
>> > Reviewed-by: Palmer Dabbelt <[email protected]>
>> > ---
>> > arch/riscv/mm/kasan_init.c | 59 ++++++++++++--------------------------
>> > 1 file changed, 18 insertions(+), 41 deletions(-)
>> >
>> > diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
>> > index 57bf4ae09361..c16178918239 100644
>> > --- a/arch/riscv/mm/kasan_init.c
>> > +++ b/arch/riscv/mm/kasan_init.c
>> > @@ -11,18 +11,6 @@
>> > #include <asm/fixmap.h>
>> > #include <asm/pgalloc.h>
>> >
>> > -static __init void *early_alloc(size_t size, int node)
>> > -{
>> > - void *ptr = memblock_alloc_try_nid(size, size,
>> > - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
>> > -
>> > - if (!ptr)
>> > - panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
>> > - __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
>> > -
>> > - return ptr;
>> > -}
>> > -
>> > extern pgd_t early_pg_dir[PTRS_PER_PGD];
>> > asmlinkage void __init kasan_early_init(void)
>> > {
>> > @@ -155,38 +143,27 @@ static void __init kasan_populate(void *start, void *end)
>> > memset(start, KASAN_SHADOW_INIT, end - start);
>> > }
>> >
>> > -void __init kasan_shallow_populate(void *start, void *end)
>> > +static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
>> > {
>> > - unsigned long vaddr = (unsigned long)start & PAGE_MASK;
>> > - unsigned long vend = PAGE_ALIGN((unsigned long)end);
>> > - unsigned long pfn;
>> > - int index;
>> > + unsigned long next;
>> > void *p;
>> > - pud_t *pud_dir, *pud_k;
>> > - pgd_t *pgd_dir, *pgd_k;
>> > - p4d_t *p4d_dir, *p4d_k;
>> > -
>> > - while (vaddr < vend) {
>> > - index = pgd_index(vaddr);
>> > - pfn = csr_read(CSR_SATP) & SATP_PPN;
>> > - pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
>> > - pgd_k = init_mm.pgd + index;
>> > - pgd_dir = pgd_offset_k(vaddr);
>> > - set_pgd(pgd_dir, *pgd_k);
>> > -
>> > - p4d_dir = p4d_offset(pgd_dir, vaddr);
>> > - p4d_k = p4d_offset(pgd_k, vaddr);
>> > -
>> > - vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
>> > - pud_dir = pud_offset(p4d_dir, vaddr);
>> > - pud_k = pud_offset(p4d_k, vaddr);
>> > -
>> > - if (pud_present(*pud_dir)) {
>> > - p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
>> > - pud_populate(&init_mm, pud_dir, p);
>> > + pgd_t *pgd_k = pgd_offset_k(vaddr);
>> > +
>> > + do {
>> > + next = pgd_addr_end(vaddr, end);
>> > + if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
>> > + p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>> > + set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
>> > }
>> > - vaddr += PAGE_SIZE;
>> > - }
>> > + } while (pgd_k++, vaddr = next, vaddr != end);
>> > +}
>> > +
>> > +static void __init kasan_shallow_populate(void *start, void *end)
>> > +{
>> > + unsigned long vaddr = (unsigned long)start & PAGE_MASK;
>> > + unsigned long vend = PAGE_ALIGN((unsigned long)end);
>> > +
>> > + kasan_shallow_populate_pgd(vaddr, vend);
>> >
>> > local_flush_tlb_all();
>> > }
>>
>> Thanks, this is on for-next.
>
> Your for-next does not include your fixes branch, hence they now conflict,
> and for-next lacks the local_flush_tlb_all().

This came up before and I don't think we ever sorted out what the right
thing to do is. Right now I'm keeping for-next pinned at an early RC,
but fast-forwarding fixes to the latest RC every time I send a PR. I
don't have fixes merged back into for-next because I don't want those
merges to show up when I send my merge window PRs.

For this one I purposefully left out the local_flush_tlb_all() when I
pulled in this patch, and was planning on fixing it up along with any
other merge conflicts when I send along the PR. It does all seem like a
bit of a song and dance here, though, so I'm open to suggestions as to
how to run this better -- though last time I went through that exercise
it seemed like everyone had their own way of doing it, they all had a
different set of issues, and I was at least familiar with this flavor of
craziness.

I was kind of tempted to convert for-next over into a branch that only
contains merges, though, which would make it a bit easier to merge fixes
in.

>
> Gr{oetje,eeting}s,
>
> Geert