The KVM page-table library refcounts the pages of concatenated stage-2
PGDs individually. However, the host's stage-2 PGD is currently managed
by EL2 as a single high-order compound page, which can cause the
refcount of the tail pages to reach 0 when they really shouldn't, hence
corrupting the page-table.
Fix this by introducing a new hyp_split_page() helper in the EL2 page
allocator (matching EL1's split_page() function), and make use of it
from host_s2_zalloc_page().
Fixes: 1025c8c0c6ac ("KVM: arm64: Wrap the host with a stage 2")
Suggested-by: Will Deacon <[email protected]>
Signed-off-by: Quentin Perret <[email protected]>
---
arch/arm64/kvm/hyp/include/nvhe/gfp.h | 1 +
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 6 +++++-
arch/arm64/kvm/hyp/nvhe/page_alloc.c | 14 ++++++++++++++
3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index fb0f523d1492..0a048dc06a7d 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -24,6 +24,7 @@ struct hyp_pool {
/* Allocation */
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
void hyp_get_page(struct hyp_pool *pool, void *addr);
void hyp_put_page(struct hyp_pool *pool, void *addr);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index bacd493a4eac..93a79736c283 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -35,7 +35,11 @@ const u8 pkvm_hyp_id = 1;
static void *host_s2_zalloc_pages_exact(size_t size)
{
- return hyp_alloc_pages(&host_s2_pool, get_order(size));
+ void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+ hyp_split_page(hyp_virt_to_page(addr));
+
+ return addr;
}
static void *host_s2_zalloc_page(void *pool)
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index 41fc25bdfb34..a6e874e61a40 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -193,6 +193,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
hyp_spin_unlock(&pool->lock);
}
+void hyp_split_page(struct hyp_page *p)
+{
+ unsigned short order = p->order;
+ unsigned int i;
+
+ p->order = 0;
+ for (i = 1; i < (1 << order); i++) {
+ struct hyp_page *tail = p + i;
+
+ tail->order = 0;
+ hyp_set_page_refcounted(tail);
+ }
+}
+
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
unsigned short i = order;
--
2.33.0.800.g4c38ced690-goog
Hi Quentin,
On Mon, 04 Oct 2021 10:03:13 +0100,
Quentin Perret <[email protected]> wrote:
>
> The KVM page-table library refcounts the pages of concatenated stage-2
> PGDs individually. However, the host's stage-2 PGD is currently managed
> by EL2 as a single high-order compound page, which can cause the
> refcount of the tail pages to reach 0 when they really shouldn't, hence
> corrupting the page-table.
nit: this comment only applies to the protected mode, right? As far as
I can tell, 'classic' KVM is just fine.
>
> Fix this by introducing a new hyp_split_page() helper in the EL2 page
> allocator (matching EL1's split_page() function), and make use of it
uber nit: split_page() is not an EL1 function; it's more of a standard
kernel function.
> from host_s2_zalloc_page().
>
> Fixes: 1025c8c0c6ac ("KVM: arm64: Wrap the host with a stage 2")
> Suggested-by: Will Deacon <[email protected]>
> Signed-off-by: Quentin Perret <[email protected]>
> ---
> arch/arm64/kvm/hyp/include/nvhe/gfp.h | 1 +
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 6 +++++-
> arch/arm64/kvm/hyp/nvhe/page_alloc.c | 14 ++++++++++++++
> 3 files changed, 20 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> index fb0f523d1492..0a048dc06a7d 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> @@ -24,6 +24,7 @@ struct hyp_pool {
>
> /* Allocation */
> void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
> +void hyp_split_page(struct hyp_page *page);
> void hyp_get_page(struct hyp_pool *pool, void *addr);
> void hyp_put_page(struct hyp_pool *pool, void *addr);
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index bacd493a4eac..93a79736c283 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -35,7 +35,11 @@ const u8 pkvm_hyp_id = 1;
>
> static void *host_s2_zalloc_pages_exact(size_t size)
> {
> - return hyp_alloc_pages(&host_s2_pool, get_order(size));
> + void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
> +
> + hyp_split_page(hyp_virt_to_page(addr));
The only reason this doesn't lead to a subsequent memory leak is that
concatenated page tables are always a power of two, right?
If so, that deserves a comment, because I don't think this works in
the general case unless you actively free the pages that are between
size and (1 << order).
> +
> + return addr;
> }
>
> static void *host_s2_zalloc_page(void *pool)
> diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> index 41fc25bdfb34..a6e874e61a40 100644
> --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
> @@ -193,6 +193,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
> hyp_spin_unlock(&pool->lock);
> }
>
> +void hyp_split_page(struct hyp_page *p)
> +{
> + unsigned short order = p->order;
> + unsigned int i;
> +
> + p->order = 0;
> + for (i = 1; i < (1 << order); i++) {
> + struct hyp_page *tail = p + i;
> +
> + tail->order = 0;
> + hyp_set_page_refcounted(tail);
> + }
> +}
> +
> void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
> {
> unsigned short i = order;
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
Hey Marc,
On Monday 04 Oct 2021 at 10:55:13 (+0100), Marc Zyngier wrote:
> Hi Quentin,
>
> On Mon, 04 Oct 2021 10:03:13 +0100,
> Quentin Perret <[email protected]> wrote:
> >
> > The KVM page-table library refcounts the pages of concatenated stage-2
> > PGDs individually. However, the host's stage-2 PGD is currently managed
> > by EL2 as a single high-order compound page, which can cause the
> > refcount of the tail pages to reach 0 when they really shouldn't, hence
> > corrupting the page-table.
>
> nit: this comment only applies to the protected mode, right? As far as
> I can tell, 'classic' KVM is just fine.
Correct, this really only applies to the host stage-2, which implies
we're in protected mode. I'll make that a bit more explicit.
> > Fix this by introducing a new hyp_split_page() helper in the EL2 page
> > allocator (matching EL1's split_page() function), and make use of it
>
> uber nit: split_page() is not an EL1 function; it's more of a standard
> kernel function.
Fair enough :)
> > from host_s2_zalloc_page().
> >
> > Fixes: 1025c8c0c6ac ("KVM: arm64: Wrap the host with a stage 2")
> > Suggested-by: Will Deacon <[email protected]>
> > Signed-off-by: Quentin Perret <[email protected]>
> > ---
> > arch/arm64/kvm/hyp/include/nvhe/gfp.h | 1 +
> > arch/arm64/kvm/hyp/nvhe/mem_protect.c | 6 +++++-
> > arch/arm64/kvm/hyp/nvhe/page_alloc.c | 14 ++++++++++++++
> > 3 files changed, 20 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> > index fb0f523d1492..0a048dc06a7d 100644
> > --- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> > +++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
> > @@ -24,6 +24,7 @@ struct hyp_pool {
> >
> > /* Allocation */
> > void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
> > +void hyp_split_page(struct hyp_page *page);
> > void hyp_get_page(struct hyp_pool *pool, void *addr);
> > void hyp_put_page(struct hyp_pool *pool, void *addr);
> >
> > diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > index bacd493a4eac..93a79736c283 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> > @@ -35,7 +35,11 @@ const u8 pkvm_hyp_id = 1;
> >
> > static void *host_s2_zalloc_pages_exact(size_t size)
> > {
> > - return hyp_alloc_pages(&host_s2_pool, get_order(size));
> > + void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
> > +
> > + hyp_split_page(hyp_virt_to_page(addr));
>
> The only reason this doesn't lead to a subsequent memory leak is that
> concatenated page tables are always a power of two, right?
Indeed, and also because the host stage-2 is _never_ freed, so that's
not memory we're going to reclaim anyway -- we don't have an
implementation of ->free_pages_exact() in the host stage-2 mm_ops.
> If so, that deserves a comment, because I don't think this works in
> the general case unless you actively free the pages that are between
> size and (1 << order).
Ack, that'll probably confuse me too in a few weeks, so a comment won't
hurt. I'll re-spin shortly.
Thanks,
Quentin