From: Claudio Imbrenda
Date: 2021-05-19 03:52:51
Subject: [PATCH v1 02/11] KVM: s390: pv: properly handle page flags for protected guests

Introduce variants of the convert and destroy page functions that also
clear the PG_arch_1 bit used to mark them as secure pages.

These new functions can only be called on pages for which a reference
is already held.
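
For illustration, a minimal sketch of the expected calling pattern
(example_make_nonsecure() is hypothetical and not part of this patch;
the caller's reference here comes from the page still being mapped):

	/* Hypothetical caller; it already holds a reference to the page */
	static int example_make_nonsecure(struct page *page)
	{
		/*
		 * The helper takes an extra temporary reference of its
		 * own, so the page cannot go away between the Ultravisor
		 * call and the PG_arch_1 update.
		 */
		return uv_convert_owned_from_secure(page_to_phys(page));
	}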

Signed-off-by: Claudio Imbrenda <[email protected]>
---
 arch/s390/include/asm/pgtable.h |  9 ++++++---
 arch/s390/include/asm/uv.h      | 10 ++++++++--
 arch/s390/kernel/uv.c           | 34 ++++++++++++++++++++++++++++++++-
 arch/s390/mm/gmap.c             |  4 +++-
 4 files changed, 50 insertions(+), 7 deletions(-)

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 29c7ecd5ad1d..141a8aaacd6d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1062,8 +1062,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
pte_t res;

res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
+ /* At this point the reference through the mapping is still present */
if (mm_is_protected(mm) && pte_present(res))
- uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+ uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
return res;
}

@@ -1079,8 +1080,9 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
pte_t res;

res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
+ /* At this point the reference through the mapping is still present */
if (mm_is_protected(vma->vm_mm) && pte_present(res))
- uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+ uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
return res;
}

@@ -1104,8 +1106,9 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
} else {
res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
+ /* At this point the reference through the mapping is still present */
if (mm_is_protected(mm) && pte_present(res))
- uv_convert_from_secure(pte_val(res) & PAGE_MASK);
+ uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
return res;
}

diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 7b98d4caee77..9aa621e84745 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -345,8 +345,9 @@ static inline int is_prot_virt_host(void)
}

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
-int uv_destroy_page(unsigned long paddr);
+int uv_destroy_owned_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
+int uv_convert_owned_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
@@ -356,7 +357,7 @@ void adjust_to_uv_max(unsigned long *vmax);
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}

-static inline int uv_destroy_page(unsigned long paddr)
+static inline int uv_destroy_owned_page(unsigned long paddr)
{
return 0;
}
@@ -365,6 +366,11 @@ static inline int uv_convert_from_secure(unsigned long paddr)
{
return 0;
}
+
+static inline int uv_convert_owned_from_secure(unsigned long paddr)
+{
+ return 0;
+}
#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index b2d2ad153067..3d94760c0371 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -121,7 +121,7 @@ static int uv_pin_shared(unsigned long paddr)
*
* @paddr: Absolute host address of page to be destroyed
*/
-int uv_destroy_page(unsigned long paddr)
+static int uv_destroy_page(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_DESTR_SEC_STOR,
@@ -141,6 +141,22 @@ int uv_destroy_page(unsigned long paddr)
return 0;
}

+/*
+ * The caller must already hold a reference to the page
+ */
+int uv_destroy_owned_page(unsigned long paddr)
+{
+ struct page *page = phys_to_page(paddr);
+ int rc;
+
+ get_page(page);
+ rc = uv_destroy_page(paddr);
+ if (!rc)
+ clear_bit(PG_arch_1, &page->flags);
+ put_page(page);
+ return rc;
+}
+
/*
* Requests the Ultravisor to encrypt a guest page and make it
* accessible to the host for paging (export).
@@ -160,6 +176,22 @@ int uv_convert_from_secure(unsigned long paddr)
return 0;
}

+/*
+ * The caller must already hold a reference to the page
+ */
+int uv_convert_owned_from_secure(unsigned long paddr)
+{
+ struct page *page = phys_to_page(paddr);
+ int rc;
+
+ get_page(page);
+ rc = uv_convert_from_secure(paddr);
+ if (!rc)
+ clear_bit(PG_arch_1, &page->flags);
+ put_page(page);
+ return rc;
+}
+
/*
* Calculate the expected ref_count for a page that would otherwise have no
* further pins. This was cribbed from similar functions in other places in
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 9bb2c7512cd5..de679facc720 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2678,8 +2678,10 @@ static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
{
pte_t pte = READ_ONCE(*ptep);

+ /* There is a reference through the mapping */
if (pte_present(pte))
- WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK));
+ WARN_ON_ONCE(uv_destroy_owned_page(pte_val(pte) & PAGE_MASK));
+
return 0;
}

--
2.31.1