On Mon, Jun 20, 2022 at 4:03 PM Ashish Kalra <[email protected]> wrote:
>
> From: Brijesh Singh <[email protected]>
>
> The integrity guarantee of SEV-SNP is enforced through the RMP table.
> The RMP is used with standard x86 and IOMMU page tables to enforce memory
> restrictions and page access rights. The RMP check is enforced as soon as
> SEV-SNP is enabled globally in the system. When hardware encounters an
> RMP checks failure, it raises a page-fault exception.
nit: "RMP checks ..." -> "RMP-check ..."
>
> The rmp_make_private() and rmp_make_shared() helpers are used to add
> or remove the pages from the RMP table. Improve the rmp_make_private() to
> invalid state so that pages cannot be used in the direct-map after its
nit: "... to invalid state ..." -> "... to invalidate state ..."
nit: "... after its" -> "... after they're"
(Here, and in the patch subject too.)
> added in the RMP table, and restore to its default valid permission after
nit: "... restore to its ..." -> "... restored to their ..."
> the pages are removed from the RMP table.
>
> Signed-off-by: Brijesh Singh <[email protected]>
> ---
> arch/x86/kernel/sev.c | 61 ++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 60 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index f6c64a722e94..734cddd837f5 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -2451,10 +2451,42 @@ int psmash(u64 pfn)
> }
> EXPORT_SYMBOL_GPL(psmash);
>
> +static int restore_direct_map(u64 pfn, int npages)
> +{
> + int i, ret = 0;
> +
> + for (i = 0; i < npages; i++) {
> + ret = set_direct_map_default_noflush(pfn_to_page(pfn + i));
> + if (ret)
> + goto cleanup;
> + }
> +
> +cleanup:
> + WARN(ret > 0, "Failed to restore direct map for pfn 0x%llx\n", pfn + i);
> + return ret;
> +}
> +
> +static int invalid_direct_map(unsigned long pfn, int npages)
I think we should rename "invalid_direct_map()" to "invalidate_direct_map()" — "invalid" is an adjective, and the function performs an action.
> +{
> + int i, ret = 0;
> +
> + for (i = 0; i < npages; i++) {
> + ret = set_direct_map_invalid_noflush(pfn_to_page(pfn + i));
> + if (ret)
> + goto cleanup;
> + }
> +
> + return 0;
> +
> +cleanup:
> + restore_direct_map(pfn, i);
> + return ret;
> +}
> +
> static int rmpupdate(u64 pfn, struct rmpupdate *val)
> {
> unsigned long paddr = pfn << PAGE_SHIFT;
> - int ret;
> + int ret, level, npages;
>
> if (!pfn_valid(pfn))
> return -EINVAL;
> @@ -2462,11 +2494,38 @@ static int rmpupdate(u64 pfn, struct rmpupdate *val)
> if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> return -ENXIO;
>
> + level = RMP_TO_X86_PG_LEVEL(val->pagesize);
> + npages = page_level_size(level) / PAGE_SIZE;
> +
> + /*
> + * If page is getting assigned in the RMP table then unmap it from the
> + * direct map.
> + */
> + if (val->assigned) {
> + if (invalid_direct_map(pfn, npages)) {
> + pr_err("Failed to unmap pfn 0x%llx pages %d from direct_map\n",
> + pfn, npages);
> + return -EFAULT;
> + }
> + }
> +
> /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
> asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
> : "=a"(ret)
> : "a"(paddr), "c"((unsigned long)val)
> : "memory", "cc");
> +
> + /*
> + * Restore the direct map after the page is removed from the RMP table.
> + */
> + if (!ret && !val->assigned) {
> + if (restore_direct_map(pfn, npages)) {
> + pr_err("Failed to map pfn 0x%llx pages %d in direct_map\n",
> + pfn, npages);
> + return -EFAULT;
> + }
> + }
> +
> return ret;
> }
>
> --
> 2.25.1
>
>