2024-03-25 22:28:27

by Tom Lendacky

Subject: [PATCH v3 06/14] x86/sev: Use the SVSM to create a vCPU when not in VMPL0

Using the RMPADJUST instruction, the VMSA attribute of a page can only
be changed at VMPL0. An SVSM will be present when running at VMPL1 or a
lower privilege level.
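
For reference, the direct VMPL0 path toggles the attribute with
RMPADJUST. A minimal sketch of that encoding (illustrative only; the
rmpadjust() helper and the RMPADJUST_VMSA_PAGE_BIT definition are
assumed to match the existing kernel code that this patch renames):

	static int base_snp_set_vmsa(void *va, bool vmsa)
	{
		u64 attrs;

		/*
		 * RMPADJUST must target a lesser privileged (higher
		 * numbered) VMPL, so running at VMPL0 adjusts the
		 * page's VMPL1 permissions.
		 */
		attrs = 1;
		if (vmsa)
			attrs |= RMPADJUST_VMSA_PAGE_BIT; /* set/clear the VMSA attribute */

		return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
	}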

When an SVSM is present, use the SVSM_CORE_CREATE_VCPU call or the
SVSM_CORE_DELETE_VCPU call to perform VMSA attribute changes. Use the
VMPL level supplied by the SVSM within the VMSA and when starting the
AP.
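
The VMPL ends up encoded next to the APIC ID in the AP Creation
request. A hypothetical helper (not part of the patch; the field
positions are taken from the ghcb_set_sw_exit_info_1() call in the
diff below) makes the SW_EXITINFO1 layout explicit:

	static inline u64 ap_create_exitinfo1(u32 apic_id, unsigned int vmpl)
	{
		return ((u64)apic_id << 32) |	/* bits 63:32: target APIC ID */
		       ((u64)vmpl << 16) |	/* bits 31:16: target VMPL level */
		       SVM_VMGEXIT_AP_CREATE;	/* bits 15:0: request type */
	}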

Signed-off-by: Tom Lendacky <[email protected]>
---
arch/x86/include/asm/sev.h | 2 ++
arch/x86/kernel/sev.c | 60 +++++++++++++++++++++++++++++++++-----
2 files changed, 54 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 204f0a4857d6..d7be613b7372 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -229,6 +229,8 @@ struct svsm_call {
#define SVSM_CORE_CALL(x) ((0ULL << 32) | (x))
#define SVSM_CORE_REMAP_CA 0
#define SVSM_CORE_PVALIDATE 1
+#define SVSM_CORE_CREATE_VCPU 2
+#define SVSM_CORE_DELETE_VCPU 3

#ifdef CONFIG_AMD_MEM_ENCRYPT
extern void __sev_es_ist_enter(struct pt_regs *regs);
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index d3e182d69d65..ea8b43a0f01b 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1016,7 +1016,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
}

-static int snp_set_vmsa(void *va, bool vmsa)
+static int base_snp_set_vmsa(void *va, bool vmsa)
{
u64 attrs;

@@ -1034,6 +1034,40 @@ static int snp_set_vmsa(void *va, bool vmsa)
return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

+static int svsm_snp_set_vmsa(void *va, void *caa, int apic_id, bool vmsa)
+{
+ struct svsm_call call = {};
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+
+ call.caa = this_cpu_read(svsm_caa);
+ call.rcx = __pa(va);
+
+ if (vmsa) {
+ /* Protocol 0, Call ID 2 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_CREATE_VCPU);
+ call.rdx = __pa(caa);
+ call.r8 = apic_id;
+ } else {
+ /* Protocol 0, Call ID 3 */
+ call.rax = SVSM_CORE_CALL(SVSM_CORE_DELETE_VCPU);
+ }
+
+ ret = svsm_protocol(&call);
+
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static int snp_set_vmsa(void *va, void *caa, int apic_id, bool vmsa)
+{
+ return vmpl ? svsm_snp_set_vmsa(va, caa, apic_id, vmsa)
+ : base_snp_set_vmsa(va, vmsa);
+}
+
#define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
#define INIT_CS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
#define INIT_DS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
@@ -1065,11 +1099,11 @@ static void *snp_alloc_vmsa_page(void)
return page_address(p + 1);
}

-static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa, int apic_id)
{
int err;

- err = snp_set_vmsa(vmsa, false);
+ err = snp_set_vmsa(vmsa, NULL, apic_id, false);
if (err)
pr_err("clear VMSA page failed (%u), leaking page\n", err);
else
@@ -1080,6 +1114,7 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
{
struct sev_es_save_area *cur_vmsa, *vmsa;
struct ghcb_state state;
+ struct svsm_ca *caa;
unsigned long flags;
struct ghcb *ghcb;
u8 sipi_vector;
@@ -1126,6 +1161,12 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
if (!vmsa)
return -ENOMEM;

+ /*
+ * If an SVSM is present, then the SVSM CAA per-CPU variable will
+ * have a value, otherwise it will be NULL.
+ */
+ caa = per_cpu(svsm_caa, cpu);
+
/* CR4 should maintain the MCE value */
cr4 = native_read_cr4() & X86_CR4_MCE;

@@ -1173,11 +1214,11 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
* VMPL level
* SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
*/
- vmsa->vmpl = 0;
+ vmsa->vmpl = vmpl;
vmsa->sev_features = sev_status >> 2;

/* Switch the page over to a VMSA page now that it is initialized */
- ret = snp_set_vmsa(vmsa, true);
+ ret = snp_set_vmsa(vmsa, caa, apic_id, true);
if (ret) {
pr_err("set VMSA page failed (%u)\n", ret);
free_page((unsigned long)vmsa);
@@ -1193,7 +1234,10 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
vc_ghcb_invalidate(ghcb);
ghcb_set_rax(ghcb, vmsa->sev_features);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
- ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
+ ghcb_set_sw_exit_info_1(ghcb,
+ ((u64)apic_id << 32) |
+ ((u64)vmpl << 16) |
+ SVM_VMGEXIT_AP_CREATE);
ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));

sev_es_wr_ghcb_msr(__pa(ghcb));
@@ -1211,13 +1255,13 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)

/* Perform cleanup if there was an error */
if (ret) {
- snp_cleanup_vmsa(vmsa);
+ snp_cleanup_vmsa(vmsa, apic_id);
vmsa = NULL;
}

/* Free up any previous VMSA page */
if (cur_vmsa)
- snp_cleanup_vmsa(cur_vmsa);
+ snp_cleanup_vmsa(cur_vmsa, apic_id);

/* Record the current VMSA page */
per_cpu(sev_vmsa, cpu) = vmsa;
--
2.43.2



2024-04-12 15:28:57

by Gupta, Pankaj

Subject: Re: [PATCH v3 06/14] x86/sev: Use the SVSM to create a vCPU when not in VMPL0

On 3/25/2024 11:26 PM, Tom Lendacky wrote:
> Using the RMPADJUST instruction, the VMSA attribute of a page can only
> be changed at VMPL0. An SVSM will be present when running at VMPL1 or a
> lower privilege level.
>
> When an SVSM is present, use the SVSM_CORE_CREATE_VCPU call or the
> SVSM_CORE_DELETE_VCPU call to perform VMSA attribute changes. Use the
> VMPL level supplied by the SVSM within the VMSA and when starting the
> AP.
>
> Signed-off-by: Tom Lendacky <[email protected]>

Reviewed-by: Pankaj Gupta <[email protected]>
