2021-10-21 17:47:45

by Peter Gonda

Subject: [PATCH 1/5 V11] KVM: SEV: Refactor out sev_es_state struct

Move SEV-ES vCPU metadata into new sev_es_state struct from vcpu_svm.

Signed-off-by: Peter Gonda <[email protected]>
Suggested-by: Tom Lendacky <[email protected]>
Acked-by: Tom Lendacky <[email protected]>
Cc: Marc Orr <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Dr. David Alan Gilbert <[email protected]>
Cc: Brijesh Singh <[email protected]>
Cc: Tom Lendacky <[email protected]>
Cc: Vitaly Kuznetsov <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Jim Mattson <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
arch/x86/kvm/svm/sev.c | 83 +++++++++++++++++++++---------------------
arch/x86/kvm/svm/svm.c | 8 ++--
arch/x86/kvm/svm/svm.h | 26 +++++++------
3 files changed, 61 insertions(+), 56 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0d21d59936e5..109b223c0b58 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -590,7 +590,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
* traditional VMSA as it has been built so far (in prep
* for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
*/
- memcpy(svm->vmsa, save, sizeof(*save));
+ memcpy(svm->sev_es.vmsa, save, sizeof(*save));

return 0;
}
@@ -612,11 +612,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
* the VMSA memory content (i.e it will write the same memory region
* with the guest's key), so invalidate it first.
*/
- clflush_cache_range(svm->vmsa, PAGE_SIZE);
+ clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

vmsa.reserved = 0;
vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
- vmsa.address = __sme_pa(svm->vmsa);
+ vmsa.address = __sme_pa(svm->sev_es.vmsa);
vmsa.len = PAGE_SIZE;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
if (ret)
@@ -2031,16 +2031,16 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
svm = to_svm(vcpu);

if (vcpu->arch.guest_state_protected)
- sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
- __free_page(virt_to_page(svm->vmsa));
+ sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+ __free_page(virt_to_page(svm->sev_es.vmsa));

- if (svm->ghcb_sa_free)
- kfree(svm->ghcb_sa);
+ if (svm->sev_es.ghcb_sa_free)
+ kfree(svm->sev_es.ghcb_sa);
}

static void dump_ghcb(struct vcpu_svm *svm)
{
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;
unsigned int nbits;

/* Re-use the dump_invalid_vmcb module parameter */
@@ -2066,7 +2066,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;

/*
* The GHCB protocol so far allows for the following data
@@ -2086,7 +2086,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;
u64 exit_code;

/*
@@ -2133,7 +2133,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
struct ghcb *ghcb;
u64 exit_code = 0;

- ghcb = svm->ghcb;
+ ghcb = svm->sev_es.ghcb;

/* Only GHCB Usage code 0 is supported */
if (ghcb->ghcb_usage)
@@ -2251,33 +2251,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)

void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
- if (!svm->ghcb)
+ if (!svm->sev_es.ghcb)
return;

- if (svm->ghcb_sa_free) {
+ if (svm->sev_es.ghcb_sa_free) {
/*
* The scratch area lives outside the GHCB, so there is a
* buffer that, depending on the operation performed, may
* need to be synced, then freed.
*/
- if (svm->ghcb_sa_sync) {
+ if (svm->sev_es.ghcb_sa_sync) {
kvm_write_guest(svm->vcpu.kvm,
- ghcb_get_sw_scratch(svm->ghcb),
- svm->ghcb_sa, svm->ghcb_sa_len);
- svm->ghcb_sa_sync = false;
+ ghcb_get_sw_scratch(svm->sev_es.ghcb),
+ svm->sev_es.ghcb_sa,
+ svm->sev_es.ghcb_sa_len);
+ svm->sev_es.ghcb_sa_sync = false;
}

- kfree(svm->ghcb_sa);
- svm->ghcb_sa = NULL;
- svm->ghcb_sa_free = false;
+ kfree(svm->sev_es.ghcb_sa);
+ svm->sev_es.ghcb_sa = NULL;
+ svm->sev_es.ghcb_sa_free = false;
}

- trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
+ trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);

sev_es_sync_to_ghcb(svm);

- kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
- svm->ghcb = NULL;
+ kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
+ svm->sev_es.ghcb = NULL;
}

void pre_sev_run(struct vcpu_svm *svm, int cpu)
@@ -2307,7 +2308,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
struct vmcb_control_area *control = &svm->vmcb->control;
- struct ghcb *ghcb = svm->ghcb;
+ struct ghcb *ghcb = svm->sev_es.ghcb;
u64 ghcb_scratch_beg, ghcb_scratch_end;
u64 scratch_gpa_beg, scratch_gpa_end;
void *scratch_va;
@@ -2343,7 +2344,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
return false;
}

- scratch_va = (void *)svm->ghcb;
+ scratch_va = (void *)svm->sev_es.ghcb;
scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
} else {
/*
@@ -2373,12 +2374,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
* the vCPU next time (i.e. a read was requested so the data
* must be written back to the guest memory).
*/
- svm->ghcb_sa_sync = sync;
- svm->ghcb_sa_free = true;
+ svm->sev_es.ghcb_sa_sync = sync;
+ svm->sev_es.ghcb_sa_free = true;
}

- svm->ghcb_sa = scratch_va;
- svm->ghcb_sa_len = len;
+ svm->sev_es.ghcb_sa = scratch_va;
+ svm->sev_es.ghcb_sa_len = len;

return true;
}
@@ -2497,15 +2498,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
return -EINVAL;
}

- if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+ if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
/* Unable to map GHCB from guest */
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
ghcb_gpa);
return -EINVAL;
}

- svm->ghcb = svm->ghcb_map.hva;
- ghcb = svm->ghcb_map.hva;
+ svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+ ghcb = svm->sev_es.ghcb_map.hva;

trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);

@@ -2528,7 +2529,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = kvm_sev_es_mmio_read(vcpu,
control->exit_info_1,
control->exit_info_2,
- svm->ghcb_sa);
+ svm->sev_es.ghcb_sa);
break;
case SVM_VMGEXIT_MMIO_WRITE:
if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
@@ -2537,7 +2538,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = kvm_sev_es_mmio_write(vcpu,
control->exit_info_1,
control->exit_info_2,
- svm->ghcb_sa);
+ svm->sev_es.ghcb_sa);
break;
case SVM_VMGEXIT_NMI_COMPLETE:
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
@@ -2587,8 +2588,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
return -EINVAL;

- return kvm_sev_es_string_io(&svm->vcpu, size, port,
- svm->ghcb_sa, svm->ghcb_sa_len / size, in);
+ return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
+ svm->sev_es.ghcb_sa_len / size, in);
}

void sev_es_init_vmcb(struct vcpu_svm *svm)
@@ -2603,7 +2604,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
* VMCB page. Do not include the encryption mask on the VMSA physical
* address since hardware will access it using the guest key.
*/
- svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
+ svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);

/* Can't intercept CR register access, HV can't modify CR registers */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
@@ -2675,8 +2676,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
struct vcpu_svm *svm = to_svm(vcpu);

/* First SIPI: Use the values as initially set by the VMM */
- if (!svm->received_first_sipi) {
- svm->received_first_sipi = true;
+ if (!svm->sev_es.received_first_sipi) {
+ svm->sev_es.received_first_sipi = true;
return;
}

@@ -2685,8 +2686,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
* the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
* non-zero value.
*/
- if (!svm->ghcb)
+ if (!svm->sev_es.ghcb)
return;

- ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 989685098b3e..68294491c23d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1372,7 +1372,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);

if (vmsa_page)
- svm->vmsa = page_address(vmsa_page);
+ svm->sev_es.vmsa = page_address(vmsa_page);

svm->guest_state_loaded = false;

@@ -2762,11 +2762,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
struct vcpu_svm *svm = to_svm(vcpu);
- if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+ if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
return kvm_complete_insn_gp(vcpu, err);

- ghcb_set_sw_exit_info_1(svm->ghcb, 1);
- ghcb_set_sw_exit_info_2(svm->ghcb,
+ ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
X86_TRAP_GP |
SVM_EVTINJ_TYPE_EXEPT |
SVM_EVTINJ_VALID);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5d30db599e10..6d8d762d208f 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -123,6 +123,20 @@ struct svm_nested_state {
bool initialized;
};

+struct vcpu_sev_es_state {
+ /* SEV-ES support */
+ struct vmcb_save_area *vmsa;
+ struct ghcb *ghcb;
+ struct kvm_host_map ghcb_map;
+ bool received_first_sipi;
+
+ /* SEV-ES scratch area support */
+ void *ghcb_sa;
+ u32 ghcb_sa_len;
+ bool ghcb_sa_sync;
+ bool ghcb_sa_free;
+};
+
struct vcpu_svm {
struct kvm_vcpu vcpu;
/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
@@ -183,17 +197,7 @@ struct vcpu_svm {
DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
} shadow_msr_intercept;

- /* SEV-ES support */
- struct vmcb_save_area *vmsa;
- struct ghcb *ghcb;
- struct kvm_host_map ghcb_map;
- bool received_first_sipi;
-
- /* SEV-ES scratch area support */
- void *ghcb_sa;
- u32 ghcb_sa_len;
- bool ghcb_sa_sync;
- bool ghcb_sa_free;
+ struct vcpu_sev_es_state sev_es;

bool guest_state_loaded;
};
--
2.33.0.1079.g6e70778dc9-goog


2021-11-04 21:14:01

by Sean Christopherson

Subject: Re: [PATCH 1/5 V11] KVM: SEV: Refactor out sev_es_state struct

FWIW, "git format-patch -v $version" will generate the version info for you.
Based on the weird ordering of the shortlog and the cover letter subject

[PATCH 1/5 V11] KVM: SEV: Refactor out sev_es_state struct

vs.

[PATCH V11 2/5] KVM: SEV: Add support for SEV intra host migration

it looks like you might be hand-editing these?
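
For example, a minimal invocation for a series like this one might look like the
following (the -5 and --cover-letter here are assumptions to match the 5-patch
series, not something stated in the thread):

  git format-patch -v11 -5 --cover-letter

which stamps every subject, cover letter included, as "[PATCH v11 n/5]", so the
version and numbering stay consistent without any hand-editing.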

Note, format-patch also takes --rfc, and you can override the entire thing with a
free-form prefix via --subject-prefix, e.g. I use it to call out "[kvm-unit-tests PATCH ...]".

The PATCH part of the "prefix" needs to be provided manually, but then you do fun
things like replace PATCH with something else entirely, append an incremental
version, e.g. v2.1, which some people do if they need to make a small change to
one patch in a large series.
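
Roughly (hypothetical invocations, adjust the revision ranges to taste):

  git format-patch --subject-prefix="kvm-unit-tests PATCH" -3
  git format-patch -v2.1 -1

The first yields "[kvm-unit-tests PATCH n/3]"; the second relies on format-patch
accepting a non-integral reroll count (git 2.33 or so and later) to produce
"[PATCH v2.1]".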

On Thu, Oct 21, 2021, Peter Gonda wrote:
> Move SEV-ES vCPU metadata into new sev_es_state struct from vcpu_svm.
>
> Signed-off-by: Peter Gonda <[email protected]>
> Suggested-by: Tom Lendacky <[email protected]>
> Acked-by: Tom Lendacky <[email protected]>
> Cc: Marc Orr <[email protected]>
> Cc: Paolo Bonzini <[email protected]>
> Cc: Sean Christopherson <[email protected]>
> Cc: David Rientjes <[email protected]>
> Cc: Dr. David Alan Gilbert <[email protected]>
> Cc: Brijesh Singh <[email protected]>
> Cc: Tom Lendacky <[email protected]>
> Cc: Vitaly Kuznetsov <[email protected]>
> Cc: Wanpeng Li <[email protected]>
> Cc: Jim Mattson <[email protected]>
> Cc: Joerg Roedel <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Borislav Petkov <[email protected]>
> Cc: "H. Peter Anvin" <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> ---

At some point in the future we really need to do some cleanup to reduce the
"depth" of things like svm->sev_es... and svm->vcpu.arch..., but for now,

Reviewed-by: Sean Christopherson <[email protected]>
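
Purely for illustration (nothing proposed in this thread), that kind of depth
reduction could be as small as caching the sub-struct pointer once per function,
e.g. rewriting sev_es_string_io() from the patch above:

int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
{
	/* Grab the SEV-ES state once instead of repeating svm->sev_es.<field>. */
	struct vcpu_sev_es_state *es = &svm->sev_es;

	if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
		return -EINVAL;

	return kvm_sev_es_string_io(&svm->vcpu, size, port, es->ghcb_sa,
				    es->ghcb_sa_len / size, in);
}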