2018-07-23 07:49:11

by Tianyu Lan

Subject: [PATCH] KVM: Compile hv_remote_flush_tlb() and check_ept_pointer() only when CONFIG_HYPERV is enabled

This patch avoids a compilation warning when CONFIG_HYPERV isn't enabled: check_ept_pointer() and hv_remote_flush_tlb() are only used by Hyper-V-specific code, so move their definitions into the existing IS_ENABLED(CONFIG_HYPERV) block instead of leaving them unused in non-Hyper-V builds.

Signed-off-by: Lan Tianyu <[email protected]>
---
arch/x86/kvm/vmx.c | 84 +++++++++++++++++++++++++++---------------------------
1 file changed, 42 insertions(+), 42 deletions(-)
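
For reference, a minimal sketch of the guard pattern the patch applies, assuming the kernel's IS_ENABLED() macro from <linux/kconfig.h>; the helper name below is hypothetical and not part of the patch:

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * Defined only when Hyper-V support is built in.  Its callers also live
 * inside this #if block, so !CONFIG_HYPERV builds never see an unused
 * static function and the compiler has nothing to warn about.
 */
static int hv_example_only_helper(void)
{
	return 0;
}
#else /* !IS_ENABLED(CONFIG_HYPERV) */
/* No definition here: non-Hyper-V builds carry no unused code at all. */
#endif /* IS_ENABLED(CONFIG_HYPERV) */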

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 16ea8b629416..2f379e575940 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1367,6 +1367,48 @@ static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
* GUEST_IA32_RTIT_CTL = 0x00002814,
*/
}
+
+/* check_ept_pointer() should be under protection of ept_pointer_lock. */
+static void check_ept_pointer(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ u64 tmp_eptp = INVALID_PAGE;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!VALID_PAGE(tmp_eptp)) {
+ tmp_eptp = to_vmx(vcpu)->ept_pointer;
+ } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
+ to_kvm_vmx(kvm)->ept_pointers_match
+ = EPT_POINTERS_MISMATCH;
+ return;
+ }
+ }
+
+ to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
+}
+
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+ int ret;
+
+ spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+
+ if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
+ check_ept_pointer(kvm);
+
+ if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ ret = hyperv_flush_guest_mapping(
+ to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+
+out:
+ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ return ret;
+}
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -4827,48 +4869,6 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
}
}

-/* check_ept_pointer() should be under protection of ept_pointer_lock. */
-static void check_ept_pointer(struct kvm *kvm)
-{
- struct kvm_vcpu *vcpu;
- u64 tmp_eptp = INVALID_PAGE;
- int i;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!VALID_PAGE(tmp_eptp)) {
- tmp_eptp = to_vmx(vcpu)->ept_pointer;
- } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
- to_kvm_vmx(kvm)->ept_pointers_match
- = EPT_POINTERS_MISMATCH;
- return;
- }
- }
-
- to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
-}
-
-static int hv_remote_flush_tlb(struct kvm *kvm)
-{
- int ret;
-
- spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
-
- if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
- check_ept_pointer(kvm);
-
- if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
- ret = -ENOTSUPP;
- goto out;
- }
-
- ret = hyperv_flush_guest_mapping(
- to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
-
-out:
- spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
- return ret;
-}
-
static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
--
2.14.3


2018-08-02 13:00:48

by Paolo Bonzini

Subject: Re: [PATCH] KVM: Compile hv_remote_flush_tlb() and check_ept_pointer() only when CONFIG_HYPERV is enabled

On 23/07/2018 09:47, Tianyu Lan wrote:
> This patch avoids a compilation warning when CONFIG_HYPERV isn't enabled: check_ept_pointer() and hv_remote_flush_tlb() are only used by Hyper-V-specific code, so move their definitions into the existing IS_ENABLED(CONFIG_HYPERV) block instead of leaving them unused in non-Hyper-V builds.
>
> Signed-off-by: Lan Tianyu <[email protected]>

Queued, thanks.

Paolo