Merge the check of "sp->role.cr4_pae != !!is_pae(vcpu)" with the check of
"vcpu->arch.mmu.sync_page(vcpu, sp) == 0", since kvm_mmu_prepare_zap_page()
is called under both conditions.
Signed-off-by: Lan Tianyu <[email protected]>
---
arch/x86/kvm/mmu.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d594690d8b95..c2c1fd66fb36 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2117,12 +2117,8 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
- if (sp->role.cr4_pae != !!is_pae(vcpu)) {
- kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
- return false;
- }
-
- if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
+ if (sp->role.cr4_pae != !!is_pae(vcpu)
+ || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
--
2.14.3
On 18/07/2018 08:12, Tianyu Lan wrote:
> Merge check of "sp->role.cr4_pae != !!is_pae(vcpu)" and "vcpu->
> arch.mmu.sync_page(vcpu, sp) == 0". kvm_mmu_prepare_zap_page()
> is called under both these conditions.
>
> Signed-off-by: Lan Tianyu <[email protected]>
> ---
> arch/x86/kvm/mmu.c | 8 ++------
> 1 file changed, 2 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index d594690d8b95..c2c1fd66fb36 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2117,12 +2117,8 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
> static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
> struct list_head *invalid_list)
> {
> - if (sp->role.cr4_pae != !!is_pae(vcpu)) {
> - kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
> - return false;
> - }
> -
> - if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
> + if (sp->role.cr4_pae != !!is_pae(vcpu)
> + || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
> kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
> return false;
> }
>
Queued, thanks.
Paolo