Add a blank line between the acc_track and dirty_log handling in __tdp_mmu_set_spte(), and around the account_nx check in tdp_mmu_link_sp(), to improve readability. No functional change.
Signed-off-by: Zhou nan <[email protected]>
---
arch/x86/kvm/mmu/tdp_mmu.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 922b06bf4b94..a4ab6aee4db2 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -743,6 +743,7 @@ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
if (record_acc_track)
handle_changed_spte_acc_track(old_spte, new_spte, level);
+
if (record_dirty_log)
handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
new_spte, level);
@@ -1149,8 +1150,10 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
+
if (account_nx)
account_huge_nx_page(kvm, sp);
+
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
return 0;
--
2.18.2