Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1752822AbcCYN3E (ORCPT );
	Fri, 25 Mar 2016 09:29:04 -0400
Received: from mga09.intel.com ([134.134.136.24]:47526 "EHLO mga09.intel.com"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S1752212AbcCYN3B (ORCPT );
	Fri, 25 Mar 2016 09:29:01 -0400
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.24,391,1455004800"; d="scan'208";a="73118593"
From: Xiao Guangrong
To: pbonzini@redhat.com
Cc: gleb@kernel.org, mtosatti@redhat.com, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org, Xiao Guangrong
Subject: [PATCH 3/4] KVM: MMU: reduce the size of mmu_page_path
Date: Fri, 25 Mar 2016 21:19:37 +0800
Message-Id: <1458911978-19430-3-git-send-email-guangrong.xiao@linux.intel.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1458911978-19430-1-git-send-email-guangrong.xiao@linux.intel.com>
References: <1458911978-19430-1-git-send-email-guangrong.xiao@linux.intel.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org
Content-Length: 2888
Lines: 91

Currently only PT64_ROOT_LEVEL - 1 levels are used, one additional entry
in .parent[] is used as a sentinel, the additional entry in .idx[] is
purely wasted

This patch reduces its size and sets the sentinel on the upper level of
the place where we start from

Signed-off-by: Xiao Guangrong
---
 arch/x86/kvm/mmu.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e273144..c396e8b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1984,12 +1984,12 @@ static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
 }
 
 struct mmu_page_path {
-	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
-	unsigned int idx[PT64_ROOT_LEVEL];
+	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL - 1];
+	unsigned int idx[PT64_ROOT_LEVEL - 1];
 };
 
 #define for_each_sp(pvec, sp, parents, i)			\
-		for (i = mmu_pages_first(&pvec, &parents);	\
+		for (i = mmu_pages_next(&pvec, &parents, -1);	\
 			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
 			i = mmu_pages_next(&pvec, &parents, i))
 
@@ -2016,25 +2016,15 @@ static int mmu_pages_next(struct kvm_mmu_pages *pvec,
 	return n;
 }
 
-static int mmu_pages_first(struct kvm_mmu_pages *pvec,
-			   struct mmu_page_path *parents)
+static void
+mmu_pages_init(struct mmu_page_path *parents, struct kvm_mmu_page *parent)
 {
-	struct kvm_mmu_page *sp;
-	int level;
-
-	if (pvec->nr == 0)
-		return 0;
-
-	sp = pvec->page[0].sp;
-	level = sp->role.level;
-	WARN_ON(level == PT_PAGE_TABLE_LEVEL);
-
 	/*
-	 * Also set up a sentinel. Further entries in pvec are all
-	 * children of sp, so this element is never overwritten.
+	 * set up a sentinel. Further entries in pvec are all children of
+	 * sp, so this element is never overwritten.
 	 */
-	parents->parent[level - 1] = NULL;
-	return mmu_pages_next(pvec, parents, -1);
+	if (parent->role.level < PT64_ROOT_LEVEL)
+		parents->parent[parent->role.level - 1] = NULL;
 }
 
 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
@@ -2051,7 +2041,7 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
 		WARN_ON(idx == INVALID_INDEX);
 		clear_unsync_child_bit(sp, idx);
 		level++;
-	} while (!sp->unsync_children);
+	} while (!sp->unsync_children && (level < PT64_ROOT_LEVEL - 1));
 }
 
 static void mmu_sync_children(struct kvm_vcpu *vcpu,
@@ -2064,6 +2054,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
+	mmu_pages_init(&parents, parent);
 	while (mmu_unsync_walk(parent, &pages)) {
 		bool protected = false;
 
@@ -2335,6 +2326,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
 		return 0;
 
+	mmu_pages_init(&parents, parent);
 	while (mmu_unsync_walk(parent, &pages)) {
 		struct kvm_mmu_page *sp;
 
-- 
1.8.3.1