Allow more pages to become unsync at kvm_mmu_get_page() time.  If we need to
create a new shadow page for a gfn but the new page is not allowed to be
unsync (level > PT_PAGE_TABLE_LEVEL), sync all of the gfn's unsync pages first.
Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 22 ++++++++++++++++++++--
1 files changed, 20 insertions(+), 2 deletions(-)
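
[Not part of the patch -- a stand-alone toy model of the policy the change
introduces, for readers who want to see it in isolation.  All names below
(toy_sp, toy_sync_pages, toy_get_page) are invented for illustration and do
not exist in KVM; the real code walks the mmu_page_hash bucket as shown in
the diff, and additionally checks that an unsync page actually exists
(unsync_sp) before calling kvm_sync_pages().]

/*
 * Toy model only: leaf (level 1) shadow pages for a gfn may stay unsync,
 * but before a higher-level shadow page is created for that gfn, every
 * unsync page mapping the gfn has to be synced.
 */
#include <stdbool.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1

struct toy_sp {
	unsigned long gfn;
	int level;
	bool unsync;
};

/* Sync every unsync page for @gfn, mirroring what kvm_sync_pages() does. */
static void toy_sync_pages(struct toy_sp *pages, int n, unsigned long gfn)
{
	for (int i = 0; i < n; i++) {
		if (pages[i].gfn != gfn || !pages[i].unsync)
			continue;
		pages[i].unsync = false;
		printf("synced sp for gfn %lu (level %d)\n",
		       pages[i].gfn, pages[i].level);
	}
}

/* The decision kvm_mmu_get_page() makes before allocating a new sp. */
static void toy_get_page(struct toy_sp *pages, int n, unsigned long gfn,
			 int level, bool direct)
{
	if (!direct && level > PT_PAGE_TABLE_LEVEL)
		toy_sync_pages(pages, n, gfn);
	/* ... the new shadow page for @gfn would be allocated here ... */
}

int main(void)
{
	struct toy_sp pages[] = {
		{ .gfn = 42, .level = 1, .unsync = true },
		{ .gfn = 42, .level = 1, .unsync = true },
		{ .gfn = 7,  .level = 1, .unsync = true },
	};

	/* Creating a level 2 indirect sp for gfn 42 syncs both its pages. */
	toy_get_page(pages, 3, 42, 2, false);
	return 0;
}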
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 13378e7..e0bb4d8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1199,6 +1199,23 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	return 0;
 }
 
+static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	struct hlist_head *bucket;
+	struct kvm_mmu_page *s;
+	struct hlist_node *node, *n;
+	unsigned index;
+
+	index = kvm_page_table_hashfn(gfn);
+	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
+		if (s->gfn != gfn || !s->unsync)
+			continue;
+		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		kvm_sync_page(vcpu, s, true);
+	}
+}
+
 struct mmu_page_path {
 	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
 	unsigned int idx[PT64_ROOT_LEVEL-1];
@@ -1334,8 +1351,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			trace_kvm_mmu_get_page(sp, false);
 			return sp;
 		}
-	if (unsync_sp)
-		kvm_sync_page(vcpu, unsync_sp, true);
+
+	if (!direct && level > PT_PAGE_TABLE_LEVEL && unsync_sp)
+		kvm_sync_pages(vcpu, gfn);
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
--
1.6.1.2
On 04/22/2010 09:13 AM, Xiao Guangrong wrote:
> Allow more pages to become unsync at kvm_mmu_get_page() time.  If we need
> to create a new shadow page for a gfn but the new page is not allowed to be
> unsync (level > PT_PAGE_TABLE_LEVEL), sync all of the gfn's unsync pages
> first.
>
This is something I wanted for a long time.
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.