Introduce mmu_topup_memory_cache_atomic(), which supports topping up a
memory cache in atomic context.

When we prefetch ptes we also need to top up the rmap descriptor cache,
and that path cannot sleep, so it must allocate with GFP_ATOMIC.
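
As a rough illustration (not part of this patch), a prefetch path would
be expected to call the new helper before installing sptes; the caller
name and the elided walk below are assumptions made only for this sketch:

/*
 * Hypothetical caller sketch: the prefetch path runs with mmu_lock held
 * and cannot sleep, so it tops up the rmap desc cache with GFP_ATOMIC
 * first and simply skips the prefetch if the allocation fails.
 */
static void example_pte_prefetch(struct kvm_vcpu *vcpu)
{
	if (pte_prefetch_topup_memory_cache(vcpu))
		return;		/* no memory; skip the optimization */

	/* ... walk neighbouring guest ptes and install sptes ... */
}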
Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 29 +++++++++++++++++++++++++----
1 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 734b106..92ff099 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -290,15 +290,16 @@ static void __set_spte(u64 *sptep, u64 spte)
#endif
}
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- struct kmem_cache *base_cache, int min)
+static int __mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+ struct kmem_cache *base_cache, int min,
+ int max, gfp_t flags)
{
void *obj;
if (cache->nobjs >= min)
return 0;
- while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
- obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
+ while (cache->nobjs < max) {
+ obj = kmem_cache_zalloc(base_cache, flags);
if (!obj)
return -ENOMEM;
cache->objects[cache->nobjs++] = obj;
@@ -306,6 +307,26 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
return 0;
}
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+ struct kmem_cache *base_cache, int min)
+{
+ return __mmu_topup_memory_cache(cache, base_cache, min,
+ ARRAY_SIZE(cache->objects), GFP_KERNEL);
+}
+
+static int mmu_topup_memory_cache_atomic(struct kvm_mmu_memory_cache *cache,
+ struct kmem_cache *base_cache, int min)
+{
+ return __mmu_topup_memory_cache(cache, base_cache, min, min,
+ GFP_ATOMIC);
+}
+
+static int pte_prefetch_topup_memory_cache(struct kvm_vcpu *vcpu)
+{
+ return mmu_topup_memory_cache_atomic(&vcpu->arch.mmu_rmap_desc_cache,
+ rmap_desc_cache, 1);
+}
+
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
struct kmem_cache *cache)
{
--
1.6.1.2