2013-10-02 14:56:31

by Paolo Bonzini

Subject: [PATCH 0/7] KVM: x86: small MMU cleanups

A few small things (uninteresting callbacks or arguments, duplicate or
dead code) that I noticed while reviewing Gleb's latest nVMX series.

Paolo Bonzini (7):
KVM: mmu: remove uninteresting MMU "free" callbacks
KVM: mmu: remove uninteresting MMU "new_cr3" callbacks
KVM: mmu: unify destroy_kvm_mmu with kvm_mmu_unload
KVM: mmu: change useless int return types to void
KVM: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu
KVM: mmu: remove ASSERT(vcpu)
KVM: mmu: replace assertions with MMU_WARN_ON

arch/x86/include/asm/kvm_host.h | 7 +-
arch/x86/kvm/mmu.c | 170 ++++++++++++----------------------------
arch/x86/kvm/mmu.h | 5 +-
arch/x86/kvm/svm.c | 10 +--
arch/x86/kvm/vmx.c | 8 +-
arch/x86/kvm/x86.c | 4 +-
6 files changed, 63 insertions(+), 141 deletions(-)

--
1.8.3.1


2013-10-02 14:56:36

by Paolo Bonzini

Subject: [PATCH 1/7] KVM: mmu: remove uninteresting MMU "free" callbacks

The free MMU callback has been a wrapper for mmu_free_roots since mmu_free_roots
itself was introduced (commit 17ac10a, [PATCH] KVM: MU: Special treatment
for shadow pae root pages, 2007-01-05), and has always been the same for all
MMU cases. Remove the indirection as it is useless.
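
For reference, the nonpaging flavor being removed below is simply:

	static void nonpaging_free(struct kvm_vcpu *vcpu)
	{
		mmu_free_roots(vcpu);
	}

and paging_free in turn just calls nonpaging_free.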

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 1 -
arch/x86/kvm/mmu.c | 22 ++++------------------
2 files changed, 4 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c76ff74..3ea4d40 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -261,7 +261,6 @@ struct kvm_mmu {
bool prefault);
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
- void (*free)(struct kvm_vcpu *vcpu);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
struct x86_exception *exception);
gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cf95cfe..1c4d580 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3424,18 +3424,12 @@ out_unlock:
return 0;
}

-static void nonpaging_free(struct kvm_vcpu *vcpu)
-{
- mmu_free_roots(vcpu);
-}
-
static int nonpaging_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
context->new_cr3 = nonpaging_new_cr3;
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
- context->free = nonpaging_free;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -3471,11 +3465,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

-static void paging_free(struct kvm_vcpu *vcpu)
-{
- nonpaging_free(vcpu);
-}
-
static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
unsigned access, int *nr_present)
{
@@ -3683,7 +3672,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
- context->free = paging_free;
context->shadow_root_level = level;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
@@ -3709,7 +3697,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
context->new_cr3 = paging_new_cr3;
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
- context->free = paging_free;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
@@ -3732,7 +3719,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->base_role.word = 0;
context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;
- context->free = nonpaging_free;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -3812,7 +3798,6 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;
context->update_pte = ept_update_pte;
- context->free = paging_free;
context->root_level = context->shadow_root_level;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
@@ -3890,9 +3875,10 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
- if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
- /* mmu.free() should set root_hpa = INVALID_PAGE */
- vcpu->arch.mmu.free(vcpu);
+ if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+ mmu_free_roots(vcpu);
+ WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ }
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
--
1.8.3.1

2013-10-02 14:56:46

by Paolo Bonzini

Subject: [PATCH 5/7] KVM: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu

The initialization function in mmu.c can always use walk_mmu, which
is known to be vcpu->arch.mmu. Only init_kvm_nested_mmu is used to
initialize vcpu->arch.nested_mmu.
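
For reference, the relationship relied on here is established in
kvm_mmu_create:

	vcpu->arch.walk_mmu = &vcpu->arch.mmu;

so outside the nested case walk_mmu and &vcpu->arch.mmu point to the
same structure.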

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/kvm/mmu.c | 15 +++++++++------
arch/x86/kvm/mmu.h | 5 ++---
arch/x86/kvm/svm.c | 4 ++--
arch/x86/kvm/vmx.c | 4 ++--
4 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 40772ef..ac598c8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3742,11 +3742,13 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
update_last_pte_bitmap(vcpu, context);
}

-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+ struct kvm_mmu *context = vcpu->arch.walk_mmu;
+
ASSERT(vcpu);
- ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ ASSERT(!VALID_PAGE(context->root_hpa));

if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
@@ -3765,11 +3767,12 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
- bool execonly)
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
{
+ struct kvm_mmu *context = vcpu->arch.walk_mmu;
+
ASSERT(vcpu);
- ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ ASSERT(!VALID_PAGE(context->root_hpa));

context->shadow_root_level = kvm_x86_ops->get_tdp_level();

@@ -3790,7 +3793,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
- kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
+ kvm_init_shadow_mmu(vcpu);
vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
vcpu->arch.walk_mmu->get_cr3 = get_cr3;
vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 2926152..c9d3d8f 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -70,9 +70,8 @@ enum {
};

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
- bool execonly);
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c7168a5..37bcd6b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1961,8 +1961,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
- kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
-
+ WARN_ON(mmu_is_nested(vcpu));
+ kvm_init_shadow_mmu(vcpu);
vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2db9164..fdd1cb8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7501,9 +7501,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
- kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+ WARN_ON(mmu_is_nested(vcpu));
+ kvm_init_shadow_ept_mmu(vcpu,
nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
-
vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
--
1.8.3.1

2013-10-02 14:56:53

by Paolo Bonzini

Subject: [PATCH 7/7] KVM: mmu: replace assertions with MMU_WARN_ON, a conditional WARN_ON

MMU_WARN_ON expands to WARN_ON when MMU_DEBUG is defined and to nothing
otherwise. Using it instead of ASSERT makes the direction of the
conditions consistent with code that is already using WARN_ON.
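
For example, an assertion such as

	ASSERT(!VALID_PAGE(root));

becomes

	MMU_WARN_ON(VALID_PAGE(root));

with the same polarity as the WARN_ON calls that are already there.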

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/kvm/mmu.c | 42 ++++++++++++++----------------------------
1 file changed, 14 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 699eab3..550e33d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -62,30 +62,16 @@ enum {
#undef MMU_DEBUG

#ifdef MMU_DEBUG
+static bool dbg = 0;
+module_param(dbg, bool, 0644);

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
-
+#define MMU_WARN_ON(x) WARN_ON(x)
#else
-
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
-
-#endif
-
-#ifdef MMU_DEBUG
-static bool dbg = 0;
-module_param(dbg, bool, 0644);
-#endif
-
-#ifndef MMU_DEBUG
-#define ASSERT(x) do { } while (0)
-#else
-#define ASSERT(x) \
- if (!(x)) { \
- printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
- __FILE__, __LINE__, #x); \
- }
+#define MMU_WARN_ON(x) do { } while (0)
#endif

#define PTE_PREFETCH_NUM 8
@@ -1533,7 +1519,7 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)

static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
- ASSERT(is_empty_shadow_page(sp->spt));
+ MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
list_del(&sp->link);
free_page((unsigned long)sp->spt);
@@ -3016,7 +3002,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];

- ASSERT(!VALID_PAGE(root));
+ MMU_WARN_ON(VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
make_mmu_pages_available(vcpu);
sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
@@ -3054,7 +3040,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;

- ASSERT(!VALID_PAGE(root));
+ MMU_WARN_ON(VALID_PAGE(root));

spin_lock(&vcpu->kvm->mmu_lock);
make_mmu_pages_available(vcpu);
@@ -3079,7 +3065,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];

- ASSERT(!VALID_PAGE(root));
+ MMU_WARN_ON(VALID_PAGE(root));
if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
if (!is_present_gpte(pdptr)) {
@@ -3301,7 +3287,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
if (r)
return r;

- ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

gfn = gva >> PAGE_SHIFT;

@@ -3367,7 +3353,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;

- ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
@@ -3655,7 +3641,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);

- ASSERT(is_pae(vcpu));
+ MMU_WARN_ON(!is_pae(vcpu));
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
@@ -3745,7 +3731,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
struct kvm_mmu *context = vcpu->arch.walk_mmu;

- ASSERT(!VALID_PAGE(context->root_hpa));
+ MMU_WARN_ON(VALID_PAGE(context->root_hpa));

if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
@@ -3768,7 +3754,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
{
struct kvm_mmu *context = vcpu->arch.walk_mmu;

- ASSERT(!VALID_PAGE(context->root_hpa));
+ MMU_WARN_ON(VALID_PAGE(context->root_hpa));

context->shadow_root_level = kvm_x86_ops->get_tdp_level();

@@ -4230,7 +4216,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)

void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
- ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));

init_kvm_mmu(vcpu);
}
--
1.8.3.1

2013-10-02 14:57:35

by Paolo Bonzini

Subject: [PATCH 6/7] KVM: mmu: remove ASSERT(vcpu)

Because ASSERT is just a printk, a NULL vcpu would cause an oops a few
lines later anyway. The assertion thus hardly adds anything.
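
For reference, the MMU_DEBUG definition of ASSERT is just

	#define ASSERT(x) \
		if (!(x)) { \
			printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
			       __FILE__, __LINE__, #x); \
		}

and it compiles to nothing when MMU_DEBUG is not defined; either way
execution continues past a failed check.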

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/kvm/mmu.c | 13 -------------
1 file changed, 13 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac598c8..699eab3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3301,7 +3301,6 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
if (r)
return r;

- ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

gfn = gva >> PAGE_SHIFT;
@@ -3368,7 +3367,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;

- ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

if (unlikely(error_code & PFERR_RSVD_MASK)) {
@@ -3747,7 +3745,6 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
struct kvm_mmu *context = vcpu->arch.walk_mmu;

- ASSERT(vcpu);
ASSERT(!VALID_PAGE(context->root_hpa));

if (!is_paging(vcpu))
@@ -3771,7 +3768,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
{
struct kvm_mmu *context = vcpu->arch.walk_mmu;

- ASSERT(vcpu);
ASSERT(!VALID_PAGE(context->root_hpa));

context->shadow_root_level = kvm_x86_ops->get_tdp_level();
@@ -3851,8 +3847,6 @@ static void init_kvm_mmu(struct kvm_vcpu *vcpu)

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
-
kvm_mmu_unload(vcpu);
init_kvm_mmu(vcpu);
}
@@ -4208,8 +4202,6 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
struct page *page;
int i;

- ASSERT(vcpu);
-
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
* Therefore we need to allocate shadow page tables in the first
@@ -4228,8 +4220,6 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
-
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
vcpu->arch.mmu.translate_gpa = translate_gpa;
@@ -4240,7 +4230,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)

void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

init_kvm_mmu(vcpu);
@@ -4528,8 +4517,6 @@ EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
- ASSERT(vcpu);
-
kvm_mmu_unload(vcpu);
free_mmu_pages(vcpu);
mmu_free_memory_caches(vcpu);
--
1.8.3.1

2013-10-02 14:56:40

by Paolo Bonzini

Subject: [PATCH 3/7] KVM: mmu: unify destroy_kvm_mmu with kvm_mmu_unload

They do the same thing, and destroy_kvm_mmu can be confused with
kvm_mmu_destroy.

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/kvm/mmu.c | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dff856c..d318d42 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3861,18 +3861,11 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
return init_kvm_softmmu(vcpu);
}

-static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
+int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
- if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
- mmu_free_roots(vcpu);
- WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
- }
-}

-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
-{
- destroy_kvm_mmu(vcpu);
+ kvm_mmu_unload(vcpu);
return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
@@ -3898,6 +3891,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
mmu_free_roots(vcpu);
+ WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);

@@ -4548,7 +4542,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);

- destroy_kvm_mmu(vcpu);
+ kvm_mmu_unload(vcpu);
free_mmu_pages(vcpu);
mmu_free_memory_caches(vcpu);
}
--
1.8.3.1

2013-10-02 14:58:11

by Paolo Bonzini

Subject: [PATCH 4/7] KVM: mmu: change useless int return types to void

kvm_mmu initialization is mostly a matter of filling in function pointers;
there is no way for it to fail. Clean up the unused return values.

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 4 +--
arch/x86/kvm/mmu.c | 71 ++++++++++++++++-------------------------
arch/x86/kvm/mmu.h | 4 +--
arch/x86/kvm/svm.c | 8 ++---
arch/x86/kvm/vmx.c | 6 ++--
arch/x86/kvm/x86.c | 2 +-
6 files changed, 37 insertions(+), 58 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00a0e4c..94d9341 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -778,11 +778,11 @@ void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);

-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d318d42..40772ef 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3419,8 +3419,8 @@ out_unlock:
return 0;
}

-static int nonpaging_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void nonpaging_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -3432,7 +3432,6 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
context->root_hpa = INVALID_PAGE;
context->direct_map = true;
context->nx = false;
- return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
@@ -3647,9 +3646,9 @@ static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
mmu->last_pte_bitmap = map;
}

-static int paging64_init_context_common(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context,
- int level)
+static void paging64_init_context_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context,
+ int level)
{
context->nx = is_nx(vcpu);
context->root_level = level;
@@ -3667,17 +3666,16 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
context->shadow_root_level = level;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
- return 0;
}

-static int paging64_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging64_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
+ paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
}

-static int paging32_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging32_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
context->nx = false;
context->root_level = PT32_ROOT_LEVEL;
@@ -3694,16 +3692,15 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
- return 0;
}

-static int paging32E_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging32E_init_context(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
+ paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}

-static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = vcpu->arch.walk_mmu;

@@ -3743,37 +3740,32 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)

update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
-
- return 0;
}

-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
- int r;
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

if (!is_paging(vcpu))
- r = nonpaging_init_context(vcpu, context);
+ nonpaging_init_context(vcpu, context);
else if (is_long_mode(vcpu))
- r = paging64_init_context(vcpu, context);
+ paging64_init_context(vcpu, context);
else if (is_pae(vcpu))
- r = paging32E_init_context(vcpu, context);
+ paging32E_init_context(vcpu, context);
else
- r = paging32_init_context(vcpu, context);
+ paging32_init_context(vcpu, context);

vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
vcpu->arch.mmu.base_role.smep_andnot_wp
= smep && !is_write_protection(vcpu);
-
- return r;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
bool execonly)
{
ASSERT(vcpu);
@@ -3793,24 +3785,19 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,

update_permission_bitmask(vcpu, context, true);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
-
- return 0;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

-static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
- int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
-
+ kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
vcpu->arch.walk_mmu->get_cr3 = get_cr3;
vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-
- return r;
}

-static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

@@ -3847,11 +3834,9 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)

update_permission_bitmask(vcpu, g_context, false);
update_last_pte_bitmap(vcpu, g_context);
-
- return 0;
}

-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_mmu(struct kvm_vcpu *vcpu)
{
if (mmu_is_nested(vcpu))
return init_kvm_nested_mmu(vcpu);
@@ -3861,12 +3846,12 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
return init_kvm_softmmu(vcpu);
}

-int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);

kvm_mmu_unload(vcpu);
- return init_kvm_mmu(vcpu);
+ init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

@@ -4250,12 +4235,12 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
return alloc_mmu_pages(vcpu);
}

-int kvm_mmu_setup(struct kvm_vcpu *vcpu)
+void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

- return init_kvm_mmu(vcpu);
+ init_kvm_mmu(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 77e044a..2926152 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -70,8 +70,8 @@ enum {
};

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
-int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
bool execonly);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c0bc803..c7168a5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1959,11 +1959,9 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
nested_svm_vmexit(svm);
}

-static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
- int r;
-
- r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+ kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);

vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
@@ -1971,8 +1969,6 @@ static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
vcpu->arch.mmu.shadow_root_level = get_npt_level();
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
-
- return r;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index be7fd0e..2db9164 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7499,9 +7499,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
return get_vmcs12(vcpu)->ept_pointer;
}

-static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
- int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+ kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);

vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
@@ -7509,8 +7509,6 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;

vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
-
- return r;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4a33fdb..a9632ef 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6688,7 +6688,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (r)
return r;
kvm_vcpu_reset(vcpu);
- r = kvm_mmu_setup(vcpu);
+ kvm_mmu_setup(vcpu);
vcpu_put(vcpu);

return r;
--
1.8.3.1

2013-10-02 14:58:35

by Paolo Bonzini

Subject: [PATCH 2/7] KVM: mmu: remove uninteresting MMU "new_cr3" callbacks

The new_cr3 MMU callback has been a wrapper for mmu_free_roots since commit
e676505 (KVM: MMU: Force cr3 reload with two dimensional paging on mov
cr3 emulation, 2012-07-08).

The commit message mentioned that "mmu_free_roots() is somewhat of an overkill,
but fixing that is more complicated and will be done after this minimal fix".
One year has passed, and no one really felt the need to do a different fix.
Wrap the call with a kvm_mmu_new_cr3 function for clarity, but remove the
callback.
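
The new wrapper (see the patch below) is simply:

	void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
	{
		mmu_free_roots(vcpu);
	}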

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kvm/mmu.c | 13 +------------
arch/x86/kvm/x86.c | 2 +-
3 files changed, 3 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3ea4d40..00a0e4c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -253,7 +253,6 @@ struct kvm_pio_request {
* mode.
*/
struct kvm_mmu {
- void (*new_cr3)(struct kvm_vcpu *vcpu);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
@@ -921,6 +920,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c4d580..dff856c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2570,11 +2570,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
kvm_release_pfn_clean(pfn);
}

-static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
-{
- mmu_free_roots(vcpu);
-}
-
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
{
@@ -3427,7 +3422,6 @@ out_unlock:
static int nonpaging_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
- context->new_cr3 = nonpaging_new_cr3;
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
@@ -3448,9 +3442,8 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);

-static void paging_new_cr3(struct kvm_vcpu *vcpu)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
{
- pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
mmu_free_roots(vcpu);
}

@@ -3666,7 +3659,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
update_last_pte_bitmap(vcpu, context);

ASSERT(is_pae(vcpu));
- context->new_cr3 = paging_new_cr3;
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
@@ -3694,7 +3686,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);

- context->new_cr3 = paging_new_cr3;
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
@@ -3717,7 +3708,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
struct kvm_mmu *context = vcpu->arch.walk_mmu;

context->base_role.word = 0;
- context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -3792,7 +3782,6 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
context->shadow_root_level = kvm_x86_ops->get_tdp_level();

context->nx = true;
- context->new_cr3 = paging_new_cr3;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 187f824..4a33fdb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -684,7 +684,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)

vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
- vcpu->arch.mmu.new_cr3(vcpu);
+ kvm_mmu_new_cr3(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
--
1.8.3.1

2013-10-03 10:23:11

by Gleb Natapov

Subject: Re: [PATCH 2/7] KVM: mmu: remove uninteresting MMU "new_cr3" callbacks

On Wed, Oct 02, 2013 at 04:56:11PM +0200, Paolo Bonzini wrote:
> The new_cr3 MMU callback has been a wrapper for mmu_free_roots since commit
> e676505 (KVM: MMU: Force cr3 reload with two dimensional paging on mov
> cr3 emulation, 2012-07-08).
>
> The commit message mentioned that "mmu_free_roots() is somewhat of an overkill,
> but fixing that is more complicated and will be done after this minimal fix".
> One year has passed, and no one really felt the need to do a different fix.
> Wrap the call with a kvm_mmu_new_cr3 function for clarity, but remove the
> callback.
>
Well, the reason is that whoever knew about the problem found other
things to do in the meantime :) But do we really need something smarter
here anyway? The situation described in e676505 should be extremely rare
even on HW without unrestricted guest, and it happens during slow
emulation anyway, so an optimization would likely not be noticeable.

> Signed-off-by: Paolo Bonzini <[email protected]>
> ---
> arch/x86/include/asm/kvm_host.h | 2 +-
> arch/x86/kvm/mmu.c | 13 +------------
> arch/x86/kvm/x86.c | 2 +-
> 3 files changed, 3 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 3ea4d40..00a0e4c 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -253,7 +253,6 @@ struct kvm_pio_request {
> * mode.
> */
> struct kvm_mmu {
> - void (*new_cr3)(struct kvm_vcpu *vcpu);
> void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
> unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
> u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
> @@ -921,6 +920,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
> int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
> void *insn, int insn_len);
> void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
> +void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
>
> void kvm_enable_tdp(void);
> void kvm_disable_tdp(void);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 1c4d580..dff856c 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2570,11 +2570,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
> kvm_release_pfn_clean(pfn);
> }
>
> -static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
> -{
> - mmu_free_roots(vcpu);
> -}
> -
> static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
> bool no_dirty_log)
> {
> @@ -3427,7 +3422,6 @@ out_unlock:
> static int nonpaging_init_context(struct kvm_vcpu *vcpu,
> struct kvm_mmu *context)
> {
> - context->new_cr3 = nonpaging_new_cr3;
> context->page_fault = nonpaging_page_fault;
> context->gva_to_gpa = nonpaging_gva_to_gpa;
> context->sync_page = nonpaging_sync_page;
> @@ -3448,9 +3442,8 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
> }
> EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
>
> -static void paging_new_cr3(struct kvm_vcpu *vcpu)
> +void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
> {
> - pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
> mmu_free_roots(vcpu);
> }
>
> @@ -3666,7 +3659,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
> update_last_pte_bitmap(vcpu, context);
>
> ASSERT(is_pae(vcpu));
> - context->new_cr3 = paging_new_cr3;
> context->page_fault = paging64_page_fault;
> context->gva_to_gpa = paging64_gva_to_gpa;
> context->sync_page = paging64_sync_page;
> @@ -3694,7 +3686,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
> update_permission_bitmask(vcpu, context, false);
> update_last_pte_bitmap(vcpu, context);
>
> - context->new_cr3 = paging_new_cr3;
> context->page_fault = paging32_page_fault;
> context->gva_to_gpa = paging32_gva_to_gpa;
> context->sync_page = paging32_sync_page;
> @@ -3717,7 +3708,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
> struct kvm_mmu *context = vcpu->arch.walk_mmu;
>
> context->base_role.word = 0;
> - context->new_cr3 = nonpaging_new_cr3;
> context->page_fault = tdp_page_fault;
> context->sync_page = nonpaging_sync_page;
> context->invlpg = nonpaging_invlpg;
> @@ -3792,7 +3782,6 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
> context->shadow_root_level = kvm_x86_ops->get_tdp_level();
>
> context->nx = true;
> - context->new_cr3 = paging_new_cr3;
> context->page_fault = ept_page_fault;
> context->gva_to_gpa = ept_gva_to_gpa;
> context->sync_page = ept_sync_page;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 187f824..4a33fdb 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -684,7 +684,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
>
> vcpu->arch.cr3 = cr3;
> __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
> - vcpu->arch.mmu.new_cr3(vcpu);
> + kvm_mmu_new_cr3(vcpu);
> return 0;
> }
> EXPORT_SYMBOL_GPL(kvm_set_cr3);
> --
> 1.8.3.1
>

--
Gleb.

2013-10-03 11:26:02

by Gleb Natapov

Subject: Re: [PATCH 5/7] KVM: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu

On Wed, Oct 02, 2013 at 04:56:14PM +0200, Paolo Bonzini wrote:
> The initialization function in mmu.c can always use walk_mmu, which
> is known to be vcpu->arch.mmu. Only init_kvm_nested_mmu is used to
> initialize vcpu->arch.nested_mmu.
>
> Signed-off-by: Paolo Bonzini <[email protected]>
> ---
> arch/x86/kvm/mmu.c | 15 +++++++++------
> arch/x86/kvm/mmu.h | 5 ++---
> arch/x86/kvm/svm.c | 4 ++--
> arch/x86/kvm/vmx.c | 4 ++--
> 4 files changed, 15 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 40772ef..ac598c8 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3742,11 +3742,13 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
> update_last_pte_bitmap(vcpu, context);
> }
>
> -void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
> +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
> {
> bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
> + struct kvm_mmu *context = vcpu->arch.walk_mmu;
I'd rather use &vcpu->arch.mmu here.

> +
> ASSERT(vcpu);
> - ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
> + ASSERT(!VALID_PAGE(context->root_hpa));
>
> if (!is_paging(vcpu))
> nonpaging_init_context(vcpu, context);
> @@ -3765,11 +3767,12 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
> }
> EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
>
> -void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
> - bool execonly)
> +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
> {
> + struct kvm_mmu *context = vcpu->arch.walk_mmu;
> +
> ASSERT(vcpu);
> - ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
> + ASSERT(!VALID_PAGE(context->root_hpa));
>
> context->shadow_root_level = kvm_x86_ops->get_tdp_level();
>
> @@ -3790,7 +3793,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
>
> static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
> {
> - kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
> + kvm_init_shadow_mmu(vcpu);
> vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
> vcpu->arch.walk_mmu->get_cr3 = get_cr3;
> vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
And change walk_mmu to mmu here too for consistency with all other
places. Basically if you want to initialize use mmu or nested_mmu.
Use walk_mmu pointer only when you need to use mmu.

> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 2926152..c9d3d8f 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -70,9 +70,8 @@ enum {
> };
>
> int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
> -void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
> -void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
> - bool execonly);
> +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
> +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
>
> static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
> {
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index c7168a5..37bcd6b 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1961,8 +1961,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
>
> static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
> {
> - kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
> -
> + WARN_ON(mmu_is_nested(vcpu));
> + kvm_init_shadow_mmu(vcpu);
> vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
> vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
> vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 2db9164..fdd1cb8 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -7501,9 +7501,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
>
> static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
> {
> - kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
> + WARN_ON(mmu_is_nested(vcpu));
> + kvm_init_shadow_ept_mmu(vcpu,
> nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
> -
> vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
> vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
> vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
> --
> 1.8.3.1
>

--
Gleb.

2013-10-03 11:50:52

by Paolo Bonzini

Subject: Re: [PATCH 5/7] KVM: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu

On 03/10/2013 13:25, Gleb Natapov wrote:
> On Wed, Oct 02, 2013 at 04:56:14PM +0200, Paolo Bonzini wrote:
>> The initialization function in mmu.c can always use walk_mmu, which
>> is known to be vcpu->arch.mmu. Only init_kvm_nested_mmu is used to
>> initialize vcpu->arch.nested_mmu.
>>
>> Signed-off-by: Paolo Bonzini <[email protected]>
>> ---
>> arch/x86/kvm/mmu.c | 15 +++++++++------
>> arch/x86/kvm/mmu.h | 5 ++---
>> arch/x86/kvm/svm.c | 4 ++--
>> arch/x86/kvm/vmx.c | 4 ++--
>> 4 files changed, 15 insertions(+), 13 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index 40772ef..ac598c8 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -3742,11 +3742,13 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
>> update_last_pte_bitmap(vcpu, context);
>> }
>>
>> -void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
>> +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
>> {
>> bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
>> + struct kvm_mmu *context = vcpu->arch.walk_mmu;
> I'd rather use &vcpu->arch.mmu here.
>
>> +
>> ASSERT(vcpu);
>> - ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
>> + ASSERT(!VALID_PAGE(context->root_hpa));
>>
>> if (!is_paging(vcpu))
>> nonpaging_init_context(vcpu, context);
>> @@ -3765,11 +3767,12 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
>> }
>> EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
>>
>> -void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
>> - bool execonly)
>> +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
>> {
>> + struct kvm_mmu *context = vcpu->arch.walk_mmu;
>> +
>> ASSERT(vcpu);
>> - ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
>> + ASSERT(!VALID_PAGE(context->root_hpa));
>>
>> context->shadow_root_level = kvm_x86_ops->get_tdp_level();
>>
>> @@ -3790,7 +3793,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
>>
>> static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
>> {
>> - kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
>> + kvm_init_shadow_mmu(vcpu);
>> vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
>> vcpu->arch.walk_mmu->get_cr3 = get_cr3;
>> vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
> And change walk_mmu to mmu here too for consistency with all other
> places. Basically if you want to initialize use mmu or nested_mmu.
> Use walk_mmu pointer only when you need to use mmu.

Makes sense, especially considering how kvm_init_shadow_mmu initializes
vcpu->arch.mmu.base_role directly.

Something like this (large enough that I'll probably make it a separate
patch in v2)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac598c8..d1f53cf 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3702,7 +3704,7 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = vcpu->arch.walk_mmu;
+ struct kvm_mmu *context = &vcpu->arch.mmu;

context->base_role.word = 0;
context->page_fault = tdp_page_fault;
@@ -3745,7 +3747,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- struct kvm_mmu *context = vcpu->arch.walk_mmu;
+ struct kvm_mmu *context = &vcpu->arch.mmu;

ASSERT(vcpu);
ASSERT(!VALID_PAGE(context->root_hpa));
@@ -3759,17 +3761,17 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
else
paging32_init_context(vcpu, context);

- vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
- vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
- vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
- vcpu->arch.mmu.base_role.smep_andnot_wp
+ context->base_role.nxe = is_nx(vcpu);
+ context->base_role.cr4_pae = !!is_pae(vcpu);
+ context->base_role.cr0_wp = is_write_protection(vcpu);
+ context->base_role.smep_andnot_wp
= smep && !is_write_protection(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
{
- struct kvm_mmu *context = vcpu->arch.walk_mmu;
+ struct kvm_mmu *context = &vcpu->arch.mmu;

ASSERT(vcpu);
ASSERT(!VALID_PAGE(context->root_hpa));
@@ -3793,11 +3795,13 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
+ struct kvm_mmu *context = &vcpu->arch.mmu;
+
kvm_init_shadow_mmu(vcpu);
- vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
- vcpu->arch.walk_mmu->get_cr3 = get_cr3;
- vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+ context->set_cr3 = kvm_x86_ops->set_cr3;
+ context->get_cr3 = get_cr3;
+ context->get_pdptr = kvm_pdptr_read;
+ context->inject_page_fault = kvm_inject_page_fault;
}

static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)

How far should I go? Should I also remove the context argument from
nonpaging_init_context and friends, changing it to a local variable?
(Doesn't seem like a big improvement in clarity).
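
For concreteness, the nonpaging case would become something like this
(untested sketch, most assignments elided):

	static void nonpaging_init_context(struct kvm_vcpu *vcpu)
	{
		struct kvm_mmu *context = &vcpu->arch.mmu;

		context->page_fault = nonpaging_page_fault;
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		/* ... remaining assignments exactly as today ... */
		context->root_hpa = INVALID_PAGE;
		context->direct_map = true;
		context->nx = false;
	}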

Paolo

>> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
>> index 2926152..c9d3d8f 100644
>> --- a/arch/x86/kvm/mmu.h
>> +++ b/arch/x86/kvm/mmu.h
>> @@ -70,9 +70,8 @@ enum {
>> };
>>
>> int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
>> -void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
>> -void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
>> - bool execonly);
>> +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
>> +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
>>
>> static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
>> {
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index c7168a5..37bcd6b 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -1961,8 +1961,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
>>
>> static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
>> {
>> - kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
>> -
>> + WARN_ON(mmu_is_nested(vcpu));
>> + kvm_init_shadow_mmu(vcpu);
>> vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
>> vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
>> vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index 2db9164..fdd1cb8 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -7501,9 +7501,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
>>
>> static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
>> {
>> - kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
>> + WARN_ON(mmu_is_nested(vcpu));
>> + kvm_init_shadow_ept_mmu(vcpu,
>> nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
>> -
>> vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
>> vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
>> vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
>> --
>> 1.8.3.1
>>
>
> --
> Gleb.
>

2013-10-03 11:56:13

by Paolo Bonzini

Subject: Re: [PATCH 2/7] KVM: mmu: remove uninteresting MMU "new_cr3" callbacks

On 03/10/2013 12:23, Gleb Natapov wrote:
>> > The commit message mentioned that "mmu_free_roots() is somewhat of an overkill,
>> > but fixing that is more complicated and will be done after this minimal fix".
>> > One year has passed, and no one really felt the need to do a different fix.
>> > Wrap the call with a kvm_mmu_new_cr3 function for clarity, but remove the
>> > callback.
>> >
> Well, the reason is that whoever knew about the problem found other
> things to do in the meantime :) But do we really need something smarter
> here anyway? The situation described in e676505 should be extremely rare
> even on HW without unrestricted guest, and it happens during slow
> emulation anyway, so an optimization would likely not be noticeable.

Yeah, and it's not like code that runs without paging will do mov to cr3
in a hot loop. :)

Paolo

2013-10-03 12:11:29

by Gleb Natapov

Subject: Re: [PATCH 5/7] KVM: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu

On Thu, Oct 03, 2013 at 01:51:09PM +0200, Paolo Bonzini wrote:
> On 03/10/2013 13:25, Gleb Natapov wrote:
> > On Wed, Oct 02, 2013 at 04:56:14PM +0200, Paolo Bonzini wrote:
> >> The initialization function in mmu.c can always use walk_mmu, which
> >> is known to be vcpu->arch.mmu. Only init_kvm_nested_mmu is used to
> >> initialize vcpu->arch.nested_mmu.
> >>
> >> Signed-off-by: Paolo Bonzini <[email protected]>
> >> ---
> >> arch/x86/kvm/mmu.c | 15 +++++++++------
> >> arch/x86/kvm/mmu.h | 5 ++---
> >> arch/x86/kvm/svm.c | 4 ++--
> >> arch/x86/kvm/vmx.c | 4 ++--
> >> 4 files changed, 15 insertions(+), 13 deletions(-)
> >>
> >> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> >> index 40772ef..ac598c8 100644
> >> --- a/arch/x86/kvm/mmu.c
> >> +++ b/arch/x86/kvm/mmu.c
> >> @@ -3742,11 +3742,13 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
> >> update_last_pte_bitmap(vcpu, context);
> >> }
> >>
> >> -void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
> >> +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
> >> {
> >> bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
> >> + struct kvm_mmu *context = vcpu->arch.walk_mmu;
> > I'd rather use &vcpu->arch.mmu here.
> >
> >> +
> >> ASSERT(vcpu);
> >> - ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
> >> + ASSERT(!VALID_PAGE(context->root_hpa));
> >>
> >> if (!is_paging(vcpu))
> >> nonpaging_init_context(vcpu, context);
> >> @@ -3765,11 +3767,12 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
> >> }
> >> EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
> >>
> >> -void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
> >> - bool execonly)
> >> +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
> >> {
> >> + struct kvm_mmu *context = vcpu->arch.walk_mmu;
> >> +
> >> ASSERT(vcpu);
> >> - ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
> >> + ASSERT(!VALID_PAGE(context->root_hpa));
> >>
> >> context->shadow_root_level = kvm_x86_ops->get_tdp_level();
> >>
> >> @@ -3790,7 +3793,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
> >>
> >> static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
> >> {
> >> - kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
> >> + kvm_init_shadow_mmu(vcpu);
> >> vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
> >> vcpu->arch.walk_mmu->get_cr3 = get_cr3;
> >> vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
> > And change walk_mmu to mmu here too for consistency with all other
> > places. Basically if you want to initialize use mmu or nested_mmu.
> > Use walk_mmu pointer only when you need to use mmu.
>
> Makes sense, especially considering how kvm_init_shadow_mmu initializes
> vcpu->arch.mmu.base_role directly.
>
> Something like this (large enough that I'll probably make it a separate
> patch in v2)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ac598c8..d1f53cf 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3702,7 +3704,7 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
>
> static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
> {
> - struct kvm_mmu *context = vcpu->arch.walk_mmu;
> + struct kvm_mmu *context = &vcpu->arch.mmu;
>
> context->base_role.word = 0;
> context->page_fault = tdp_page_fault;
> @@ -3745,7 +3747,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
> void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
> {
> bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
> - struct kvm_mmu *context = vcpu->arch.walk_mmu;
> + struct kvm_mmu *context = &vcpu->arch.mmu;
>
> ASSERT(vcpu);
> ASSERT(!VALID_PAGE(context->root_hpa));
> @@ -3759,17 +3761,17 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
> else
> paging32_init_context(vcpu, context);
>
> - vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
> - vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
> - vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
> - vcpu->arch.mmu.base_role.smep_andnot_wp
> + context->base_role.nxe = is_nx(vcpu);
> + context->base_role.cr4_pae = !!is_pae(vcpu);
> + context->base_role.cr0_wp = is_write_protection(vcpu);
> + context->base_role.smep_andnot_wp
> = smep && !is_write_protection(vcpu);
> }
> EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
>
> void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
> {
> - struct kvm_mmu *context = vcpu->arch.walk_mmu;
> + struct kvm_mmu *context = &vcpu->arch.mmu;
>
> ASSERT(vcpu);
> ASSERT(!VALID_PAGE(context->root_hpa));
> @@ -3793,11 +3795,13 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
>
> static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
> {
> + struct kvm_mmu *context = &vcpu->arch.mmu;
> +
> kvm_init_shadow_mmu(vcpu);
> - vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
> - vcpu->arch.walk_mmu->get_cr3 = get_cr3;
> - vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
> - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
> + context->set_cr3 = kvm_x86_ops->set_cr3;
> + context->get_cr3 = get_cr3;
> + context->get_pdptr = kvm_pdptr_read;
> + context->inject_page_fault = kvm_inject_page_fault;
> }
>
> static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
Yes.

>
> How far should I go? Should I also remove the context argument from
> nonpaging_init_context and friends, changing it to a local variable?
> (Doesn't seem like a big improvement in clarity).
>
If it does not, there's no need to do it. Hard for me to judge without trying.

--
Gleb.

2013-10-03 12:46:50

by Gleb Natapov

Subject: Re: [PATCH 0/7] KVM: x86: small MMU cleanups

On Wed, Oct 02, 2013 at 04:56:09PM +0200, Paolo Bonzini wrote:
> A few small things (uninteresting callbacks or arguments, duplicate or
> dead code) that I noticed while reviewing Gleb's latest nVMX series.
>
I applied 1-4 in the meantime.

> Paolo Bonzini (7):
> KVM: mmu: remove uninteresting MMU "free" callbacks
> KVM: mmu: remove uninteresting MMU "new_cr3" callbacks
> KVM: mmu: unify destroy_kvm_mmu with kvm_mmu_unload
> KVM: mmu: change useless int return types to void
> KVM: mmu: remove argument to kvm_init_shadow_mmu and kvm_init_shadow_ept_mmu
> KVM: mmu: remove ASSERT(vcpu)
> KVM: mmu: replace assertions with MMU_WARN_ON
>
> arch/x86/include/asm/kvm_host.h | 7 +-
> arch/x86/kvm/mmu.c | 170 ++++++++++++----------------------------
> arch/x86/kvm/mmu.h | 5 +-
> arch/x86/kvm/svm.c | 10 +--
> arch/x86/kvm/vmx.c | 8 +-
> arch/x86/kvm/x86.c | 4 +-
> 6 files changed, 63 insertions(+), 141 deletions(-)
>
> --
> 1.8.3.1

--
Gleb.