2020-06-22 22:00:52

by Sean Christopherson

Subject: [PATCH 0/4] KVM: x86: nVMX: Nested PML bug fix and cleanup

Fix for a theoretical bug in nested PML emulation, and additional cleanup
of the related code.

Tested by doing a few rounds of intra-VM migration (same L1) of an L2
guest, with PML confirmed to be enabled in L1.

This has a trivial conflict with patch 3 of the MMU files series[*]; both
remove function prototypes from mmu.h.

[*] https://lkml.kernel.org/r/[email protected]

Sean Christopherson (4):
KVM: nVMX: Plumb L2 GPA through to PML emulation
KVM: x86/mmu: Drop kvm_arch_write_log_dirty() wrapper
KVM: nVMX: WARN if PML emulation helper is invoked outside of nested
guest
KVM: x86/mmu: Make .write_log_dirty a nested operation

 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu.h              |  1 -
 arch/x86/kvm/mmu/mmu.c          | 15 -------------
 arch/x86/kvm/mmu/paging_tmpl.h  |  7 +++---
 arch/x86/kvm/vmx/nested.c       | 38 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c          | 37 --------------------------------
 6 files changed, 43 insertions(+), 57 deletions(-)

--
2.26.0


2020-06-22 22:00:56

by Sean Christopherson

Subject: [PATCH 1/4] KVM: nVMX: Plumb L2 GPA through to PML emulation

Explicitly pass the L2 GPA to kvm_arch_write_log_dirty(), which for all
intents and purposes is vmx_write_pml_buffer(), instead of having the
latter pull the GPA from vmcs.GUEST_PHYSICAL_ADDRESS. If the dirty bit
update is the result of KVM emulation (rare for L2), then the GPA in the
VMCS may be stale and/or hold a completely unrelated GPA.
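
To make the failure mode concrete, here is a toy user-space model of the
two schemes (not kernel code; all names below are invented for
illustration). The cached field stands in for vmcs.GUEST_PHYSICAL_ADDRESS,
which is only updated on a real EPT violation VM-Exit, not when KVM's
instruction emulator dirties a page:

  #include <stdio.h>
  #include <stdint.h>

  typedef uint64_t gpa_t;

  /* Stands in for vmcs.GUEST_PHYSICAL_ADDRESS: updated only on a
   * hardware VM-Exit, not when KVM emulates a guest write. */
  static gpa_t last_exit_gpa;

  /* Old scheme: pull the GPA from the cached exit field. */
  static gpa_t pml_gpa_old(void)
  {
          return last_exit_gpa & ~0xFFFull;
  }

  /* New scheme: the page walker passes the GPA it actually dirtied. */
  static gpa_t pml_gpa_new(gpa_t l2_gpa)
  {
          return l2_gpa & ~0xFFFull;
  }

  int main(void)
  {
          gpa_t emulated_write = 0x5008; /* page dirtied by the emulator */

          last_exit_gpa = 0x1000;        /* GPA of the last EPT violation */

          printf("old logs %#llx (stale), new logs %#llx\n",
                 (unsigned long long)pml_gpa_old(),
                 (unsigned long long)pml_gpa_new(emulated_write));
          return 0;
  }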

Fixes: c5f983f6e8455 ("nVMX: Implement emulated Page Modification Logging")
Cc: [email protected]
Signed-off-by: Sean Christopherson <[email protected]>
---
 arch/x86/include/asm/kvm_host.h | 2 +-
 arch/x86/kvm/mmu.h              | 2 +-
 arch/x86/kvm/mmu/mmu.c          | 4 ++--
 arch/x86/kvm/mmu/paging_tmpl.h  | 7 ++++---
 arch/x86/kvm/vmx/vmx.c          | 6 +++---
 5 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f8998e97457f..446ea70a554d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1220,7 +1220,7 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
- int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+ int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 0ad06bfe2c2c..444bb9c54548 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fdd05c233308..76817d13c86e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1745,10 +1745,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
* Emulate arch specific page modification logging for the
* nested hypervisor
*/
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
{
if (kvm_x86_ops.write_log_dirty)
- return kvm_x86_ops.write_log_dirty(vcpu);
+ return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);

return 0;
}
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 38c576495048..cddd40029553 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
struct guest_walker *walker,
- int write_fault)
+ gpa_t addr, int write_fault)
{
unsigned level, index;
pt_element_t pte, orig_pte;
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
- if (kvm_arch_write_log_dirty(vcpu))
+ if (kvm_arch_write_log_dirty(vcpu, addr))
return -EINVAL;
#endif
pte |= PT_GUEST_DIRTY_MASK;
@@ -457,7 +457,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

if (unlikely(!accessed_dirty)) {
- ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+ ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+ addr, write_fault);
if (unlikely(ret < 0))
goto error;
else if (ret)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 08e26a9518c2..a2e7e106cc8f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7501,11 +7501,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
kvm_flush_pml_buffers(kvm);
}

-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t gpa, dst;
+ gpa_t dst;

if (is_guest_mode(vcpu)) {
WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7524,7 +7524,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
return 1;
}

- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+ gpa &= ~0xFFFull;
dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;

if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
--
2.26.0

2020-06-22 22:03:23

by Sean Christopherson

Subject: [PATCH 4/4] KVM: x86/mmu: Make .write_log_dirty a nested operation

Move .write_log_dirty() into kvm_x86_nested_ops to help differentiate it
from the non-nested dirty log hooks, and because it is a nested-only
operation.
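
For context, a condensed view of where the hook ends up (trimmed to the
relevant members; see the kvm_host.h hunk below for the real structs):

  struct kvm_x86_ops {
          /* ... */
          /* .write_log_dirty is dropped from here ... */
          struct kvm_x86_nested_ops *nested_ops;
          /* ... */
  };

  struct kvm_x86_nested_ops {
          /* ... */
          /* ... and becomes a nested-only op instead. */
          int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
          /* ... */
  };

The EPT walker then dispatches via
kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr), as the paging_tmpl.h
hunk shows.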

Signed-off-by: Sean Christopherson <[email protected]>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu/paging_tmpl.h  |  2 +-
 arch/x86/kvm/vmx/nested.c       | 38 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.c          | 38 ---------------------------------
 4 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 446ea70a554d..4e6219cb3933 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1220,7 +1220,6 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
- int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
@@ -1281,6 +1280,7 @@ struct kvm_x86_nested_ops {
struct kvm_nested_state __user *user_kvm_nested_state,
struct kvm_nested_state *kvm_state);
bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+ int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

int (*enable_evmcs)(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 60e7b2308876..c733196fd45b 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
- if (kvm_x86_ops.write_log_dirty(vcpu, addr))
+ if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
return -EINVAL;
#endif
pte |= PT_GUEST_DIRTY_MASK;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index adb11b504d5c..db9abcbeefd1 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3205,6 +3205,43 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
return true;
}

+static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gpa_t dst;
+
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+ return 0;
+
+ if (WARN_ON_ONCE(vmx->nested.pml_full))
+ return 1;
+
+ /*
+ * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
+ * set is already checked as part of A/D emulation.
+ */
+ vmcs12 = get_vmcs12(vcpu);
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;
+
+ if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+ vmx->nested.pml_full = true;
+ return 1;
+ }
+
+ gpa &= ~0xFFFull;
+ dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
+
+ if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+ offset_in_page(dst), sizeof(gpa)))
+ return 0;
+
+ vmcs12->guest_pml_index--;
+
+ return 0;
+}
+
/*
* Intel's VMX Instruction Reference specifies a common set of prerequisites
* for running VMX instructions (except VMXON, whose prerequisites are
@@ -6503,6 +6540,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
.get_vmcs12_pages = nested_get_vmcs12_pages,
+ .write_log_dirty = nested_vmx_write_pml_buffer,
.enable_evmcs = nested_enable_evmcs,
.get_evmcs_version = nested_get_evmcs_version,
};
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index adf83047bb21..8bf06a59f356 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7501,43 +7501,6 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
kvm_flush_pml_buffers(kvm);
}

-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
-{
- struct vmcs12 *vmcs12;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t dst;
-
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
- return 0;
-
- if (WARN_ON_ONCE(vmx->nested.pml_full))
- return 1;
-
- /*
- * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
- * set is already checked as part of A/D emulation.
- */
- vmcs12 = get_vmcs12(vcpu);
- if (!nested_cpu_has_pml(vmcs12))
- return 0;
-
- if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
- vmx->nested.pml_full = true;
- return 1;
- }
-
- gpa &= ~0xFFFull;
- dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
-
- if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
- offset_in_page(dst), sizeof(gpa)))
- return 0;
-
- vmcs12->guest_pml_index--;
-
- return 0;
-}
-
static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *memslot,
gfn_t offset, unsigned long mask)
@@ -7966,7 +7929,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
.flush_log_dirty = vmx_flush_log_dirty,
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
- .write_log_dirty = vmx_write_pml_buffer,

.pre_block = vmx_pre_block,
.post_block = vmx_post_block,
--
2.26.0

2020-06-22 22:04:00

by Sean Christopherson

Subject: [PATCH 3/4] KVM: nVMX: WARN if PML emulation helper is invoked outside of nested guest

WARN if vmx_write_pml_buffer() is called outside of guest mode instead
of silently ignoring the condition. The only caller is nested EPT's
ept_update_accessed_dirty_bits(), which should only be reachable when
L2 is active.
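
The resulting guard structure, condensed from the diff below with comments
added here for illustration (they are not part of the patch):

  if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
          return 0;   /* caller bug: PML emulation is nested-only */

  if (WARN_ON_ONCE(vmx->nested.pml_full))
          return 1;   /* L1's buffer already overflowed */

Returning 1 propagates as an error from the page walk, while the pml_full
flag causes a PML-full VM-Exit to be synthesized for L1.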

Signed-off-by: Sean Christopherson <[email protected]>
---
 arch/x86/kvm/vmx/vmx.c | 45 +++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a2e7e106cc8f..adf83047bb21 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7507,33 +7507,34 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t dst;

- if (is_guest_mode(vcpu)) {
- WARN_ON_ONCE(vmx->nested.pml_full);
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+ return 0;

- /*
- * Check if PML is enabled for the nested guest.
- * Whether eptp bit 6 is set is already checked
- * as part of A/D emulation.
- */
- vmcs12 = get_vmcs12(vcpu);
- if (!nested_cpu_has_pml(vmcs12))
- return 0;
+ if (WARN_ON_ONCE(vmx->nested.pml_full))
+ return 1;

- if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
- vmx->nested.pml_full = true;
- return 1;
- }
+ /*
+ * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
+ * set is already checked as part of A/D emulation.
+ */
+ vmcs12 = get_vmcs12(vcpu);
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;

- gpa &= ~0xFFFull;
- dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
-
- if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
- offset_in_page(dst), sizeof(gpa)))
- return 0;
-
- vmcs12->guest_pml_index--;
+ if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+ vmx->nested.pml_full = true;
+ return 1;
}

+ gpa &= ~0xFFFull;
+ dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
+
+ if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+ offset_in_page(dst), sizeof(gpa)))
+ return 0;
+
+ vmcs12->guest_pml_index--;
+
return 0;
}

--
2.26.0

2020-06-22 22:04:28

by Sean Christopherson

Subject: [PATCH 2/4] KVM: x86/mmu: Drop kvm_arch_write_log_dirty() wrapper

Drop kvm_arch_write_log_dirty() in favor of invoking .write_log_dirty()
directly from FNAME(update_accessed_dirty_bits). "kvm_arch" is usually
used for x86 functions that are invoked from generic KVM, and implies
that there are external callers, neither of which is true.

Remove the check for a non-NULL kvm_x86_ops hook, as the call site is
wrapped in a PTTYPE == PTTYPE_EPT #ifdef and the hook is unconditionally
set by VMX.
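
To make the second point concrete, the lone call site after this patch is
compiled only into the EPT flavor of the paging template (the comments
below are added for illustration and are not in the diff):

  #if PTTYPE == PTTYPE_EPT
          /*
           * Only the nested EPT walker reaches this code, and only VMX
           * builds a nested EPT MMU, so the VMX-set hook can't be NULL.
           */
          if (kvm_x86_ops.write_log_dirty(vcpu, addr))
                  return -EINVAL;
  #endif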

Signed-off-by: Sean Christopherson <[email protected]>
---
 arch/x86/kvm/mmu.h             |  1 -
 arch/x86/kvm/mmu/mmu.c         | 15 ---------------
 arch/x86/kvm/mmu/paging_tmpl.h |  2 +-
 3 files changed, 1 insertion(+), 17 deletions(-)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 444bb9c54548..81cafc937cfb 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -222,7 +222,6 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 76817d13c86e..a25427db1bb7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1738,21 +1738,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

-/**
- * kvm_arch_write_log_dirty - emulate dirty page logging
- * @vcpu: Guest mode vcpu
- *
- * Emulate arch specific page modification logging for the
- * nested hypervisor
- */
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
-{
- if (kvm_x86_ops.write_log_dirty)
- return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);
-
- return 0;
-}
-
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn)
{
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index cddd40029553..60e7b2308876 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
- if (kvm_arch_write_log_dirty(vcpu, addr))
+ if (kvm_x86_ops.write_log_dirty(vcpu, addr))
return -EINVAL;
#endif
pte |= PT_GUEST_DIRTY_MASK;
--
2.26.0

2020-06-22 22:31:20

by Paolo Bonzini

Subject: Re: [PATCH 4/4] KVM: x86/mmu: Make .write_log_dirty a nested operation

On 22/06/20 23:58, Sean Christopherson wrote:
> Move .write_log_dirty() into kvm_x86_nested_ops to help differentiate it
> from the non-nested dirty log hooks, and because it is a nested-only
> operation.
>
> Signed-off-by: Sean Christopherson <[email protected]>
>
> [...]

Queued, thanks (patch 1 for 5.8).

Paolo