2020-06-05 12:02:38

by Vitaly Kuznetsov

Subject: [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result

Syzbot reports the following issue:

WARNING: CPU: 0 PID: 6819 at arch/x86/kvm/x86.c:618
kvm_inject_emulated_page_fault+0x210/0x290 arch/x86/kvm/x86.c:618
...
Call Trace:
...
RIP: 0010:kvm_inject_emulated_page_fault+0x210/0x290 arch/x86/kvm/x86.c:618
...
nested_vmx_get_vmptr+0x1f9/0x2a0 arch/x86/kvm/vmx/nested.c:4638
handle_vmon arch/x86/kvm/vmx/nested.c:4767 [inline]
handle_vmon+0x168/0x3a0 arch/x86/kvm/vmx/nested.c:4728
vmx_handle_exit+0x29c/0x1260 arch/x86/kvm/vmx/vmx.c:6067

The 'exception' we're trying to inject with
kvm_inject_emulated_page_fault() comes from:

nested_vmx_get_vmptr()
kvm_read_guest_virt()
kvm_read_guest_virt_helper()
vcpu->arch.walk_mmu->gva_to_gpa()

but it is only set when the GVA to GPA conversion fails. When the
conversion succeeds but kvm_vcpu_read_guest_page() fails anyway,
X86EMUL_IO_NEEDED is returned and nested_vmx_get_vmptr() calls
kvm_inject_emulated_page_fault() with a zeroed 'exception'. This
happens when the argument is in MMIO.

Paolo also noticed that nested_vmx_get_vmptr() is not the only place in
KVM code where the kvm_read/write_guest_virt*() return value is
mishandled. All VMX instructions (including INVVPID/INVEPT) as well as
INVPCID have the same issue. This was already noticed before, e.g. see
commit 541ab2aeb282 ("KVM: x86: work around leak of uninitialized stack
contents"), but it was never fully fixed.

KVM could've handled the request correctly by exiting to userspace and
performing the I/O, but there doesn't seem to be a real use case for
such requests in the first place.

Introduce vmx_handle_memory_failure() as an interim solution.

Note, nested_vmx_get_vmptr() now has three possible outcomes: OK, #PF,
and KVM_EXIT_INTERNAL_ERROR, so on failure callers need to know whether
a userspace exit is needed (for KVM_EXIT_INTERNAL_ERROR). We don't seem
to have a good enum describing this tristate, so just add an "int *ret"
parameter to the nested_vmx_get_vmptr() interface to pass the
information.
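
With the extra parameter, a call site looks like this (cf. the
handle_vmon() hunk below):

	gpa_t vmptr;
	int ret;

	/* 'ret' is 1 (#PF injected, re-enter guest) or 0 (userspace exit) */
	if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
		return ret;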

Reported-by: [email protected]
Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Vitaly Kuznetsov <[email protected]>
---
arch/x86/kvm/vmx/nested.c | 78 +++++++++++++++++++++------------------
arch/x86/kvm/vmx/vmx.c | 34 +++++++++++++++--
arch/x86/kvm/vmx/vmx.h | 2 +
3 files changed, 74 insertions(+), 40 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9c74a732b08d..bcb50724be38 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4624,19 +4624,24 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
}
}

-static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
+ int *ret)
{
gva_t gva;
struct x86_exception e;
+ int r;

if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmcs_read32(VMX_INSTRUCTION_INFO), false,
- sizeof(*vmpointer), &gva))
- return 1;
+ sizeof(*vmpointer), &gva)) {
+ *ret = 1;
+ return -EINVAL;
+ }

- if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
+ r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
+ if (r != X86EMUL_CONTINUE) {
+ *ret = vmx_handle_memory_failure(vcpu, r, &e);
+ return -EINVAL;
}

return 0;
@@ -4764,8 +4769,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}

- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
+ return ret;

/*
* SDM 3: 24.11.5
@@ -4838,12 +4843,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
u32 zero = 0;
gpa_t vmptr;
u64 evmcs_gpa;
+ int r;

if (!nested_vmx_check_permission(vcpu))
return 1;

- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+ return r;

if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
@@ -4902,7 +4908,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
u64 value;
gva_t gva = 0;
short offset;
- int len;
+ int len, r;

if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -4943,10 +4949,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
instr_info, true, len, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
}

return nested_vmx_succeed(vcpu);
@@ -4987,7 +4992,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
unsigned long field;
short offset;
gva_t gva;
- int len;
+ int len, r;

/*
* The value to write might be 32 or 64 bits, depending on L1's long
@@ -5017,10 +5022,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
instr_info, false, len, &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
}

field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5103,12 +5107,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t vmptr;
+ int r;

if (!nested_vmx_check_permission(vcpu))
return 1;

- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+ return r;

if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
@@ -5170,6 +5175,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
struct x86_exception e;
gva_t gva;
+ int r;

if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -5181,11 +5187,11 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
true, sizeof(gpa_t), &gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
- sizeof(gpa_t), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+ sizeof(gpa_t), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
+
return nested_vmx_succeed(vcpu);
}

@@ -5209,7 +5215,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
struct {
u64 eptp, gpa;
} operand;
- int i;
+ int i, r;

if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_EPT) ||
@@ -5236,10 +5242,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false, sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);

/*
* Nested EPT roots are always held through guest_mmu,
@@ -5291,6 +5296,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
u64 gla;
} operand;
u16 vpid02;
+ int r;

if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
@@ -5318,10 +5324,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false, sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
+
if (operand.vpid >> 16)
return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 170cc76a581f..d9083f80ec87 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1600,6 +1600,32 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
return 1;
}

+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+ struct x86_exception *e)
+{
+ if (r == X86EMUL_PROPAGATE_FAULT) {
+ kvm_inject_emulated_page_fault(vcpu, e);
+ return 1;
+ }
+
+ /*
+ * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+ * while handling a VMX instruction KVM could've handled the request
+ * correctly by exiting to userspace and performing I/O but there
+ * doesn't seem to be a real use-case behind such requests, just return
+ * KVM_EXIT_INTERNAL_ERROR for now.
+ */
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+
+ return 0;
+}

/*
* Recognizes a pending MTF VM-exit and records the nested state for later
@@ -5486,6 +5512,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
u64 pcid;
u64 gla;
} operand;
+ int r;

if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5508,10 +5535,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
sizeof(operand), &gva))
return 1;

- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);

if (operand.pcid >> 12 != 0) {
kvm_inject_gp(vcpu, 0);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 672c28f17e49..8a83b5edc820 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -355,6 +355,8 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+ struct x86_exception *e);

#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
--
2.25.4


2020-06-05 12:03:55

by Vitaly Kuznetsov

Subject: Re: [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result

Vitaly Kuznetsov <[email protected]> writes:

> Syzbot reports the following issue:

Noticed while sending: the patch prefix should be "KVM: VMX:" as it is
not only nested-related...

--
Vitaly

2020-06-05 20:09:42

by Sean Christopherson

Subject: Re: [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result

On Fri, Jun 05, 2020 at 01:59:05PM +0200, Vitaly Kuznetsov wrote:
> Introduce vmx_handle_memory_failure() as an interim solution.

Heh, "interim". I'll take the over on that :-D.

> Note, nested_vmx_get_vmptr() now has three possible outcomes: OK, #PF,
> and KVM_EXIT_INTERNAL_ERROR, so on failure callers need to know whether
> a userspace exit is needed (for KVM_EXIT_INTERNAL_ERROR). We don't seem
> to have a good enum describing this tristate, so just add an "int *ret"
> parameter to the nested_vmx_get_vmptr() interface to pass the
> information.
>
> Reported-by: [email protected]
> Suggested-by: Sean Christopherson <[email protected]>
> Signed-off-by: Vitaly Kuznetsov <[email protected]>
> ---

...

> +/*
> + * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
> + * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
> + * indicates whether exit to userspace is needed.
> + */
> +int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
> + struct x86_exception *e)
> +{
> + if (r == X86EMUL_PROPAGATE_FAULT) {
> + kvm_inject_emulated_page_fault(vcpu, e);
> + return 1;
> + }
> +
> + /*
> + * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
> + * while handling a VMX instruction KVM could've handled the request

A nit similar to your observation on the shortlog, this isn't limited to VMX
instructions.

> + * correctly by exiting to userspace and performing I/O but there
> + * doesn't seem to be a real use-case behind such requests, just return
> + * KVM_EXIT_INTERNAL_ERROR for now.
> + */
> + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
> + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
> + vcpu->run->internal.ndata = 0;
> +
> + return 0;
> +}

2020-06-08 08:23:19

by Vitaly Kuznetsov

Subject: Re: [PATCH v2 1/2] KVM: nVMX: Properly handle kvm_read/write_guest_virt*() result

Sean Christopherson <[email protected]> writes:

> On Fri, Jun 05, 2020 at 01:59:05PM +0200, Vitaly Kuznetsov wrote:
>> Introduce vmx_handle_memory_failure() as an interim solution.
>
> Heh, "interim". I'll take the over on that :-D.
>

We just need a crazy but real use-case to start acting :-)

>> Note, nested_vmx_get_vmptr() now has three possible outcomes: OK, #PF,
>> and KVM_EXIT_INTERNAL_ERROR, so on failure callers need to know whether
>> a userspace exit is needed (for KVM_EXIT_INTERNAL_ERROR). We don't seem
>> to have a good enum describing this tristate, so just add an "int *ret"
>> parameter to the nested_vmx_get_vmptr() interface to pass the
>> information.

On a loosely related note, while writing this patch I was struggling
with our exit handlers' calling convention (the return value is '1' -
return to the guest, '0' - return to userspace successfully, '< 0' -
return to userspace with an error). This is intertwined with normal
int/bool functions and makes the code hard to read. At the very minimum
we could introduce an enum for the 0/1 return values from exit handlers.
Or, maybe, we could introduce KVM_REQ_USERSPACE_EXIT/KVM_REQ_INTERNAL_ERROR/..
and make all the exit handlers normal functions returning 0/error?
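
E.g., as a minimal sketch of the enum idea (names entirely made up,
just to illustrate):

enum exit_handler_ret {
	EXIT_HANDLER_USERSPACE = 0,	/* return to userspace */
	EXIT_HANDLER_GUEST = 1,		/* re-enter the guest */
};

Exit handlers returning this instead of a bare int would at least make
the convention explicit at every 'return' statement.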

>>
>> Reported-by: [email protected]
>> Suggested-by: Sean Christopherson <[email protected]>
>> Signed-off-by: Vitaly Kuznetsov <[email protected]>
>> ---
>
> ...
>
>> +/*
>> + * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
>> + * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
>> + * indicates whether exit to userspace is needed.
>> + */
>> +int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
>> + struct x86_exception *e)
>> +{
>> + if (r == X86EMUL_PROPAGATE_FAULT) {
>> + kvm_inject_emulated_page_fault(vcpu, e);
>> + return 1;
>> + }
>> +
>> + /*
>> + * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
>> + * while handling a VMX instruction KVM could've handled the request
>
> A nit similar to your observation on the shortlog, this isn't limited to VMX
> instructions.
>

Yeah, it all started with nested_vmx_get_vmptr(), then Paolo discovered
vmwrite/vmread/vmptrst/invept/invvpid, and then I discovered invpcid but
forgot to update the comment ...

>> + * correctly by exiting to userspace and performing I/O but there
>> + * doesn't seem to be a real use-case behind such requests, just return
>> + * KVM_EXIT_INTERNAL_ERROR for now.
>> + */
>> + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
>> + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
>> + vcpu->run->internal.ndata = 0;
>> +
>> + return 0;
>> +}
>

--
Vitaly