2023-07-18 14:04:55

by Zeng Guang

Subject: [PATCH v2 5/8] KVM: emulator: Add emulation of LASS violation checks on linear address

When the Intel CPU feature Linear Address Space Separation (LASS) is
enabled, the KVM emulator performs a LASS violation check on every access
to guest memory through a linear address.

Define a new function prototype in kvm_x86_ops that gives the emulator an
interface for identifying whether a LASS violation occurs. The actual
implementation can be supplied according to vendor-specific requirements.

The emulator uses the passed (address, size) pair and the instruction
operation type (flags) to enforce LASS protection when KVM emulates
instruction fetches and data accesses, including implicit accesses to
system data structures.
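
As a minimal model of the rule the new hook is expected to capture (for
illustration only; the helper name, the F_* macros mirroring the emulator's
X86EMUL_F_* flags, and the simplified arguments are assumptions, and the
CR4.LASS/64-bit-mode gating is omitted): bit 63 of the linear address
selects the user (0) or supervisor (1) half of the address space,
user-mode software may not touch the supervisor half, and supervisor-mode
fetches as well as implicit or AC=0 data accesses may not touch the user
half.

#include <stdbool.h>
#include <stdint.h>

#define F_WRITE     (1u << 0)	/* explicit data write (no special casing) */
#define F_FETCH     (1u << 1)	/* instruction fetch */
#define F_IMPLICIT  (1u << 2)	/* implicit access to a system structure */

static bool lass_violation(uint64_t addr, unsigned int flags,
			   unsigned int cpl, bool rflags_ac)
{
	/* Bit 63 partitions the linear address space: 1 = supervisor half. */
	bool supervisor_addr = addr & (1ULL << 63);

	/* Explicit user-mode accesses may never touch the supervisor half. */
	if (cpl == 3 && !(flags & F_IMPLICIT))
		return supervisor_addr;

	/*
	 * Supervisor-mode fetches from the user half always violate LASS;
	 * data accesses do too, unless RFLAGS.AC permits an explicit access.
	 */
	if ((flags & (F_FETCH | F_IMPLICIT)) || !rflags_ac)
		return !supervisor_addr;

	return false;
}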

Signed-off-by: Zeng Guang <[email protected]>
Tested-by: Xuelian Guo <[email protected]>
---
arch/x86/include/asm/kvm-x86-ops.h | 3 ++-
arch/x86/include/asm/kvm_host.h | 3 +++
arch/x86/kvm/emulate.c | 11 +++++++++++
arch/x86/kvm/kvm_emulate.h | 2 ++
arch/x86/kvm/x86.c | 10 ++++++++++
5 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 13bc212cd4bc..a301f0a46381 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -132,7 +132,8 @@ KVM_X86_OP_OPTIONAL(migrate_timers)
KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
-KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons)
+KVM_X86_OP_OPTIONAL_RET0(is_lass_violation)

#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fb9d1f2d6136..791f0dd48cd9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1731,6 +1731,9 @@ struct kvm_x86_ops {
* Returns vCPU specific APICv inhibit reasons
*/
unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
+
+ bool (*is_lass_violation)(struct kvm_vcpu *vcpu, unsigned long addr,
+ unsigned int size, unsigned int flags);
};

struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9b4b3ce6d52a..2289a4ad21be 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -742,6 +742,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
+
+ if (ctxt->ops->is_lass_violation(ctxt, *linear, size, flags))
+ goto bad;
+
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
@@ -848,6 +852,9 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
void *data, unsigned size)
{
+ if (ctxt->ops->is_lass_violation(ctxt, linear, size, X86EMUL_F_IMPLICIT))
+ return emulate_gp(ctxt, 0);
+
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

@@ -855,6 +862,10 @@ static int linear_write_system(struct x86_emulate_ctxt *ctxt,
ulong linear, void *data,
unsigned int size)
{
+ if (ctxt->ops->is_lass_violation(ctxt, linear, size,
+ X86EMUL_F_IMPLICIT | X86EMUL_F_FETCH))
+ return emulate_gp(ctxt, 0);
+
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index c944055091e1..6f0996d0da56 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -232,6 +232,8 @@ struct x86_emulate_ops {
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+ bool (*is_lass_violation)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+ unsigned int size, unsigned int flags);
};

/* Type, address-of, and value of an instruction's operand. */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 04b57a336b34..6448ff706539 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8287,6 +8287,15 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
kvm_vm_bugged(kvm);
}

+static bool emulator_is_lass_violation(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr,
+ unsigned int size,
+ unsigned int flags)
+{
+ return static_call(kvm_x86_is_lass_violation)(emul_to_vcpu(ctxt),
+ addr, size, flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.vm_bugged = emulator_vm_bugged,
.read_gpr = emulator_read_gpr,
@@ -8332,6 +8341,7 @@ static const struct x86_emulate_ops emulate_ops = {
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
+ .is_lass_violation = emulator_is_lass_violation,
};

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
--
2.27.0



2023-07-24 05:08:06

by Chao Gao

Subject: Re: [PATCH v2 5/8] KVM: emulator: Add emulation of LASS violation checks on linear address

On Tue, Jul 18, 2023 at 09:18:41PM +0800, Zeng Guang wrote:
>When enabled Intel CPU feature Linear Address Space Separation (LASS),
>KVM emulator will take LASS violation check on every access to guest
>memory by a linear address.

When Intel Linear Address Space Separation (LASS) is enabled, the processor
applies a LASS violation check to every access to a linear address. To
align with hardware behavior, KVM needs to perform the same check in
instruction emulation.

>
>We defined a new function prototype in kvm_x86_ops for emulator to
>construct the interface to identify whether a LASS violation occurs.
>It can have further practical implementation according to vendor
>specific requirements.
>
>Emulator will use the passed (address, size) pair and instruction
>operation type (flags) to enforce LASS protection when KVM emulates
>instruction fetch, data access including implicit data access to a
>system data structure.

Define a new function in x86_emulate_ops to perform the LASS violation
check in the KVM emulator. The function accepts an address and a size,
which delimit the memory access, and flags, which provide extra
information about the access that is needed for LASS violation checks,
e.g., whether the access is an instruction fetch or an implicit access.

emulator_is_lass_violation() is just a placeholder. It will be wired up
to the VMX/SVM implementation by a later patch.
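
As a rough idea of what that later wiring could look like (the function
name vmx_is_lass_violation and the gating described in the comment are
assumptions, not taken from this series), the vendor module would
register an implementation behind the new optional hook, while the
OPTIONAL_RET0 default keeps the check a no-op for vendors that do not
provide one:

static bool vmx_is_lass_violation(struct kvm_vcpu *vcpu, unsigned long addr,
				  unsigned int size, unsigned int flags)
{
	/*
	 * Gate on 64-bit mode and CR4.LASS, then apply the bit-63
	 * user/supervisor rule using CPL, RFLAGS.AC and the X86EMUL_F_*
	 * flags.  Enforcement details omitted in this sketch.
	 */
	return false;
}

static struct kvm_x86_ops vmx_x86_ops __initdata = {
	/* ... existing callbacks ... */
	.is_lass_violation = vmx_is_lass_violation,
};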

(I think the commit message can also explain why the LASS violation
check is added in the three functions, i.e., __linearize(),
linear_read_system() and linear_write_system(), and why only in them)

>
>Signed-off-by: Zeng Guang <[email protected]>
>Tested-by: Xuelian Guo <[email protected]>
>---
> arch/x86/include/asm/kvm-x86-ops.h | 3 ++-
> arch/x86/include/asm/kvm_host.h | 3 +++
> arch/x86/kvm/emulate.c | 11 +++++++++++
> arch/x86/kvm/kvm_emulate.h | 2 ++
> arch/x86/kvm/x86.c | 10 ++++++++++
> 5 files changed, 28 insertions(+), 1 deletion(-)
>
>diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
>index 13bc212cd4bc..a301f0a46381 100644
>--- a/arch/x86/include/asm/kvm-x86-ops.h
>+++ b/arch/x86/include/asm/kvm-x86-ops.h
>@@ -132,7 +132,8 @@ KVM_X86_OP_OPTIONAL(migrate_timers)
> KVM_X86_OP(msr_filter_changed)
> KVM_X86_OP(complete_emulated_msr)
> KVM_X86_OP(vcpu_deliver_sipi_vector)
>-KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
>+KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons)
>+KVM_X86_OP_OPTIONAL_RET0(is_lass_violation)

...

>
> #undef KVM_X86_OP
> #undef KVM_X86_OP_OPTIONAL
>diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>index fb9d1f2d6136..791f0dd48cd9 100644
>--- a/arch/x86/include/asm/kvm_host.h
>+++ b/arch/x86/include/asm/kvm_host.h
>@@ -1731,6 +1731,9 @@ struct kvm_x86_ops {
> * Returns vCPU specific APICv inhibit reasons
> */
> unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
>+
>+ bool (*is_lass_violation)(struct kvm_vcpu *vcpu, unsigned long addr,
>+ unsigned int size, unsigned int flags);

I think we can just return false in emulator_is_lass_violation() and fold
this new kvm_x86_ops definition into the VMX implementation. This way is
more natural to me.
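
Concretely, that suggestion amounts to something like the sketch below
(illustrative only): keep the emulator callback as a stub that never
reports a violation, and introduce the vendor hook only together with its
VMX implementation in the later patch.

static bool emulator_is_lass_violation(struct x86_emulate_ctxt *ctxt,
				       unsigned long addr,
				       unsigned int size,
				       unsigned int flags)
{
	/* No vendor implementation wired up yet. */
	return false;
}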

> };
>
> struct kvm_x86_nested_ops {
>diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
>index 9b4b3ce6d52a..2289a4ad21be 100644
>--- a/arch/x86/kvm/emulate.c
>+++ b/arch/x86/kvm/emulate.c
>@@ -742,6 +742,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
> }
> break;
> }
>+
>+ if (ctxt->ops->is_lass_violation(ctxt, *linear, size, flags))
>+ goto bad;
>+
> if (la & (insn_alignment(ctxt, size) - 1))
> return emulate_gp(ctxt, 0);
> return X86EMUL_CONTINUE;
>@@ -848,6 +852,9 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
> static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
> void *data, unsigned size)
> {
>+ if (ctxt->ops->is_lass_violation(ctxt, linear, size, X86EMUL_F_IMPLICIT))
>+ return emulate_gp(ctxt, 0);
>+
> return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
> }
>
>@@ -855,6 +862,10 @@ static int linear_write_system(struct x86_emulate_ctxt *ctxt,
> ulong linear, void *data,
> unsigned int size)
> {
>+ if (ctxt->ops->is_lass_violation(ctxt, linear, size,
>+ X86EMUL_F_IMPLICIT | X86EMUL_F_FETCH))

s/X86EMUL_F_FETCH/X86EMUL_F_WRITE/