2023-04-20 14:19:10

by Zeng Guang

Subject: [PATCH 2/6] KVM: VMX: Add new ops in kvm_x86_ops for LASS violation check

Intel introduce LASS (Linear Address Separation) feature providing
an independent mechanism to achieve the mode-based protection.

LASS partitions 64-bit linear address space into two halves, user-mode
address (LA[bit 63]=0) and supervisor-mode address (LA[bit 63]=1). It
stops any code execution or data access
1. from user mode to supervisor-mode address space
2. from supervisor mode to user-mode address space
and generates LASS violation fault accordingly.

A supervisor mode data access causes a LASS violation only if supervisor
mode access protection is enabled (CR4.SMAP = 1) and either RFLAGS.AC = 0
or the access implicitly accesses a system data structure.

Following are the rule of LASS violation check on the linear address(LA).
User access to supervisor-mode address space:
LA[bit 63] && (CPL == 3)
Supervisor access to user-mode address space:
Instruction fetch: !LA[bit 63] && (CPL < 3)
Data access: !LA[bit 63] && (CR4.SMAP==1) && ((RFLAGS.AC == 0 &&
CPL < 3) || Implicit supervisor access)
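For illustration only (not part of the patch itself), the rules above can be
sketched as a standalone C predicate; the helper name and parameters below are
hypothetical, and the CR4.LASS/long-mode gating is omitted:

/* Sketch: evaluate the LASS rules above for one linear address. */
static bool lass_violation(u64 la, unsigned int cpl, bool smap,
			   bool rflags_ac, bool fetch, bool implicit)
{
	bool user_addr = !(la >> 63);		/* LA[bit 63] == 0 */
	bool user_mode = (cpl == 3) && !implicit;

	/* Implicit accesses to system data structures act as if RFLAGS.AC = 0. */
	if (implicit)
		rflags_ac = false;

	/* User-mode access faults only on the supervisor-mode half. */
	if (user_mode)
		return !user_addr;

	/* Supervisor-mode access to the user-mode half. */
	if (user_addr) {
		if (fetch)
			return true;
		/* Data access violates only with CR4.SMAP = 1 and RFLAGS.AC = 0. */
		return smap && !rflags_ac;
	}

	return false;
}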

Add new ops in kvm_x86_ops to do LASS violation check.

Signed-off-by: Zeng Guang <[email protected]>
---
arch/x86/include/asm/kvm-x86-ops.h | 1 +
arch/x86/include/asm/kvm_host.h | 5 +++
arch/x86/kvm/vmx/vmx.c | 55 ++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.h | 2 ++
4 files changed, 63 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index abccd51dcfca..f76c07f2674b 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL_RET0(check_lass);

#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8ff89a52ef66..31fb8699a1ff 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -69,6 +69,9 @@
#define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
KVM_X86_NOTIFY_VMEXIT_USER)

+/* x86-specific emulation flags */
+#define KVM_X86_EMULFLAG_SKIP_LASS _BITULL(1)
+
/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
@@ -1706,6 +1709,8 @@ struct kvm_x86_ops {
* Returns vCPU specific APICv inhibit reasons
*/
unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
+
+ bool (*check_lass)(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
};

struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c923d7599d71..581327ede66a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8070,6 +8070,59 @@ static void vmx_vm_destroy(struct kvm *kvm)
free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
}

+/*
+ * Determine whether an access to the linear address causes a LASS violation.
+ * LASS protection is only effective in long mode. As a prerequisite, caller
+ * should make sure VM running in long mode and invoke this api to do LASS
+ * violation check.
+ */
+bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
+{
+ bool user_mode, user_as, rflags_ac;
+
+ if (!!(flags & KVM_X86_EMULFLAG_SKIP_LASS) ||
+ !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS))
+ return false;
+
+ WARN_ON_ONCE(!is_long_mode(vcpu));
+
+ user_as = !(la >> 63);
+
+ /*
+ * An access is a supervisor-mode access if CPL < 3 or if it implicitly
+ * accesses a system data structure. For implicit accesses to system
+ * data structure, the processor acts as if RFLAGS.AC is clear.
+ */
+ if (access & PFERR_IMPLICIT_ACCESS) {
+ user_mode = false;
+ rflags_ac = false;
+ } else {
+ user_mode = vmx_get_cpl(vcpu) == 3;
+ if (!user_mode)
+ rflags_ac = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
+ }
+
+ if (user_mode != user_as) {
+ /*
+ * Supervisor-mode _data_ accesses to user address space
+ * cause LASS violations only if SMAP is enabled.
+ */
+ if (!user_mode && !(access & PFERR_FETCH_MASK)) {
+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) &&
+ !rflags_ac;
+ } else {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
+{
+ return is_long_mode(vcpu) && __vmx_check_lass(vcpu, access, la, flags);
+}
+
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.name = "kvm_intel",

@@ -8207,6 +8260,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.complete_emulated_msr = kvm_complete_insn_gp,

.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+ .check_lass = vmx_check_lass,
};

static unsigned int vmx_handle_intel_pt_intr(void)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index a3da84f4ea45..6569385a5978 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

+bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
+
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
int type, bool value)
{
--
2.27.0


2023-04-24 07:56:38

by Binbin Wu

Subject: Re: [PATCH 2/6] KVM: VMX: Add new ops in kvm_x86_ops for LASS violation check



On 4/20/2023 9:37 PM, Zeng Guang wrote:
> Intel introduce LASS (Linear Address Separation) feature providing
/s/introduce/introduces

> an independent mechanism to achieve the mode-based protection.
>
> LASS partitions 64-bit linear address space into two halves, user-mode
> address (LA[bit 63]=0) and supervisor-mode address (LA[bit 63]=1). It
> stops any code execution or data access
> 1. from user mode to supervisor-mode address space
> 2. from supervisor mode to user-mode address space
> and generates LASS violation fault accordingly.
IMO, the description of point 2 may be misleading, suggesting that LASS stops
any data access from supervisor mode to the user-mode address space,
although the following description adds the conditions.


>
> A supervisor mode data access causes a LASS violation only if supervisor
> mode access protection is enabled (CR4.SMAP = 1) and either RFLAGS.AC = 0
> or the access implicitly accesses a system data structure.
>
> Following are the rule of LASS violation check on the linear address(LA).
/s/rule/rules

> User access to supervisor-mode address space:
> LA[bit 63] && (CPL == 3)
> Supervisor access to user-mode address space:
> Instruction fetch: !LA[bit 63] && (CPL < 3)
> Data access: !LA[bit 63] && (CR4.SMAP==1) && ((RFLAGS.AC == 0 &&
> CPL < 3) || Implicit supervisor access)
>
> Add new ops in kvm_x86_ops to do LASS violation check.
>
> Signed-off-by: Zeng Guang <[email protected]>
> ---
> arch/x86/include/asm/kvm-x86-ops.h | 1 +
> arch/x86/include/asm/kvm_host.h | 5 +++
> arch/x86/kvm/vmx/vmx.c | 55 ++++++++++++++++++++++++++++++
> arch/x86/kvm/vmx/vmx.h | 2 ++
> 4 files changed, 63 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index abccd51dcfca..f76c07f2674b 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
> KVM_X86_OP(complete_emulated_msr)
> KVM_X86_OP(vcpu_deliver_sipi_vector)
> KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> +KVM_X86_OP_OPTIONAL_RET0(check_lass);
>
> #undef KVM_X86_OP
> #undef KVM_X86_OP_OPTIONAL
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 8ff89a52ef66..31fb8699a1ff 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -69,6 +69,9 @@
> #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
> KVM_X86_NOTIFY_VMEXIT_USER)
>
> +/* x86-specific emulation flags */
> +#define KVM_X86_EMULFLAG_SKIP_LASS _BITULL(1)
Do you use the flag outside of emulator?
For LAM patch, it's planned to move the flags inside emulator.

> +
> /* x86-specific vcpu->requests bit members */
> #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
> #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
> @@ -1706,6 +1709,8 @@ struct kvm_x86_ops {
> * Returns vCPU specific APICv inhibit reasons
> */
> unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
> +
> + bool (*check_lass)(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
The flags may be dropped if the caller knows to skip it or not.
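i.e., something like (just a sketch of the simplified hook signature):

	bool (*check_lass)(struct kvm_vcpu *vcpu, u64 access, u64 la);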

> };
>
> struct kvm_x86_nested_ops {
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index c923d7599d71..581327ede66a 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -8070,6 +8070,59 @@ static void vmx_vm_destroy(struct kvm *kvm)
> free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
> }
>
> +/*
> + * Determine whether an access to the linear address causes a LASS violation.
> + * LASS protection is only effective in long mode. As a prerequisite, caller
> + * should make sure VM
Should be vCPU?

> running in long mode and invoke this api to do LASS
> + * violation check.
> + */
> +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
> +{
> + bool user_mode, user_as, rflags_ac;
> +
> + if (!!(flags & KVM_X86_EMULFLAG_SKIP_LASS) ||
> + !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS))
> + return false;
> +
> + WARN_ON_ONCE(!is_long_mode(vcpu));
> +
> + user_as = !(la >> 63);
> +
> + /*
> + * An access is a supervisor-mode access if CPL < 3 or if it implicitly
> + * accesses a system data structure. For implicit accesses to system
> + * data structure, the processor acts as if RFLAGS.AC is clear.
> + */
> + if (access & PFERR_IMPLICIT_ACCESS) {
> + user_mode = false;
> + rflags_ac = false;
> + } else {
> + user_mode = vmx_get_cpl(vcpu) == 3;
> + if (!user_mode)
> + rflags_ac = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
> + }
> +
> + if (user_mode != user_as) {
> + /*
> + * Supervisor-mode _data_ accesses to user address space
> + * cause LASS violations only if SMAP is enabled.
> + */
> + if (!user_mode && !(access & PFERR_FETCH_MASK)) {
> + return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) &&
> + !rflags_ac;
> + } else {
> + return true;
> + }
> + }
> +
> + return false;
> +}
> +
> +static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
> +{
> + return is_long_mode(vcpu) && __vmx_check_lass(vcpu, access, la, flags);
> +}
> +
> static struct kvm_x86_ops vmx_x86_ops __initdata = {
> .name = "kvm_intel",
>
> @@ -8207,6 +8260,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
> .complete_emulated_msr = kvm_complete_insn_gp,
>
> .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
> +
> + .check_lass = vmx_check_lass,
> };
>
> static unsigned int vmx_handle_intel_pt_intr(void)
> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
> index a3da84f4ea45..6569385a5978 100644
> --- a/arch/x86/kvm/vmx/vmx.h
> +++ b/arch/x86/kvm/vmx/vmx.h
> @@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
> u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
> u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
>
> +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
> +
> static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
> int type, bool value)
> {

2023-04-25 03:11:41

by Chao Gao

Subject: Re: [PATCH 2/6] KVM: VMX: Add new ops in kvm_x86_ops for LASS violation check

On Thu, Apr 20, 2023 at 09:37:20PM +0800, Zeng Guang wrote:
>+/*
>+ * Determine whether an access to the linear address causes a LASS violation.
>+ * LASS protection is only effective in long mode. As a prerequisite, caller
>+ * should make sure VM running in long mode and invoke this api to do LASS
>+ * violation check.

Could you place the comment above vmx_check_lass()?

And for __vmx_check_lass(), just add:

A variant of vmx_check_lass() without the check for long mode.

>+ */
>+bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
>+{
>+ bool user_mode, user_as, rflags_ac;
>+
>+ if (!!(flags & KVM_X86_EMULFLAG_SKIP_LASS) ||
>+ !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS))
>+ return false;
>+
>+ WARN_ON_ONCE(!is_long_mode(vcpu));
>+
>+ user_as = !(la >> 63);
>+


>+ /*
>+ * An access is a supervisor-mode access if CPL < 3 or if it implicitly
>+ * accesses a system data structure. For implicit accesses to system
>+ * data structure, the processor acts as if RFLAGS.AC is clear.
>+ */
>+ if (access & PFERR_IMPLICIT_ACCESS) {
>+ user_mode = false;
>+ rflags_ac = false;
>+ } else {
>+ user_mode = vmx_get_cpl(vcpu) == 3;
>+ if (!user_mode)
>+ rflags_ac = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
>+ }
>+
>+ if (user_mode != user_as) {

to reduce one level of indentation, how about:

	if (user_mode == user_as)
		return false;

	/*
	 * Supervisor-mode _data_ accesses to user address space
	 * cause LASS violations only if SMAP is enabled.
	 */
	if (!user_mode && !(access & PFERR_FETCH_MASK))
		return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) && !rflags_ac;

	return true;


>+ /*
>+ * Supervisor-mode _data_ accesses to user address space
>+ * cause LASS violations only if SMAP is enabled.
>+ */
>+ if (!user_mode && !(access & PFERR_FETCH_MASK)) {
>+ return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) &&
>+ !rflags_ac;
>+ } else {
>+ return true;
>+ }
>+ }
>+
>+ return false;
>+}
>+
>+static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
>+{
>+ return is_long_mode(vcpu) && __vmx_check_lass(vcpu, access, la, flags);

Why not request all callers to check if vcpu is in long mode?

e.g.,
return is_long_mode(vcpu) && static_call(kvm_x86_check_lass)(...);

then you can rename __vmx_check_lass() to vmx_check_lass() and drop the
original one.
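Something like the following in common x86 code (a sketch only; kvm_check_lass()
is a hypothetical helper name, assuming the hook keeps its current signature):

	static inline bool kvm_check_lass(struct kvm_vcpu *vcpu, u64 access,
					  u64 la, u64 flags)
	{
		/* Do the long-mode check once here rather than in each vendor hook. */
		return is_long_mode(vcpu) &&
		       static_call(kvm_x86_check_lass)(vcpu, access, la, flags);
	}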

>+}
>+
> static struct kvm_x86_ops vmx_x86_ops __initdata = {
> .name = "kvm_intel",
>
>@@ -8207,6 +8260,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
> .complete_emulated_msr = kvm_complete_insn_gp,
>
> .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
>+
>+ .check_lass = vmx_check_lass,
> };
>
> static unsigned int vmx_handle_intel_pt_intr(void)
>diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
>index a3da84f4ea45..6569385a5978 100644
>--- a/arch/x86/kvm/vmx/vmx.h
>+++ b/arch/x86/kvm/vmx/vmx.h
>@@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
> u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
> u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
>
>+bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
>+

no one uses this function. You can defer exporting it to when the first
external caller is added.

> static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
> int type, bool value)
> {
>--
>2.27.0
>

2023-04-25 03:41:05

by Zeng Guang

Subject: Re: [PATCH 2/6] KVM: VMX: Add new ops in kvm_x86_ops for LASS violation check


On 4/24/2023 3:43 PM, Binbin Wu wrote:
>
> On 4/20/2023 9:37 PM, Zeng Guang wrote:
>> Intel introduce LASS (Linear Address Separation) feature providing
> /s/introduce/introduces
OK.
>
>> an independent mechanism to achieve the mode-based protection.
>>
>> LASS partitions 64-bit linear address space into two halves, user-mode
>> address (LA[bit 63]=0) and supervisor-mode address (LA[bit 63]=1). It
>> stops any code execution or data access
>> 1. from user mode to supervisor-mode address space
>> 2. from supervisor mode to user-mode address space
>> and generates LASS violation fault accordingly.
> IMO, the description of point 2 may be misleading, suggesting that LASS stops
> any data access from supervisor mode to the user-mode address space,
> although the following description adds the conditions.

Maybe change it to "It stops any code execution or conditional data access".
The condition is illustrated in the next paragraph.

>
>> A supervisor mode data access causes a LASS violation only if supervisor
>> mode access protection is enabled (CR4.SMAP = 1) and either RFLAGS.AC = 0
>> or the access implicitly accesses a system data structure.
>>
>> Following are the rule of LASS violation check on the linear address(LA).
> /s/rule/rules
OK.
>> User access to supervisor-mode address space:
>> LA[bit 63] && (CPL == 3)
>> Supervisor access to user-mode address space:
>> Instruction fetch: !LA[bit 63] && (CPL < 3)
>> Data access: !LA[bit 63] && (CR4.SMAP==1) && ((RFLAGS.AC == 0 &&
>> CPL < 3) || Implicit supervisor access)
>>
>> Add new ops in kvm_x86_ops to do LASS violation check.
>>
>> Signed-off-by: Zeng Guang <[email protected]>
>> ---
>> arch/x86/include/asm/kvm-x86-ops.h | 1 +
>> arch/x86/include/asm/kvm_host.h | 5 +++
>> arch/x86/kvm/vmx/vmx.c | 55 ++++++++++++++++++++++++++++++
>> arch/x86/kvm/vmx/vmx.h | 2 ++
>> 4 files changed, 63 insertions(+)
>>
>> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
>> index abccd51dcfca..f76c07f2674b 100644
>> --- a/arch/x86/include/asm/kvm-x86-ops.h
>> +++ b/arch/x86/include/asm/kvm-x86-ops.h
>> @@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
>> KVM_X86_OP(complete_emulated_msr)
>> KVM_X86_OP(vcpu_deliver_sipi_vector)
>> KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
>> +KVM_X86_OP_OPTIONAL_RET0(check_lass);
>>
>> #undef KVM_X86_OP
>> #undef KVM_X86_OP_OPTIONAL
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 8ff89a52ef66..31fb8699a1ff 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -69,6 +69,9 @@
>> #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
>> KVM_X86_NOTIFY_VMEXIT_USER)
>>
>> +/* x86-specific emulation flags */
>> +#define KVM_X86_EMULFLAG_SKIP_LASS _BITULL(1)
> Do you use the flag outside of emulator?
> For LAM patch, it's planned to move the flags inside emulator.
IMO, the detailed flag is implementation specific. Is it necessary to
bind it to the emulator even though it's only used inside the emulator?
>> +
>> /* x86-specific vcpu->requests bit members */
>> #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
>> #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
>> @@ -1706,6 +1709,8 @@ struct kvm_x86_ops {
>> * Returns vCPU specific APICv inhibit reasons
>> */
>> unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
>> +
>> + bool (*check_lass)(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
> The flags may be dropped if the caller knows to skip it or not.
Probably I don't get you right. Do you mean it need define another
function without flags ?

>> };
>>
>> struct kvm_x86_nested_ops {
>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>> index c923d7599d71..581327ede66a 100644
>> --- a/arch/x86/kvm/vmx/vmx.c
>> +++ b/arch/x86/kvm/vmx/vmx.c
>> @@ -8070,6 +8070,59 @@ static void vmx_vm_destroy(struct kvm *kvm)
>> free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
>> }
>>
>> +/*
>> + * Determine whether an access to the linear address causes a LASS violation.
>> + * LASS protection is only effective in long mode. As a prerequisite, caller
>> + * should make sure VM
> Should be vCPU?
Similar meaning, I think. :)
>> running in long mode and invoke this api to do LASS
>> + * violation check.
>> + */
>> +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
>> +{
>> + bool user_mode, user_as, rflags_ac;
>> +
>> + if (!!(flags & KVM_X86_EMULFLAG_SKIP_LASS) ||
>> + !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS))
>> + return false;
>> +
>> + WARN_ON_ONCE(!is_long_mode(vcpu));
>> +
>> + user_as = !(la >> 63);
>> +
>> + /*
>> + * An access is a supervisor-mode access if CPL < 3 or if it implicitly
>> + * accesses a system data structure. For implicit accesses to system
>> + * data structure, the processor acts as if RFLAGS.AC is clear.
>> + */
>> + if (access & PFERR_IMPLICIT_ACCESS) {
>> + user_mode = false;
>> + rflags_ac = false;
>> + } else {
>> + user_mode = vmx_get_cpl(vcpu) == 3;
>> + if (!user_mode)
>> + rflags_ac = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
>> + }
>> +
>> + if (user_mode != user_as) {
>> + /*
>> + * Supervisor-mode _data_ accesses to user address space
>> + * cause LASS violations only if SMAP is enabled.
>> + */
>> + if (!user_mode && !(access & PFERR_FETCH_MASK)) {
>> + return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) &&
>> + !rflags_ac;
>> + } else {
>> + return true;
>> + }
>> + }
>> +
>> + return false;
>> +}
>> +
>> +static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
>> +{
>> + return is_long_mode(vcpu) && __vmx_check_lass(vcpu, access, la, flags);
>> +}
>> +
>> static struct kvm_x86_ops vmx_x86_ops __initdata = {
>> .name = "kvm_intel",
>>
>> @@ -8207,6 +8260,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
>> .complete_emulated_msr = kvm_complete_insn_gp,
>>
>> .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
>> +
>> + .check_lass = vmx_check_lass,
>> };
>>
>> static unsigned int vmx_handle_intel_pt_intr(void)
>> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
>> index a3da84f4ea45..6569385a5978 100644
>> --- a/arch/x86/kvm/vmx/vmx.h
>> +++ b/arch/x86/kvm/vmx/vmx.h
>> @@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>> u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
>> u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
>>
>> +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
>> +
>> static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
>> int type, bool value)
>> {

2023-04-25 07:33:37

by Zeng Guang

Subject: Re: [PATCH 2/6] KVM: VMX: Add new ops in kvm_x86_ops for LASS violation check


On 4/25/2023 11:10 AM, Gao, Chao wrote:
> On Thu, Apr 20, 2023 at 09:37:20PM +0800, Zeng Guang wrote:
>> +/*
>> + * Determine whether an access to the linear address causes a LASS violation.
>> + * LASS protection is only effective in long mode. As a prerequisite, caller
>> + * should make sure VM running in long mode and invoke this api to do LASS
>> + * violation check.
> Could you place the comment above vmx_check_lass()?
>
> And for __vmx_check_lass(), just add:
>
> A variant of vmx_check_lass() without the check for long mode.
>
>> + */
>> +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
>> +{
>> + bool user_mode, user_as, rflags_ac;
>> +
>> + if (!!(flags & KVM_X86_EMULFLAG_SKIP_LASS) ||
>> + !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS))
>> + return false;
>> +
>> + WARN_ON_ONCE(!is_long_mode(vcpu));
>> +
>> + user_as = !(la >> 63);
>> +
>
>> + /*
>> + * An access is a supervisor-mode access if CPL < 3 or if it implicitly
>> + * accesses a system data structure. For implicit accesses to system
>> + * data structure, the processor acts as if RFLAGS.AC is clear.
>> + */
>> + if (access & PFERR_IMPLICIT_ACCESS) {
>> + user_mode = false;
>> + rflags_ac = false;
>> + } else {
>> + user_mode = vmx_get_cpl(vcpu) == 3;
>> + if (!user_mode)
>> + rflags_ac = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
>> + }
>> +
>> + if (user_mode != user_as) {
> to reduce one level of indentation, how about:
>
> 	if (user_mode == user_as)
> 		return false;
>
> 	/*
> 	 * Supervisor-mode _data_ accesses to user address space
> 	 * cause LASS violations only if SMAP is enabled.
> 	 */
> 	if (!user_mode && !(access & PFERR_FETCH_MASK))
> 		return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) && !rflags_ac;
>
> 	return true;
>
Looks better.


>> + /*
>> + * Supervisor-mode _data_ accesses to user address space
>> + * cause LASS violations only if SMAP is enabled.
>> + */
>> + if (!user_mode && !(access & PFERR_FETCH_MASK)) {
>> + return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) &&
>> + !rflags_ac;
>> + } else {
>> + return true;
>> + }
>> + }
>> +
>> + return false;
>> +}
>> +
>> +static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags)
>> +{
>> + return is_long_mode(vcpu) && __vmx_check_lass(vcpu, access, la, flags);
> Why not request all callers to check if vcpu is in long mode?
>
> e.g.,
> return is_long_mode(vcpu) && static_call(kvm_x86_check_lass)(...);
>
> then you can rename __vmx_check_lass() to vmx_check_lass() and drop the
> original one.
By design, __vmx_check_lass() is standalone, used for the LASS violation
check only. In some cases, the CPU mode is already identified prior to
performing LASS protection; please refer to patch 4. So we provide two
interfaces: vmx_check_lass(), with the CPU mode check wrapped, for use by
other modules such as the kvm emulator, and __vmx_check_lass(), dedicated
to VMX.

I will check the feasibility of reorganizing the code to make it more optimal.

Thanks.
>> +}
>> +
>> static struct kvm_x86_ops vmx_x86_ops __initdata = {
>> .name = "kvm_intel",
>>
>> @@ -8207,6 +8260,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
>> .complete_emulated_msr = kvm_complete_insn_gp,
>>
>> .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
>> +
>> + .check_lass = vmx_check_lass,
>> };
>>
>> static unsigned int vmx_handle_intel_pt_intr(void)
>> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
>> index a3da84f4ea45..6569385a5978 100644
>> --- a/arch/x86/kvm/vmx/vmx.h
>> +++ b/arch/x86/kvm/vmx/vmx.h
>> @@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>> u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
>> u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
>>
>> +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la, u64 flags);
>> +
> no one uses this function. You can defer exporting it to when the first
> external caller is added.
>
>> static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
>> int type, bool value)
>> {
>> --
>> 2.27.0
>>

2023-04-26 01:50:17

by Binbin Wu

Subject: Re: [PATCH 2/6] KVM: VMX: Add new ops in kvm_x86_ops for LASS violation check



On 4/25/2023 11:26 AM, Zeng Guang wrote:
>
> On 4/24/2023 3:43 PM, Binbin Wu wrote:
>>
[...]
>> On 4/20/2023 9:37 PM, Zeng Guang wrote:
>>
>> diff --git a/arch/x86/include/asm/kvm-x86-ops.h
>> b/arch/x86/include/asm/kvm-x86-ops.h
>> index abccd51dcfca..f76c07f2674b 100644
>> --- a/arch/x86/include/asm/kvm-x86-ops.h
>> +++ b/arch/x86/include/asm/kvm-x86-ops.h
>> @@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
>>    KVM_X86_OP(complete_emulated_msr)
>>    KVM_X86_OP(vcpu_deliver_sipi_vector)
>>    KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
>> +KVM_X86_OP_OPTIONAL_RET0(check_lass);
>>       #undef KVM_X86_OP
>>    #undef KVM_X86_OP_OPTIONAL
>> diff --git a/arch/x86/include/asm/kvm_host.h
>> b/arch/x86/include/asm/kvm_host.h
>> index 8ff89a52ef66..31fb8699a1ff 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -69,6 +69,9 @@
>>    #define KVM_X86_NOTIFY_VMEXIT_VALID_BITS
>> (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
>>                             KVM_X86_NOTIFY_VMEXIT_USER)
>>    +/* x86-specific emulation flags */
>> +#define KVM_X86_EMULFLAG_SKIP_LASS    _BITULL(1)
>> Do you use the flag outside of emulator?
>> For LAM patch, it's planned to move the flags inside emulator.
> IMO, the detailed flag is implementation specific. Is it necessary to
> bind it to the emulator even though it's only used inside the emulator?
For the rest (i.e., the VM-exit handling), the code is already in the
vendor-specific implementations.
The callers already know whether to skip the LASS check or not.

I plan to do a cleanup to consolidate the flags into one parameter for
__linearize().
And the consolidated flags value will be extended for LAM and other
features (e.g. LASS).

I post the proposed patch as follows; could you help to check whether
it is OK for LASS to follow?



arch/x86/kvm/emulate.c     | 20 ++++++++++++++------
 arch/x86/kvm/kvm_emulate.h |  4 ++++
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a20bec931764..5fb516bc5731 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                        struct segmented_address addr,
                        unsigned *max_size, unsigned size,
-                       bool write, bool fetch,
-                       enum x86emul_mode mode, ulong *linear)
+                       u64 flags, enum x86emul_mode mode,
+                       ulong *linear)
 {
     struct desc_struct desc;
     bool usable;
@@ -696,6 +696,8 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
     u32 lim;
     u16 sel;
     u8  va_bits;
+    bool fetch = !!(flags & X86_EMULFLAG_FETCH);
+    bool write = !!(flags & X86_EMULFLAG_WRITE);

     la = seg_base(ctxt, addr.seg) + addr.ea;
     *max_size = 0;
@@ -757,7 +759,12 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
              ulong *linear)
 {
     unsigned max_size;
-    return __linearize(ctxt, addr, &max_size, size, write, false,
+    u64 flags = 0;
+
+    if (write)
+        flags |= X86_EMULFLAG_WRITE;
+
+    return __linearize(ctxt, addr, &max_size, size, flags,
                ctxt->mode, linear);
 }

@@ -768,10 +775,11 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
     unsigned max_size;
     struct segmented_address addr = { .seg = VCPU_SREG_CS,
                        .ea = dst };
+    u64 flags = X86_EMULFLAG_FETCH;

     if (ctxt->op_bytes != sizeof(unsigned long))
         addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-    rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+    rc = __linearize(ctxt, addr, &max_size, 1, flags, ctxt->mode, &linear);
     if (rc == X86EMUL_CONTINUE)
         ctxt->_eip = addr.ea;
     return rc;
@@ -896,6 +904,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
     int cur_size = ctxt->fetch.end - ctxt->fetch.data;
     struct segmented_address addr = { .seg = VCPU_SREG_CS,
                        .ea = ctxt->eip + cur_size };
+    u64 flags = X86_EMULFLAG_FETCH;

     /*
      * We do not know exactly how many bytes will be needed, and
@@ -907,8 +916,7 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
      * boundary check itself.  Instead, we use max_size to check
      * against op_size.
      */
-    rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-             &linear);
+    rc = __linearize(ctxt, addr, &max_size, 0, flags, ctxt->mode, &linear);
     if (unlikely(rc != X86EMUL_CONTINUE))
         return rc;

diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index ab65f3a47dfd..5451a37f135f 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -88,6 +88,10 @@ struct x86_instruction_info {
 #define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
 #define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */

+/* x86-specific emulation flags */
+#define X86_EMULFLAG_FETCH            _BITULL(0)
+#define X86_EMULFLAG_WRITE            _BITULL(1)
+
 struct x86_emulate_ops {
     void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
     /*

>>> +
>>>    /* x86-specific vcpu->requests bit members */
>>>    #define KVM_REQ_MIGRATE_TIMER        KVM_ARCH_REQ(0)
>>>    #define KVM_REQ_REPORT_TPR_ACCESS    KVM_ARCH_REQ(1)
>>> @@ -1706,6 +1709,8 @@ struct kvm_x86_ops {
>>>         * Returns vCPU specific APICv inhibit reasons
>>>         */
>>>        unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct
>>> kvm_vcpu *vcpu);
>>> +
>>> +    bool (*check_lass)(struct kvm_vcpu *vcpu, u64 access, u64 la,
>>> u64 flags);
>> The flags may be dropped if the caller knows to skip it or not.
> Probably I don't get you right. Do you mean it need define another
> function without flags ?
>
>>>    };
>>>       struct kvm_x86_nested_ops {
>>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>>> index c923d7599d71..581327ede66a 100644
>>> --- a/arch/x86/kvm/vmx/vmx.c
>>> +++ b/arch/x86/kvm/vmx/vmx.c
>>> @@ -8070,6 +8070,59 @@ static void vmx_vm_destroy(struct kvm *kvm)
>>>        free_pages((unsigned long)kvm_vmx->pid_table,
>>> vmx_get_pid_table_order(kvm));
>>>    }
>>>    +/*
>>> + * Determine whether an access to the linear address causes a LASS
>>> violation.
>>> + * LASS protection is only effective in long mode. As a
>>> prerequisite, caller
>>> + * should make sure VM
>> Should be vCPU?
> Similar meaning, I think. :)
>>> running in long mode and invoke this api to do LASS
>>> + * violation check.
>>> + */
>>> +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la,
>>> u64 flags)
>>> +{
>>> +    bool user_mode, user_as, rflags_ac;
>>> +
>>> +    if (!!(flags & KVM_X86_EMULFLAG_SKIP_LASS) ||
>>> +        !kvm_is_cr4_bit_set(vcpu, X86_CR4_LASS))
>>> +        return false;
>>> +
>>> +    WARN_ON_ONCE(!is_long_mode(vcpu));
>>> +
>>> +    user_as = !(la >> 63);
>>> +
>>> +    /*
>>> +     * An access is a supervisor-mode access if CPL < 3 or if it
>>> implicitly
>>> +     * accesses a system data structure. For implicit accesses to
>>> system
>>> +     * data structure, the processor acts as if RFLAGS.AC is clear.
>>> +     */
>>> +    if (access & PFERR_IMPLICIT_ACCESS) {
>>> +        user_mode = false;
>>> +        rflags_ac = false;
>>> +    } else {
>>> +        user_mode = vmx_get_cpl(vcpu) == 3;
>>> +        if (!user_mode)
>>> +            rflags_ac = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
>>> +    }
>>> +
>>> +    if (user_mode != user_as) {
>>> +        /*
>>> +         * Supervisor-mode _data_ accesses to user address space
>>> +         * cause LASS violations only if SMAP is enabled.
>>> +         */
>>> +        if (!user_mode && !(access & PFERR_FETCH_MASK)) {
>>> +            return kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP) &&
>>> +                   !rflags_ac;
>>> +        } else {
>>> +            return true;
>>> +        }
>>> +    }
>>> +
>>> +    return false;
>>> +}
>>> +
>>> +static bool vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64
>>> la, u64 flags)
>>> +{
>>> +    return is_long_mode(vcpu) && __vmx_check_lass(vcpu, access, la,
>>> flags);
>>> +}
>>> +
>>>    static struct kvm_x86_ops vmx_x86_ops __initdata = {
>>>        .name = "kvm_intel",
>>>    @@ -8207,6 +8260,8 @@ static struct kvm_x86_ops vmx_x86_ops
>>> __initdata = {
>>>        .complete_emulated_msr = kvm_complete_insn_gp,
>>>           .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
>>> +
>>> +    .check_lass = vmx_check_lass,
>>>    };
>>>       static unsigned int vmx_handle_intel_pt_intr(void)
>>> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
>>> index a3da84f4ea45..6569385a5978 100644
>>> --- a/arch/x86/kvm/vmx/vmx.h
>>> +++ b/arch/x86/kvm/vmx/vmx.h
>>> @@ -433,6 +433,8 @@ void vmx_enable_intercept_for_msr(struct
>>> kvm_vcpu *vcpu, u32 msr, int type);
>>>    u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
>>>    u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
>>>    +bool __vmx_check_lass(struct kvm_vcpu *vcpu, u64 access, u64 la,
>>> u64 flags);
>>> +
>>>    static inline void vmx_set_intercept_for_msr(struct kvm_vcpu
>>> *vcpu, u32 msr,
>>>                             int type, bool value)
>>>    {