2015-05-07 08:23:58

by Xiao Guangrong

Subject: [PATCH 0/3] KVM: MMU: fix SMAP virtualization

There are some bugs in the current code:
- PFEC.RSVD is not always zero, yet KVM reuses this bit position to
cache the CPL and EFLAGS.AC state for the SMAP check

- the same shadow page is reused whether SMAP is enabled or disabled;
however, a user page can be turned into a kernel page in some cases,
so the SMAP check is missed on that entry

This patchset fixes these bugs; a test case will be posted soon

Xiao Guangrong (3):
KVM: MMU: fix smap permission check
KVM: MMU: fix SMAP virtualization
KVM: MMU: document smap_andnot_wp

Documentation/virtual/kvm/mmu.txt | 18 ++++++++++++++----
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu.c | 7 +++++--
arch/x86/kvm/mmu.h | 4 ++--
arch/x86/kvm/paging_tmpl.h | 7 +++++++
arch/x86/kvm/x86.c | 8 +++-----
6 files changed, 32 insertions(+), 13 deletions(-)

--
2.1.0


2015-05-07 08:25:01

by Xiao Guangrong

Subject: [PATCH 1/3] KVM: MMU: fix smap permission check

The current permission check assumes that the RSVD bit in PFEC is always
zero; however, this is not true, since MMIO #PF uses it to quickly
identify MMIO access.

Fix it by clearing the bit when walking the guest page table is needed.

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.h | 2 ++
arch/x86/kvm/paging_tmpl.h | 7 +++++++
2 files changed, 9 insertions(+)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c7d6563..06eb2fc 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -166,6 +166,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
int index = (pfec >> 1) +
(smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));

+ WARN_ON(pfec & PFERR_RSVD_MASK);
+
return (mmu->permissions[index] >> pte_access) & 1;
}

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index fd49c86..6e6d115 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
mmu_is_nested(vcpu));
if (likely(r != RET_MMIO_PF_INVALID))
return r;
+
+ /*
+ * page fault with PFEC.RSVD = 1 is caused by shadow
+ * page fault, should not be used to walk guest page
+ * table.
+ */
+ error_code &= ~PFERR_RSVD_MASK;
};

r = mmu_topup_memory_caches(vcpu);
--
2.1.0
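
To see concretely why the WARN_ON and the RSVD clearing matter:
permission_fault() folds EFLAGS.AC into the index slot that PFEC.RSVD
would otherwise occupy, so a stray RSVD bit selects the wrong
permissions[] entry. The standalone program below only illustrates that
aliasing; the bit definitions and the index formula mirror the hunk
above, while the surrounding program is made up for the demo.

/*
 * Standalone illustration (not KVM code) of why permission_fault()
 * must never see PFEC.RSVD = 1.  The bit positions mirror the x86/KVM
 * definitions; the index formula is the one from arch/x86/kvm/mmu.h.
 */
#include <stdio.h>

#define PFERR_RSVD_BIT    3
#define PFERR_RSVD_MASK   (1U << PFERR_RSVD_BIT)
#define X86_EFLAGS_AC_BIT 18
#define X86_EFLAGS_AC     (1UL << X86_EFLAGS_AC_BIT)

static int permission_index(unsigned pfec, int cpl, unsigned long rflags)
{
	/* non-zero only for supervisor (CPL < 3) accesses with EFLAGS.AC set */
	unsigned long smap = (unsigned long)(cpl - 3) & (rflags & X86_EFLAGS_AC);

	/* EFLAGS.AC (bit 18) is shifted into the slot left free by PFEC.RSVD */
	return (pfec >> 1) +
	       (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
}

int main(void)
{
	unsigned pfec = 0x1;	/* P = 1: supervisor read of a present page */

	/* a supervisor access with EFLAGS.AC = 1 ... */
	int a = permission_index(pfec, 0, X86_EFLAGS_AC);
	/* ... is indistinguishable from an error code that left RSVD set */
	int b = permission_index(pfec | PFERR_RSVD_MASK, 0, 0);

	printf("index(AC=1) = %d, index(RSVD=1) = %d\n", a, b);
	return 0;
}

Both calls print index 4: an MMIO-style error code with RSVD set lands
on the same permissions[] slot as a legitimate supervisor access with
EFLAGS.AC = 1, which is exactly the confusion the patch avoids by
clearing the bit before the guest page table walk.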

2015-05-07 08:24:05

by Xiao Guangrong

Subject: [PATCH 2/3] KVM: MMU: fix SMAP virtualization

KVM may turn a user page into a kernel page when the kernel writes a
readonly user page if CR0.WP = 1. This shadow page entry will be reused
after SMAP is enabled, so the kernel is still allowed to access the
user page.

Fix it by setting SMAP && !CR0.WP in the shadow page's role and
resetting the MMU once CR4.SMAP is updated.

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu.c | 7 +++++--
arch/x86/kvm/mmu.h | 2 --
arch/x86/kvm/x86.c | 8 +++-----
4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8b661d1..bbb8f4e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -207,6 +207,7 @@ union kvm_mmu_page_role {
unsigned nxe:1;
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
+ unsigned smap_andnot_wp:1;
};
};

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e73bbda..545d9e4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3737,8 +3737,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
}
}

-void update_permission_bitmask(struct kvm_vcpu *vcpu,
- struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu, bool ept)
{
unsigned bit, byte, pfec;
u8 map;
@@ -3919,6 +3919,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+ bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
struct kvm_mmu *context = &vcpu->arch.mmu;

MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3937,6 +3938,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
context->base_role.cr0_wp = is_write_protection(vcpu);
context->base_role.smep_andnot_wp
= smep && !is_write_protection(vcpu);
+ context->base_role.smap_andnot_wp
+ = smap && !is_write_protection(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 06eb2fc..0ada65e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,8 +71,6 @@ enum {
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- bool ept);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3dc6c97..c39d34a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
- X86_CR4_PAE | X86_CR4_SMEP;
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+ X86_CR4_SMEP | X86_CR4_SMAP;
+
if (cr4 & CR4_RESERVED_BITS)
return 1;

@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);

- if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
- update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
-
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);

--
2.1.0
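
The key idea of the fix is that shadow pages are keyed by (gfn, role),
so recording SMAP && !CR0.WP in the role keeps pre-SMAP and post-SMAP
translations on separate shadow pages instead of letting a kernel-page
spte be reused. The toy model below sketches that comparison; the field
names follow kvm_mmu_page_role, but the union layout is simplified and
is not the real structure.

#include <stdbool.h>
#include <stdio.h>

union mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;	/* added by this patch */
	};
};

static bool same_shadow_page(union mmu_page_role a, union mmu_page_role b)
{
	/* the role comparison is a plain compare of the packed word */
	return a.word == b.word;
}

int main(void)
{
	union mmu_page_role before = { .word = 0 }, after = { .word = 0 };

	before.level = after.level = 4;
	before.cr0_wp = after.cr0_wp = 0;	/* guest runs with CR0.WP = 0 */
	after.smap_andnot_wp = 1;		/* guest then enables CR4.SMAP */

	/* prints "no": the old page cannot be found once SMAP is on */
	printf("reuse old shadow page? %s\n",
	       same_shadow_page(before, after) ? "yes" : "no");
	return 0;
}

Once the guest enables CR4.SMAP while CR0.WP is still clear, the role
word changes, so the shadow page built before SMAP can no longer be
looked up and a fresh one is created with the SMAP checks applied.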

2015-05-07 08:24:08

by Xiao Guangrong

Subject: [PATCH 3/3] KVM: MMU: document smap_andnot_wp

Document this new role field

Signed-off-by: Xiao Guangrong <[email protected]>
---
Documentation/virtual/kvm/mmu.txt | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index 53838d9..c59bd9b 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -169,6 +169,10 @@ Shadow pages contain the following information:
Contains the value of cr4.smep && !cr0.wp for which the page is valid
(pages for which this is true are different from other pages; see the
treatment of cr0.wp=0 below).
+ role.smap_andnot_wp:
+ Contains the value of cr4.smap && !cr0.wp for which the page is valid
+ (pages for which this is true are different from other pages; see the
+ treatment of cr0.wp=0 below).
gfn:
Either the guest page table containing the translations shadowed by this
page, or the base page frame for linear translations. See role.direct.
@@ -344,10 +348,16 @@ on fault type:

(user write faults generate a #PF)

-In the first case there is an additional complication if CR4.SMEP is
-enabled: since we've turned the page into a kernel page, the kernel may now
-execute it. We handle this by also setting spte.nx. If we get a user
-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+ the kernel may now execute it. We handle this by also setting spte.nx.
+ If we get a user fetch or read fault, we'll change spte.u=1 and
+ spte.nx=gpte.nx back.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+ page, it cannot be reused when CR4.SMAP is enabled. We set
+ CR4.SMAP && !CR0.WP into the shadow page's role to avoid this case. Note
+ that we do not care about the case where CR4.SMAP is enabled, since KVM
+ will directly inject #PF to the guest due to the failed permission check.

To prevent an spte that was converted into a kernel page with cr0.wp=0
from being written by the kernel after cr0.wp has changed to 1, we make
--
2.1.0
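
For context, the architectural rule that smap_andnot_wp ultimately
enforces is sketched below. This is a simplified paraphrase of the
SDM's description of SMAP, not KVM's permission bitmap, and the helper
and field names are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

struct access {
	bool supervisor;	/* CPL < 3 */
	bool user_page;		/* page is mapped with U/S = 1 */
	bool implicit;		/* e.g. descriptor-table access */
};

static bool smap_denies(bool cr4_smap, bool eflags_ac, struct access a)
{
	if (!cr4_smap || !a.supervisor || !a.user_page)
		return false;
	/* EFLAGS.AC = 1 only overrides SMAP for explicit data accesses */
	return a.implicit || !eflags_ac;
}

int main(void)
{
	struct access a = { .supervisor = true, .user_page = true };

	printf("%d\n", smap_denies(true, false, a));	/* 1: fault */
	printf("%d\n", smap_denies(true, true, a));	/* 0: AC lifts it */
	return 0;
}

The case the patches above care about is the first one: with CR4.SMAP
set, a supervisor access to a user page must fault, so an spte that was
quietly converted into a kernel page while SMAP was off cannot simply
be carried over once SMAP is turned on.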

2015-05-07 09:32:21

by Paolo Bonzini

Subject: Re: [PATCH 1/3] KVM: MMU: fix smap permission check



On 07/05/2015 10:20, Xiao Guangrong wrote:
> The current permission check assumes that the RSVD bit in PFEC is always
> zero; however, this is not true, since MMIO #PF uses it to quickly
> identify MMIO access.
>
> Fix it by clearing the bit when walking the guest page table is needed.
>
> Signed-off-by: Xiao Guangrong <[email protected]>
> ---
> arch/x86/kvm/mmu.h | 2 ++
> arch/x86/kvm/paging_tmpl.h | 7 +++++++
> 2 files changed, 9 insertions(+)
>
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index c7d6563..06eb2fc 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -166,6 +166,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> int index = (pfec >> 1) +
> (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
>
> + WARN_ON(pfec & PFERR_RSVD_MASK);
> +
> return (mmu->permissions[index] >> pte_access) & 1;
> }
>
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index fd49c86..6e6d115 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
> mmu_is_nested(vcpu));
> if (likely(r != RET_MMIO_PF_INVALID))
> return r;
> +
> + /*
> + * page fault with PFEC.RSVD = 1 is caused by shadow
> + * page fault, should not be used to walk guest page
> + * table.
> + */
> + error_code &= ~PFERR_RSVD_MASK;
> };
>
> r = mmu_topup_memory_caches(vcpu);
>

Applied.

For the other patches I'm waiting for an answer re. kvm_mmu_pte_write.

Thanks,

Paolo

2015-05-07 09:54:02

by Xiao Guangrong

Subject: Re: [PATCH 1/3] KVM: MMU: fix smap permission check



On 05/07/2015 05:32 PM, Paolo Bonzini wrote:
>
>
> On 07/05/2015 10:20, Xiao Guangrong wrote:
>> The current permission check assumes that the RSVD bit in PFEC is always
>> zero; however, this is not true, since MMIO #PF uses it to quickly
>> identify MMIO access.
>>
>> Fix it by clearing the bit when walking the guest page table is needed.
>>
>> Signed-off-by: Xiao Guangrong <[email protected]>
>> ---
>> arch/x86/kvm/mmu.h | 2 ++
>> arch/x86/kvm/paging_tmpl.h | 7 +++++++
>> 2 files changed, 9 insertions(+)
>>
>> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
>> index c7d6563..06eb2fc 100644
>> --- a/arch/x86/kvm/mmu.h
>> +++ b/arch/x86/kvm/mmu.h
>> @@ -166,6 +166,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>> int index = (pfec >> 1) +
>> (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
>>
>> + WARN_ON(pfec & PFERR_RSVD_MASK);
>> +
>> return (mmu->permissions[index] >> pte_access) & 1;
>> }
>>
>> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
>> index fd49c86..6e6d115 100644
>> --- a/arch/x86/kvm/paging_tmpl.h
>> +++ b/arch/x86/kvm/paging_tmpl.h
>> @@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
>> mmu_is_nested(vcpu));
>> if (likely(r != RET_MMIO_PF_INVALID))
>> return r;
>> +
>> + /*
>> + * page fault with PFEC.RSVD = 1 is caused by shadow
>> + * page fault, should not be used to walk guest page
>> + * table.
>> + */
>> + error_code &= ~PFERR_RSVD_MASK;
>> };
>>
>> r = mmu_topup_memory_caches(vcpu);
>>
>
> Applied.
>
> For the other patches I'm waiting for an answer re. kvm_mmu_pte_write.

Sure. Actually, I noticed these bugs when I was reviewing your patches,
and will continue to review soon. :)

2015-05-11 13:20:47

by Xiao Guangrong

Subject: [PATCH v2 2/3] KVM: MMU: fix SMAP virtualization


From: Xiao Guangrong <[email protected]>
Date: Mon, 11 May 2015 21:09:15 +0800
Subject: [PATCH] KVM: MMU: fix SMAP virtualization

KVM may turn a user page into a kernel page when the kernel writes a
readonly user page if CR0.WP = 1. This shadow page entry will be reused
after SMAP is enabled, so the kernel is still allowed to access the
user page.

Fix it by setting SMAP && !CR0.WP in the shadow page's role and
resetting the MMU once CR4.SMAP is updated.

Changelog in v2:
- rebase the patch on commit 31fd9880a1c5 ("KVM: MMU: fix CR4.SMEP=1,
CR0.WP=0 with shadow pages") from Paolo Bonzini
- do not prefetch the spte if its shadow page's smap_andnot_wp does not
match the current vcpu's

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/mmu.c | 16 ++++++++++++----
arch/x86/kvm/mmu.h | 2 --
arch/x86/kvm/x86.c | 8 +++-----
4 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8b661d1..bbb8f4e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -207,6 +207,7 @@ union kvm_mmu_page_role {
unsigned nxe:1;
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
+ unsigned smap_andnot_wp:1;
};
};

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3711095..4058a6b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3737,8 +3737,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
}
}

-void update_permission_bitmask(struct kvm_vcpu *vcpu,
- struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu, bool ept)
{
unsigned bit, byte, pfec;
u8 map;
@@ -3919,6 +3919,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+ bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
struct kvm_mmu *context = &vcpu->arch.mmu;

MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3937,6 +3938,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
context->base_role.cr0_wp = is_write_protection(vcpu);
context->base_role.smep_andnot_wp
= smep && !is_write_protection(vcpu);
+ context->base_role.smap_andnot_wp
+ = smap && !is_write_protection(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

@@ -4208,12 +4211,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
- union kvm_mmu_page_role mask = { .word = 0 };
struct kvm_mmu_page *sp;
LIST_HEAD(invalid_list);
u64 entry, gentry, *spte;
int npte;
bool remote_flush, local_flush, zap_page;
+ union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
+ .cr0_wp = 1,
+ .cr4_pae = 1,
+ .nxe = 1,
+ .smep_andnot_wp = 1,
+ .smap_andnot_wp = 1,
+ };

/*
* If we don't have indirect shadow pages, it means no page is
@@ -4239,7 +4248,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

- mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (detect_write_misaligned(sp, gpa, bytes) ||
detect_write_flooding(sp)) {
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 06eb2fc..0ada65e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,8 +71,6 @@ enum {
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- bool ept);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cdccbe1..cde5d61 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
- X86_CR4_PAE | X86_CR4_SMEP;
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+ X86_CR4_SMEP | X86_CR4_SMAP;
+
if (cr4 & CR4_RESERVED_BITS)
return 1;

@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);

- if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
- update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
-
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);

--
2.1.0
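
The second v2 changelog item corresponds to the new mask initializer in
kvm_mmu_pte_write(): presumably the mask is applied to an XOR of the
shadow page's role word and the current vcpu's base role, so the
written gpte is only prefetched when the two agree on every masked bit,
smap_andnot_wp now included. A minimal sketch of that comparison, with
invented bit positions (the real role is the packed bitfield shown in
the hunk above):

#include <stdio.h>

/* illustrative bit positions only; the real layout is the role bitfield */
#define ROLE_CR0_WP          (1u << 0)
#define ROLE_SMEP_ANDNOT_WP  (1u << 1)
#define ROLE_SMAP_ANDNOT_WP  (1u << 2)

static int role_matches(unsigned sp_role, unsigned base_role, unsigned mask)
{
	/* reuse the cached translation only if no masked role bit differs */
	return !((sp_role ^ base_role) & mask);
}

int main(void)
{
	unsigned mask = ROLE_CR0_WP | ROLE_SMEP_ANDNOT_WP | ROLE_SMAP_ANDNOT_WP;
	unsigned sp_role = 0;				/* page created before SMAP */
	unsigned base_role = ROLE_SMAP_ANDNOT_WP;	/* vcpu now has SMAP && !WP */

	/* prints "no": the pte write is not prefetched for this vcpu */
	printf("prefetch allowed? %s\n",
	       role_matches(sp_role, base_role, mask) ? "yes" : "no");
	return 0;
}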

2015-05-11 13:41:34

by Paolo Bonzini

Subject: Re: [PATCH v2 2/3] KVM: MMU: fix SMAP virtualization

On 11/05/2015 15:17, Xiao Guangrong wrote:
>
> From: Xiao Guangrong <[email protected]>
> Date: Mon, 11 May 2015 21:09:15 +0800
> Subject: [PATCH] KVM: MMU: fix SMAP virtualization
>
> KVM may turn a user page into a kernel page when the kernel writes a
> readonly user page if CR0.WP = 1. This shadow page entry will be reused
> after SMAP is enabled, so the kernel is still allowed to access the
> user page.
>
> Fix it by setting SMAP && !CR0.WP in the shadow page's role and
> resetting the MMU once CR4.SMAP is updated.
>
> Changelog in v2:
> - rebase the patch on commit 31fd9880a1c5 ("KVM: MMU: fix CR4.SMEP=1,
> CR0.WP=0 with shadow pages") from Paolo Bonzini
> - do not prefetch the spte if its shadow page's smap_andnot_wp does not
> match the current vcpu's
>
> Signed-off-by: Xiao Guangrong <[email protected]>

Sorry, whitespace is mangled in the patch.

Paolo

> ---
> arch/x86/include/asm/kvm_host.h | 1 +
> arch/x86/kvm/mmu.c | 16 ++++++++++++----
> arch/x86/kvm/mmu.h | 2 --
> arch/x86/kvm/x86.c | 8 +++-----
> 4 files changed, 16 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h
> b/arch/x86/include/asm/kvm_host.h
> index 8b661d1..bbb8f4e 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -207,6 +207,7 @@ union kvm_mmu_page_role {
> unsigned nxe:1;
> unsigned cr0_wp:1;
> unsigned smep_andnot_wp:1;
> + unsigned smap_andnot_wp:1;
> };
> };
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 3711095..4058a6b 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3737,8 +3737,8 @@ static void reset_rsvds_bits_mask_ept(struct
> kvm_vcpu *vcpu,
> }
> }
>
> -void update_permission_bitmask(struct kvm_vcpu *vcpu,
> - struct kvm_mmu *mmu, bool ept)
> +static void update_permission_bitmask(struct kvm_vcpu *vcpu,
> + struct kvm_mmu *mmu, bool ept)
> {
> unsigned bit, byte, pfec;
> u8 map;
> @@ -3919,6 +3919,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
> void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
> {
> bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
> + bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
> struct kvm_mmu *context = &vcpu->arch.mmu;
>
> MMU_WARN_ON(VALID_PAGE(context->root_hpa));
> @@ -3937,6 +3938,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
> context->base_role.cr0_wp = is_write_protection(vcpu);
> context->base_role.smep_andnot_wp
> = smep && !is_write_protection(vcpu);
> + context->base_role.smap_andnot_wp
> + = smap && !is_write_protection(vcpu);
> }
> EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
>
> @@ -4208,12 +4211,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
> gpa_t gpa,
> const u8 *new, int bytes)
> {
> gfn_t gfn = gpa >> PAGE_SHIFT;
> - union kvm_mmu_page_role mask = { .word = 0 };
> struct kvm_mmu_page *sp;
> LIST_HEAD(invalid_list);
> u64 entry, gentry, *spte;
> int npte;
> bool remote_flush, local_flush, zap_page;
> + union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
> + .cr0_wp = 1,
> + .cr4_pae = 1,
> + .nxe = 1,
> + .smep_andnot_wp = 1,
> + .smap_andnot_wp = 1,
> + };
>
> /*
> * If we don't have indirect shadow pages, it means no page is
> @@ -4239,7 +4248,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu,
> gpa_t gpa,
> ++vcpu->kvm->stat.mmu_pte_write;
> kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
>
> - mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
> for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
> if (detect_write_misaligned(sp, gpa, bytes) ||
> detect_write_flooding(sp)) {
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 06eb2fc..0ada65e 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -71,8 +71,6 @@ enum {
> int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool
> direct);
> void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
> void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
> -void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> - bool ept);
>
> static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
> {
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index cdccbe1..cde5d61 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
> int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> {
> unsigned long old_cr4 = kvm_read_cr4(vcpu);
> - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
> - X86_CR4_PAE | X86_CR4_SMEP;
> + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
> + X86_CR4_SMEP | X86_CR4_SMAP;
> +
> if (cr4 & CR4_RESERVED_BITS)
> return 1;
>
> @@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long
> cr4)
> (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
> kvm_mmu_reset_context(vcpu);
>
> - if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
> - update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
> -
> if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
> kvm_update_cpuid(vcpu);
>