2010-07-27 03:35:43

by Xiao Guangrong

Subject: [PATCH 1/3] KVM: MMU: using kvm_set_pfn_accessed() instead of mark_page_accessed()

It's a small cleanup: use kvm_set_pfn_accessed() instead of
mark_page_accessed()

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd82635..e10f2bd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -313,7 +313,7 @@ static void update_spte(u64 *sptep, u64 new_spte)
 	else {
 		old_spte = __xchg_spte(sptep, new_spte);
 		if (old_spte & shadow_accessed_mask)
-			mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
+			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 	}
 }

--
1.6.1.2
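
For context on why this is more than renaming: mark_page_accessed() takes a
struct page, but not every pfn an spte can point at is backed by one. Below
is a minimal sketch of the helper, assuming the virt/kvm/kvm_main.c
implementation of this era (kvm_is_mmio_pfn() and the pfn_t type come from
that tree, not from this patch):

	void kvm_set_pfn_accessed(pfn_t pfn)
	{
		/* MMIO pfns have no struct page to mark accessed. */
		if (!kvm_is_mmio_pfn(pfn))
			mark_page_accessed(pfn_to_page(pfn));
	}

So the cleanup also makes the call site safe and uniform with the rest of
KVM's pfn API.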


2010-07-27 03:37:13

by Xiao Guangrong

Subject: [PATCH 2/3] KVM: MMU: move bits lost judgement into a separate function

Introduce a spte_bits_lost() function to judge whether spte bits may
be lost; it's more readable and will help us clean up the code later

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 20 +++++++++++++++++---
1 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e10f2bd..dd6c192 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -303,6 +303,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
#endif
}

+static bool spte_bits_lost(u64 spte)
+{
+	if (!shadow_accessed_mask)
+		return false;
+
+	if (!is_shadow_present_pte(spte))
+		return false;
+
+	if (spte & shadow_accessed_mask)
+		return false;
+
+	return true;
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
 	u64 old_spte;
@@ -683,14 +697,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn_t pfn;
 	u64 old_spte = *sptep;
 
-	if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
-	      old_spte & shadow_accessed_mask) {
+	if (!spte_bits_lost(old_spte))
 		__set_spte(sptep, new_spte);
-	} else
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
 		return;
+
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
--
1.6.1.2
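
The bits can be "lost" because hardware races with the update: while the
guest runs, a CPU may set the accessed bit in the spte at any time. Here is
a sketch of the interleaving the helper is about, plus __xchg_spte() as
assumed in this thread (the CONFIG_X86_64 arm matches the hunk context
above; the 32-bit arm is an illustrative cmpxchg64 loop, not necessarily
the exact mmu.c code):

	/*
	 * CPU 0 (KVM)                        another CPU (guest access)
	 * old_spte = *sptep;
	 *                                    hardware sets A bit in *sptep
	 * __set_spte(sptep, new_spte);       <- that A bit is silently lost
	 *
	 * __xchg_spte() instead swaps atomically and returns the spte as of
	 * the exchange, so a concurrently set A bit is observed and can be
	 * forwarded to kvm_set_pfn_accessed().
	 */
	static u64 __xchg_spte(u64 *sptep, u64 new_spte)
	{
	#ifdef CONFIG_X86_64
		return xchg(sptep, new_spte);	/* one atomic 64-bit swap */
	#else
		u64 old_spte;

		/* 32-bit sptes are still 64 bits wide (PAE): loop on cmpxchg64. */
		do {
			old_spte = *sptep;
		} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);

		return old_spte;
	#endif
	}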

2010-07-27 03:39:29

by Xiao Guangrong

Subject: [PATCH 3/3] KVM: MMU: mark page dirty only when page is really written

Mark the page dirty only when the page is really written; this is more
exact, and it also fixes dirty page marking in the speculation path

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 47 ++++++++++++++++++++++++++++-------------------
1 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dd6c192..bcc2173 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -311,24 +311,42 @@ static bool spte_bits_lost(u64 spte)
 	if (!is_shadow_present_pte(spte))
 		return false;
 
-	if (spte & shadow_accessed_mask)
+	if (spte & shadow_accessed_mask &&
+	      (!is_writable_pte(spte) || spte & shadow_dirty_mask))
 		return false;
 
 	return true;
 }

+static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+	return old_spte & bit_mask && !(new_spte & bit_mask);
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
-	u64 old_spte;
+	u64 mask, old_spte = *sptep;
+
+	WARN_ON(!is_rmap_spte(new_spte));
 
-	if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask) ||
-	    !is_rmap_spte(*sptep))
+	new_spte |= old_spte & shadow_dirty_mask;
+
+	mask = shadow_accessed_mask;
+	if (is_writable_pte(old_spte))
+		mask |= shadow_dirty_mask;
+
+	if (!spte_bits_lost(old_spte) || (new_spte & mask) == mask)
 		__set_spte(sptep, new_spte);
-	else {
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
-		if (old_spte & shadow_accessed_mask)
-			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
-	}
+
+	if (!shadow_accessed_mask)
+		return;
+
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
+		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
+		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 }

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -708,7 +726,7 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(old_spte))
+	if (!shadow_dirty_mask || old_spte & shadow_dirty_mask)
 		kvm_set_pfn_dirty(pfn);
 }

@@ -763,13 +781,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
-	if (write_protected) {
-		pfn_t pfn;
-
-		spte = rmap_next(kvm, rmapp, NULL);
-		pfn = spte_to_pfn(*spte);
-		kvm_set_pfn_dirty(pfn);
-	}
 
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
@@ -1942,7 +1953,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = shadow_base_present_pte | shadow_dirty_mask;
+	spte = shadow_base_present_pte;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 	if (!dirty)
@@ -2003,8 +2014,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	if (is_writable_pte(*sptep) && !is_writable_pte(spte))
-		kvm_set_pfn_dirty(pfn);
 	update_spte(sptep, spte);
 done:
 	return ret;
--
1.6.1.2
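
To see what "only when really written" buys, here is a hypothetical
walk-through of the new update_spte(), with sptes reduced to their W/A/D
bits (the values are illustrative, not from the thread):

	/*
	 * Entry: *sptep = W=1 A=1 D=0, new_spte = W=0 A=1 (write-protect).
	 *
	 * new_spte |= old_spte & shadow_dirty_mask leaves D=0 in new_spte.
	 * mask = A|D because old_spte is writable; (new_spte & mask) != mask
	 * and the spte is volatile (writable but not yet dirty), so the
	 * update takes the __xchg_spte() path.
	 *
	 * If the guest wrote the page between the read of *sptep and the
	 * exchange, the returned old_spte carries D=1 while new_spte has
	 * D=0; spte_is_bit_cleared() sees the 1 -> 0 transition and
	 * kvm_set_pfn_dirty() runs.  If the guest never wrote, D stays 0
	 * on both sides and the page is not spuriously marked dirty.
	 */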

2010-08-02 07:41:15

by Avi Kivity

Subject: Re: [PATCH 2/3] KVM: MMU: move bits lost judgement into a separate function

On 07/27/2010 06:33 AM, Xiao Guangrong wrote:
> Introduce a spte_bits_lost() function to judge whether spte bits may
> be lost; it's more readable and will help us clean up the code later
>
> Signed-off-by: Xiao Guangrong <[email protected]>
> ---
> arch/x86/kvm/mmu.c | 20 +++++++++++++++++---
> 1 files changed, 17 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index e10f2bd..dd6c192 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -303,6 +303,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
>  #endif
>  }
>
> +static bool spte_bits_lost(u64 spte)
> +{
> +	if (!shadow_accessed_mask)
> +		return false;
> +
> +	if (!is_shadow_present_pte(spte))
> +		return false;
> +
> +	if (spte & shadow_accessed_mask)
> +		return false;
> +
> +	return true;
> +}

IMO spte_has_volatile_bits() is a clearer name, "lost" implies they are
already gone.


--
error compiling committee.c: too many arguments to function

2010-08-02 07:51:08

by Avi Kivity

Subject: Re: [PATCH 3/3] KVM: MMU: mark page dirty only when page is really written

On 07/27/2010 06:35 AM, Xiao Guangrong wrote:
> Mark the page dirty only when the page is really written; this is more
> exact, and it also fixes dirty page marking in the speculation path
>
> Signed-off-by: Xiao Guangrong <[email protected]>
> ---
> arch/x86/kvm/mmu.c | 47 ++++++++++++++++++++++++++++-------------------
> 1 files changed, 28 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index dd6c192..bcc2173 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -311,24 +311,42 @@ static bool spte_bits_lost(u64 spte)
>  	if (!is_shadow_present_pte(spte))
>  		return false;
>
> -	if (spte & shadow_accessed_mask)
> +	if (spte & shadow_accessed_mask &&
> +	      (!is_writable_pte(spte) || spte & shadow_dirty_mask))
>  		return false;
>

Please add parentheses around bitwise operators to reduce confusion.
The rest looks good.

--
error compiling committee.c: too many arguments to function
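
For readers following the thread: the unparenthesized form is not actually
a bug, because C gives '&' higher precedence than '&&'; the request is
purely about readability. A toy comparison:

	/*
	 * These two conditions parse identically:
	 *
	 *	spte & shadow_accessed_mask && !is_writable_pte(spte)
	 *	(spte & shadow_accessed_mask) && !is_writable_pte(spte)
	 *
	 * but only the second can be read without recalling the C
	 * precedence table, hence the review comment.
	 */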

2010-08-02 07:55:19

by Xiao Guangrong

Subject: Re: [PATCH 2/3] KVM: MMU: move bits lost judgement into a separate function



Avi Kivity wrote:
> On 07/27/2010 06:33 AM, Xiao Guangrong wrote:
>> Introduce a spte_bits_lost() function to judge whether spte bits may
>> be lost; it's more readable and will help us clean up the code later
>>
>> Signed-off-by: Xiao Guangrong <[email protected]>
>> ---
>> arch/x86/kvm/mmu.c | 20 +++++++++++++++++---
>> 1 files changed, 17 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index e10f2bd..dd6c192 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -303,6 +303,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
>>  #endif
>>  }
>>
>> +static bool spte_bits_lost(u64 spte)
>> +{
>> +	if (!shadow_accessed_mask)
>> +		return false;
>> +
>> +	if (!is_shadow_present_pte(spte))
>> +		return false;
>> +
>> +	if (spte & shadow_accessed_mask)
>> +		return false;
>> +
>> +	return true;
>> +}
>
> IMO spte_has_volatile_bits() is a clearer name, "lost" implies they are
> already gone.

Yeah, that's a better name, will fix soon.

2010-08-02 07:55:59

by Xiao Guangrong

Subject: Re: [PATCH 3/3] KVM: MMU: mark page dirty only when page is really written



Avi Kivity wrote:
> On 07/27/2010 06:35 AM, Xiao Guangrong wrote:
>> Mark the page dirty only when the page is really written; this is more
>> exact, and it also fixes dirty page marking in the speculation path
>>
>> Signed-off-by: Xiao Guangrong <[email protected]>
>> ---
>> arch/x86/kvm/mmu.c | 47 ++++++++++++++++++++++++++++-------------------
>> 1 files changed, 28 insertions(+), 19 deletions(-)
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index dd6c192..bcc2173 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -311,24 +311,42 @@ static bool spte_bits_lost(u64 spte)
>>  	if (!is_shadow_present_pte(spte))
>>  		return false;
>>
>> -	if (spte & shadow_accessed_mask)
>> +	if (spte & shadow_accessed_mask &&
>> +	      (!is_writable_pte(spte) || spte & shadow_dirty_mask))
>>  		return false;
>>
>
> Please add parentheses around bitwise operators to reduce confusion.
> The rest looks good.
>

OK, will do it in the new version, thanks.

2010-08-02 08:16:24

by Xiao Guangrong

Subject: [PATCH v2 1/3] KVM: MMU: using kvm_set_pfn_accessed() instead of mark_page_accessed()

It's a small cleanup: use kvm_set_pfn_accessed() instead of
mark_page_accessed()

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 56da796..f7b379a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -313,7 +313,7 @@ static void update_spte(u64 *sptep, u64 new_spte)
 	else {
 		old_spte = __xchg_spte(sptep, new_spte);
 		if (old_spte & shadow_accessed_mask)
-			mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
+			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 	}
 }

--
1.6.1.2

2010-08-02 08:18:19

by Xiao Guangrong

Subject: [PATCH v2 2/3] KVM: MMU: move bits lost judgement into a separate function

Introduce a spte_has_volatile_bits() function to judge whether spte
bits may be lost; it's more readable and will help us clean up the
code later

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 20 +++++++++++++++++---
1 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f7b379a..e18834c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -303,6 +303,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
#endif
}

+static bool spte_has_volatile_bits(u64 spte)
+{
+	if (!shadow_accessed_mask)
+		return false;
+
+	if (!is_shadow_present_pte(spte))
+		return false;
+
+	if (spte & shadow_accessed_mask)
+		return false;
+
+	return true;
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
 	u64 old_spte;
@@ -683,14 +697,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn_t pfn;
 	u64 old_spte = *sptep;
 
-	if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
-	      old_spte & shadow_accessed_mask) {
+	if (!spte_has_volatile_bits(old_spte))
 		__set_spte(sptep, new_spte);
-	} else
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
 		return;
+
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
--
1.6.1.2

2010-08-02 08:19:24

by Xiao Guangrong

Subject: [PATCH v2 3/3] KVM: MMU: mark page dirty only when page is really written

Mark the page dirty only when the page is really written; this is more
exact, and it also fixes dirty page marking in the speculation path

Signed-off-by: Xiao Guangrong <[email protected]>
---
arch/x86/kvm/mmu.c | 47 ++++++++++++++++++++++++++++-------------------
1 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e18834c..9c69725 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -311,24 +311,42 @@ static bool spte_has_volatile_bits(u64 spte)
 	if (!is_shadow_present_pte(spte))
 		return false;
 
-	if (spte & shadow_accessed_mask)
+	if ((spte & shadow_accessed_mask) &&
+	      (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
 		return false;
 
 	return true;
 }

+static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+	return (old_spte & bit_mask) && !(new_spte & bit_mask);
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
-	u64 old_spte;
+	u64 mask, old_spte = *sptep;
+
+	WARN_ON(!is_rmap_spte(new_spte));
 
-	if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask) ||
-	    !is_rmap_spte(*sptep))
+	new_spte |= old_spte & shadow_dirty_mask;
+
+	mask = shadow_accessed_mask;
+	if (is_writable_pte(old_spte))
+		mask |= shadow_dirty_mask;
+
+	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
 		__set_spte(sptep, new_spte);
-	else {
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
-		if (old_spte & shadow_accessed_mask)
-			kvm_set_pfn_accessed(spte_to_pfn(old_spte));
-	}
+
+	if (!shadow_accessed_mask)
+		return;
+
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
+		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
+		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 }

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -708,7 +726,7 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(old_spte))
+	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
 }

@@ -763,13 +781,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
-	if (write_protected) {
-		pfn_t pfn;
-
-		spte = rmap_next(kvm, rmapp, NULL);
-		pfn = spte_to_pfn(*spte);
-		kvm_set_pfn_dirty(pfn);
-	}
 
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
@@ -1942,7 +1953,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = shadow_base_present_pte | shadow_dirty_mask;
+	spte = shadow_base_present_pte;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 	if (!dirty)
@@ -2003,8 +2014,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	if (is_writable_pte(*sptep) && !is_writable_pte(spte))
-		kvm_set_pfn_dirty(pfn);
 	update_spte(sptep, spte);
 done:
 	return ret;
--
1.6.1.2
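
The core of the fix is the one-line change in set_spte(), summarized here
in the patch's own terms:

	/*
	 * Before: spte = shadow_base_present_pte | shadow_dirty_mask set
	 * the D bit on every new spte, so a speculatively installed spte
	 * made its page look written even if the guest never touched it.
	 *
	 * After: the D bit starts clear, hardware sets it on a real guest
	 * write, and update_spte()/set_spte_track_bits() forward it to
	 * kvm_set_pfn_dirty() only when a set D bit is about to be dropped.
	 */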

2010-08-02 09:53:50

by Avi Kivity

Subject: Re: [PATCH v2 1/3] KVM: MMU: using kvm_set_pfn_accessed() instead of mark_page_accessed()

On 08/02/2010 11:12 AM, Xiao Guangrong wrote:
> It's a small cleanup: use kvm_set_pfn_accessed() instead of
> mark_page_accessed()

Applied all of v2, thanks.

--
error compiling committee.c: too many arguments to function