As the underlying software walkers can traverse and update
stage-2 in parallel, there is no need to serialize access faults.

Only take the read lock when handling an access fault.

Signed-off-by: Oliver Upton <[email protected]>
---
arch/arm64/kvm/hyp/pgtable.c | 2 +-
arch/arm64/kvm/mmu.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 9626f615d9b8..1a3dd9774707 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1097,7 +1097,7 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
int ret;

ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
- &pte, NULL, 0);
+ &pte, NULL, KVM_PGTABLE_WALK_SHARED);
if (!ret)
dsb(ishst);

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 886ad5ee767a..347985a56414 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1404,10 +1404,10 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)

trace_kvm_access_fault(fault_ipa);

- write_lock(&vcpu->kvm->mmu_lock);
+ read_lock(&vcpu->kvm->mmu_lock);
mmu = vcpu->arch.hw_mmu;
pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
- write_unlock(&vcpu->kvm->mmu_lock);
+ read_unlock(&vcpu->kvm->mmu_lock);

if (kvm_pte_valid(pte))
kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
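
For reference, a sketch of handle_access_fault() as it reads with this
change applied, reconstructed from the hunk above; the two local
declarations sit outside the hunk's context and are an assumption about
the surrounding code:

static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        kvm_pte_t pte;
        struct kvm_s2_mmu *mmu;

        trace_kvm_access_fault(fault_ipa);

        /*
         * The read lock is sufficient here: kvm_pgtable_stage2_mkyoung()
         * now performs a shared (KVM_PGTABLE_WALK_SHARED) walk, so
         * concurrent walkers can safely update stage-2 in parallel.
         */
        read_lock(&vcpu->kvm->mmu_lock);
        mmu = vcpu->arch.hw_mmu;
        pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
        read_unlock(&vcpu->kvm->mmu_lock);

        /* Mark the backing page accessed if the walk found a valid mapping. */
        if (kvm_pte_valid(pte))
                kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
}
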
--
2.38.1.584.g0f3c55d4c2-goog

On Tue, Nov 29, 2022 at 07:19:45PM +0000, Oliver Upton wrote:
> As the underlying software walkers can traverse and update
> stage-2 in parallel, there is no need to serialize access faults.
>
> Only take the read lock when handling an access fault.
>
> Signed-off-by: Oliver Upton <[email protected]>

Reviewed-by: Ricardo Koller <[email protected]>