2019-09-25 23:59:43

by Leonardo Bras

Subject: [PATCH 1/1] powerpc: kvm: Reduce calls to get current->mm by storing the value locally

Reduce the number of get_current() calls made to read the value of
current->mm by fetching it once and storing it in a local variable,
since it is not supposed to change within the same process.
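
For context, the reason a single read helps is that current is a macro
which expands to a call to get_current() (on powerpc it fetches the
current task pointer from the per-cpu paca), so every use of
current->mm repeats that lookup. A minimal sketch of the pattern, with
the struct layout and the get_current() declaration reduced to
illustrative stubs rather than the real kernel definitions:

	struct mm_struct;
	struct task_struct { struct mm_struct *mm; };

	struct task_struct *get_current(void);	/* stub; the real one lives in arch code */
	#define current get_current()

	void twice_before(void (*use)(struct mm_struct *))
	{
		use(current->mm);	/* expands to use(get_current()->mm) */
		use(current->mm);	/* get_current() evaluated again     */
	}

	void twice_after(void (*use)(struct mm_struct *))
	{
		struct mm_struct *mm = current->mm;	/* single lookup */

		use(mm);
		use(mm);
	}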

Signed-off-by: Leonardo Bras <[email protected]>
---
Re-sending to all the lists involved (I missed the kvm ones).

arch/powerpc/kvm/book3s_64_mmu_hv.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 9a75f0e1933b..f2b9aea43216 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -508,6 +508,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct vm_area_struct *vma;
unsigned long rcbits;
long mmio_update;
+ struct mm_struct *mm;

if (kvm_is_radix(kvm))
return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
@@ -584,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
is_ci = false;
pfn = 0;
page = NULL;
+ mm = current->mm;
pte_size = PAGE_SIZE;
writing = (dsisr & DSISR_ISSTORE) != 0;
/* If writing != 0, then the HPTE must allow writing, if we get here */
@@ -592,8 +594,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
if (npages < 1) {
/* Check if it's an I/O mapping */
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, hva);
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, hva);
if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
(vma->vm_flags & VM_PFNMAP)) {
pfn = vma->vm_pgoff +
@@ -602,7 +604,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
write_ok = vma->vm_flags & VM_WRITE;
}
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
if (!pfn)
goto out_put;
} else {
@@ -621,8 +623,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
* hugepage split and collapse.
*/
local_irq_save(flags);
- ptep = find_current_mm_pte(current->mm->pgd,
- hva, NULL, NULL);
+ ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
if (__pte_write(pte))
--
2.20.1


2019-09-26 00:38:52

by Leonardo Bras

Subject: Re: [PATCH 1/1] powerpc: kvm: Reduce calls to get current->mm by storing the value locally

I have done a very simple comparison with gdb's disassemble command:
with this patch applied, the function size drops from 882 to 878
instructions.
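
For reference, a comparison along these lines can be reproduced with
something like the following (the vmlinux file names are illustrative;
gdb adds one header and one footer line to each dump, so compare the
difference between the two counts rather than the absolute numbers):

	$ gdb -batch -ex 'disassemble kvmppc_book3s_hv_page_fault' vmlinux.before | wc -l
	$ gdb -batch -ex 'disassemble kvmppc_book3s_hv_page_fault' vmlinux.after | wc -l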

(This is a resend; my previous mail did not include all the relevant
lists.)

On Mon, 2019-09-23 at 18:30 -0300, Leonardo Bras wrote:
> Reduce the number of get_current() calls made to read the value of
> current->mm by fetching it once and storing it in a local variable,
> since it is not supposed to change within the same process.
>
> Signed-off-by: Leonardo Bras <[email protected]>
> ---
> Re-sending to all the lists involved (I missed the kvm ones).
>
> arch/powerpc/kvm/book3s_64_mmu_hv.c | 11 ++++++-----
> 1 file changed, 6 insertions(+), 5 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> index 9a75f0e1933b..f2b9aea43216 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
> @@ -508,6 +508,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> struct vm_area_struct *vma;
> unsigned long rcbits;
> long mmio_update;
> + struct mm_struct *mm;
>
> if (kvm_is_radix(kvm))
> return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
> @@ -584,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> is_ci = false;
> pfn = 0;
> page = NULL;
> + mm = current->mm;
> pte_size = PAGE_SIZE;
> writing = (dsisr & DSISR_ISSTORE) != 0;
> /* If writing != 0, then the HPTE must allow writing, if we get here */
> @@ -592,8 +594,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
> if (npages < 1) {
> /* Check if it's an I/O mapping */
> - down_read(&current->mm->mmap_sem);
> - vma = find_vma(current->mm, hva);
> + down_read(&mm->mmap_sem);
> + vma = find_vma(mm, hva);
> if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
> (vma->vm_flags & VM_PFNMAP)) {
> pfn = vma->vm_pgoff +
> @@ -602,7 +604,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
> write_ok = vma->vm_flags & VM_WRITE;
> }
> - up_read(&current->mm->mmap_sem);
> + up_read(&mm->mmap_sem);
> if (!pfn)
> goto out_put;
> } else {
> @@ -621,8 +623,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
> * hugepage split and collapse.
> */
> local_irq_save(flags);
> - ptep = find_current_mm_pte(current->mm->pgd,
> - hva, NULL, NULL);
> + ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL);
> if (ptep) {
> pte = kvmppc_read_update_linux_pte(ptep, 1);
> if (__pte_write(pte))

