Instead of converting adjust_next between a virtual address and a page frame
number, let's just store the virtual address in adjust_next.
Also, this patch fixes one typo in the comment of
vma_adjust_trans_huge().
Signed-off-by: Wei Yang <[email protected]>
---
mm/huge_memory.c | 4 ++--
mm/mmap.c | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 78c84bee7e29..2c633ba14440 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2300,13 +2300,13 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
/*
* If we're also updating the vma->vm_next->vm_start, if the new
- * vm_next->vm_start isn't page aligned and it could previously
+ * vm_next->vm_start isn't hpage aligned and it could previously
* contain an hugepage: check if we need to split an huge pmd.
*/
if (adjust_next > 0) {
struct vm_area_struct *next = vma->vm_next;
unsigned long nstart = next->vm_start;
- nstart += adjust_next << PAGE_SHIFT;
+ nstart += adjust_next;
if (nstart & ~HPAGE_PMD_MASK &&
(nstart & HPAGE_PMD_MASK) >= next->vm_start &&
(nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
diff --git a/mm/mmap.c b/mm/mmap.c
index 90b1298d4222..e4c9bbfd4103 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -758,7 +758,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
* vma expands, overlapping part of the next:
* mprotect case 5 shifting the boundary up.
*/
- adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
+ adjust_next = (end - next->vm_start);
exporter = next;
importer = vma;
VM_WARN_ON(expand != importer);
@@ -768,7 +768,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
* split_vma inserting another: so it must be
* mprotect case 4 shifting the boundary down.
*/
- adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
+ adjust_next = -(vma->vm_end - end);
exporter = vma;
importer = next;
VM_WARN_ON(expand != importer);
@@ -840,8 +840,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
}
vma->vm_pgoff = pgoff;
if (adjust_next) {
- next->vm_start += adjust_next << PAGE_SHIFT;
- next->vm_pgoff += adjust_next;
+ next->vm_start += adjust_next;
+ next->vm_pgoff += adjust_next >> PAGE_SHIFT;
}
if (root) {
--
2.20.1 (Apple Git-117)
On 8/28/20 10:10 AM, Wei Yang wrote:
> Instead of convert adjust_next between virtual address and page frame
> number, let's just store the virtual address into adjust_next.
IMHO more precisely/less confusing it's "bytes" and "pages" instead of "virtual
address" (which is absolute address, but this variable holds a difference) and
"page frame number" (which is related to absolute physical address, but what we
have is difference in pages in virtual address space).
> Also, this patch fixes one typo in the comment of
> vma_adjust_trans_huge().
>
> Signed-off-by: Wei Yang <[email protected]>
Other than that, seems like it leads to less shifting, so
Acked-by: Vlastimil Babka <[email protected]>
> ---
> mm/huge_memory.c | 4 ++--
> mm/mmap.c | 8 ++++----
> 2 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 78c84bee7e29..2c633ba14440 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2300,13 +2300,13 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
>
> /*
> * If we're also updating the vma->vm_next->vm_start, if the new
> - * vm_next->vm_start isn't page aligned and it could previously
> + * vm_next->vm_start isn't hpage aligned and it could previously
> * contain an hugepage: check if we need to split an huge pmd.
> */
> if (adjust_next > 0) {
> struct vm_area_struct *next = vma->vm_next;
> unsigned long nstart = next->vm_start;
> - nstart += adjust_next << PAGE_SHIFT;
> + nstart += adjust_next;
> if (nstart & ~HPAGE_PMD_MASK &&
> (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
> (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 90b1298d4222..e4c9bbfd4103 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -758,7 +758,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
> * vma expands, overlapping part of the next:
> * mprotect case 5 shifting the boundary up.
> */
> - adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
> + adjust_next = (end - next->vm_start);
> exporter = next;
> importer = vma;
> VM_WARN_ON(expand != importer);
> @@ -768,7 +768,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
> * split_vma inserting another: so it must be
> * mprotect case 4 shifting the boundary down.
> */
> - adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
> + adjust_next = -(vma->vm_end - end);
> exporter = vma;
> importer = next;
> VM_WARN_ON(expand != importer);
> @@ -840,8 +840,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
> }
> vma->vm_pgoff = pgoff;
> if (adjust_next) {
> - next->vm_start += adjust_next << PAGE_SHIFT;
> - next->vm_pgoff += adjust_next;
> + next->vm_start += adjust_next;
> + next->vm_pgoff += adjust_next >> PAGE_SHIFT;
> }
>
> if (root) {
>
On Tue, Sep 08, 2020 at 05:31:22PM +0200, Vlastimil Babka wrote:
>On 8/28/20 10:10 AM, Wei Yang wrote:
>> Instead of convert adjust_next between virtual address and page frame
>> number, let's just store the virtual address into adjust_next.
>
>IMHO more precisely/less confusing it's "bytes" and "pages" instead of "virtual
>address" (which is absolute address, but this variable holds a difference) and
>"page frame number" (which is related to absolute physical address, but what we
>have is difference in pages in virtual address space).
>
Thanks for your comment.
To be honest, I am not sure which one is more precise. English is not my
mother tongue. If others think this is better, I am fine to adjust this.
>> Also, this patch fixes one typo in the comment of
>> vma_adjust_trans_huge().
>>
>> Signed-off-by: Wei Yang <[email protected]>
>
>Other than that, seems like it leads to less shifting, so
>Acked-by: Vlastimil Babka <[email protected]>
>
>> ---
>> mm/huge_memory.c | 4 ++--
>> mm/mmap.c | 8 ++++----
>> 2 files changed, 6 insertions(+), 6 deletions(-)
>>
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index 78c84bee7e29..2c633ba14440 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -2300,13 +2300,13 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
>>
>> /*
>> * If we're also updating the vma->vm_next->vm_start, if the new
>> - * vm_next->vm_start isn't page aligned and it could previously
>> + * vm_next->vm_start isn't hpage aligned and it could previously
>> * contain an hugepage: check if we need to split an huge pmd.
>> */
>> if (adjust_next > 0) {
>> struct vm_area_struct *next = vma->vm_next;
>> unsigned long nstart = next->vm_start;
>> - nstart += adjust_next << PAGE_SHIFT;
>> + nstart += adjust_next;
>> if (nstart & ~HPAGE_PMD_MASK &&
>> (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
>> (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
>> diff --git a/mm/mmap.c b/mm/mmap.c
>> index 90b1298d4222..e4c9bbfd4103 100644
>> --- a/mm/mmap.c
>> +++ b/mm/mmap.c
>> @@ -758,7 +758,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
>> * vma expands, overlapping part of the next:
>> * mprotect case 5 shifting the boundary up.
>> */
>> - adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
>> + adjust_next = (end - next->vm_start);
>> exporter = next;
>> importer = vma;
>> VM_WARN_ON(expand != importer);
>> @@ -768,7 +768,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
>> * split_vma inserting another: so it must be
>> * mprotect case 4 shifting the boundary down.
>> */
>> - adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
>> + adjust_next = -(vma->vm_end - end);
>> exporter = vma;
>> importer = next;
>> VM_WARN_ON(expand != importer);
>> @@ -840,8 +840,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
>> }
>> vma->vm_pgoff = pgoff;
>> if (adjust_next) {
>> - next->vm_start += adjust_next << PAGE_SHIFT;
>> - next->vm_pgoff += adjust_next;
>> + next->vm_start += adjust_next;
>> + next->vm_pgoff += adjust_next >> PAGE_SHIFT;
>> }
>>
>> if (root) {
>>
--
Wei Yang
Help you, Help me