Date: 2023-04-18 21:47:47
From: Liam R. Howlett

Subject: [PATCH] mm: move 'mmap_min_addr' logic from callers into vm_unmapped_area()

From: Linus Torvalds <[email protected]>

Instead of having callers care about the mmap_min_addr logic for the
lowest valid mapping address (and some of them getting it wrong), just
move the logic into vm_unmapped_area() itself. One less thing for
various architecture cases (and generic helpers) to worry about.
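
Concretely, the clamping that each call site used to open-code as
max(PAGE_SIZE, mmap_min_addr) now happens once inside the area search
helpers, along these lines (a sketch of the change, matching the diff
below):

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;

so the callers can simply pass PAGE_SIZE as their low limit.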

We should really try to make much more of this common code, but baby
steps...

Without this, vm_unmapped_area() could return an address below
mmap_min_addr (because some caller forgot about that). The mmap
machinery then thinks it has found a workable address, but
security_mmap_addr(addr) later rejects it and the mmap() fails with a
nonsensical error (EPERM).
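
For reference, the EPERM comes from the security_mmap_addr() hook that
runs later in the get_unmapped_area() path; its default capability
check boils down to roughly this (a simplified sketch, the real check
lives in the LSM/commoncap code):

	/* simplified: mappings below mmap_min_addr need CAP_SYS_RAWIO */
	if (addr < mmap_min_addr && !capable(CAP_SYS_RAWIO))
		return -EPERM;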

The proper action is to either return ENOMEM (if the virtual address
space is exhausted) or to try to find another address (i.e. do a
bottom-up search for free addresses after the top-down one has failed).
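
The generic topdown helper already implements the second option; the
pattern looks roughly like this (a sketch paraphrasing the existing
generic_get_unmapped_area_topdown() fallback, not part of this patch):

	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr)) {
		/* top-down search failed: retry bottom-up */
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = mmap_end;	/* arch_get_mmap_end(...) */
		addr = vm_unmapped_area(&info);
	}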

See commit 2afc745f3e30 ("mm: ensure get_unmapped_area() returns higher
address than mmap_min_addr"), which fixed this for one call site (the
generic arch_get_unmapped_area_topdown() fallback) but left other cases
alone.

Cc: Russell King <[email protected]>
Cc: Liam Howlett <[email protected]>
Cc: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Liam R. Howlett <[email protected]>
---
 arch/s390/mm/hugetlbpage.c |  2 +-
 arch/s390/mm/mmap.c        |  2 +-
 fs/hugetlbfs/inode.c       |  2 +-
 mm/mmap.c                  | 19 +++++++++++++------
 4 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index c299a18273ff..c718f2a0de94 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -273,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.low_limit = PAGE_SIZE;
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 3327c47bc181..fc9a7dc26c5e 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -136,7 +136,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
if (filp || (flags & MAP_SHARED))
info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 702d79639c0d..ecfdfb2529a3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -208,7 +208,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.low_limit = PAGE_SIZE;
info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
diff --git a/mm/mmap.c b/mm/mmap.c
index 790cc62c0038..c0977a98a91c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1547,7 +1547,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
*/
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
- unsigned long length, gap, low_limit;
+ unsigned long length, gap;
+ unsigned long low_limit, high_limit;
struct vm_area_struct *tmp;

MA_STATE(mas, &current->mm->mm_mt, 0, 0);
@@ -1558,8 +1559,11 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
return -ENOMEM;

low_limit = info->low_limit;
+ if (low_limit < mmap_min_addr)
+ low_limit = mmap_min_addr;
+ high_limit = info->high_limit;
retry:
- if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
+ if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
return -ENOMEM;

gap = mas.index;
@@ -1595,7 +1599,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
*/
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
- unsigned long length, gap, high_limit, gap_end;
+ unsigned long length, gap, gap_end;
+ unsigned long low_limit, high_limit;
struct vm_area_struct *tmp;

MA_STATE(mas, &current->mm->mm_mt, 0, 0);
@@ -1604,10 +1609,12 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
if (length < info->length)
return -ENOMEM;

+ low_limit = info->low_limit;
+ if (low_limit < mmap_min_addr)
+ low_limit = mmap_min_addr;
high_limit = info->high_limit;
retry:
- if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
- length))
+ if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
return -ENOMEM;

gap = mas.last + 1 - info->length;
@@ -1742,7 +1749,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.low_limit = PAGE_SIZE;
info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
info.align_mask = 0;
info.align_offset = 0;
--
2.39.2