2023-09-20 02:19:14

by Rik van Riel

[permalink] [raw]
Subject: [PATCH 0/2] hugetlbfs: close race between MADV_DONTNEED and page fault

Malloc libraries, like jemalloc and tcmalloc, take decisions on when
to call madvise independently from the code in the main application.

This sometimes results in the application page faulting on an address,
right after the malloc library has shot down the backing memory with
MADV_DONTNEED.

Usually this is harmless, because we always have some 4kB pages
sitting around to satisfy a page fault. However, with hugetlbfs
systems often allocate only the exact number of huge pages that
the application wants.

Due to TLB batching, hugetlbfs MADV_DONTNEED will free pages outside of
any lock taken on the page fault path, which can open up the following
race condition:

CPU 1                          CPU 2

MADV_DONTNEED
unmap page
shoot down TLB entry
                               page fault
                               fail to allocate a huge page
                               killed with SIGBUS
free page

Fix that race by extending the hugetlb_vma_lock locking scheme to also
cover private hugetlb mappings (with resv_map), and pulling the locking
from __unmap_hugepage_final_range into helper functions called from
zap_page_range_single. This ensures page faults stay locked out of
the MADV_DONTNEED VMA until the huge pages have actually been freed.



2023-09-20 02:19:20

by Rik van Riel

[permalink] [raw]
Subject: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

From: Rik van Riel <[email protected]>

Extend the locking scheme used to protect shared hugetlb mappings
from truncate vs page fault races, in order to protect private
hugetlb mappings (with resv_map) against MADV_DONTNEED.

Add a read-write semaphore to the resv_map data structure, and
use that from the hugetlb_vma_(un)lock_* functions, in preparation
for closing the race between MADV_DONTNEED and page faults.

Signed-off-by: Rik van Riel <[email protected]>
---
include/linux/hugetlb.h | 6 ++++++
mm/hugetlb.c | 36 ++++++++++++++++++++++++++++++++----
2 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5b2626063f4f..694928fa06a3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -1231,6 +1232,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+ return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
/*
* Safe version of huge_pte_offset() to check the locks. See comments
* above huge_pte_offset().
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ba6d39b71cb1..b99d215d2939 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

down_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_read(&resv_map->rw_sema);
}
}

@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

up_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_read(&resv_map->rw_sema);
}
}

@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

down_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_write(&resv_map->rw_sema);
}
}

@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

up_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_write(&resv_map->rw_sema);
}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

- if (!__vma_shareable_lock(vma))
- return 1;
+ if (__vma_shareable_lock(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ return down_write_trylock(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ return down_write_trylock(&resv_map->rw_sema);
+ }

- return down_write_trylock(&vma_lock->rw_sema);
+ return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

lockdep_assert_held(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ lockdep_assert_held(&resv_map->rw_sema);
}
}

@@ -1068,6 +1095,7 @@ struct resv_map *resv_map_alloc(void)
kref_init(&resv_map->refs);
spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
+ init_rwsem(&resv_map->rw_sema);

resv_map->adds_in_progress = 0;
/*
--
2.41.0

2023-09-20 02:19:34

by Rik van Riel

[permalink] [raw]
Subject: [PATCH 2/2] hugetlbfs: close race between MADV_DONTNEED and page fault

From: Rik van Riel <[email protected]>

Malloc libraries, like jemalloc and tcmalloc, take decisions on when
to call madvise independently from the code in the main application.

This sometimes results in the application page faulting on an address,
right after the malloc library has shot down the backing memory with
MADV_DONTNEED.

Usually this is harmless, because we always have some 4kB pages
sitting around to satisfy a page fault. However, with hugetlbfs
systems often allocate only the exact number of huge pages that
the application wants.

Due to TLB batching, hugetlbfs MADV_DONTNEED will free pages outside of
any lock taken on the page fault path, which can open up the following
race condition:

CPU 1                          CPU 2

MADV_DONTNEED
unmap page
shoot down TLB entry
                               page fault
                               fail to allocate a huge page
                               killed with SIGBUS
free page

Fix that race by pulling the locking from __unmap_hugepage_final_range
into helper functions called from zap_page_range_single. This ensures
page faults stay locked out of the MADV_DONTNEED VMA until the
huge pages have actually been freed.

Signed-off-by: Rik van Riel <[email protected]>
---
include/linux/hugetlb.h | 35 +++++++++++++++++++++++++++++++++--
mm/hugetlb.c | 20 +++++++++++---------
mm/memory.c | 7 +++----
3 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 694928fa06a3..d9ec500cfef9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -139,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags);
@@ -246,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);

+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
+
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -297,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}

+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+}
+
static inline struct page *hugetlb_follow_page_mask(
struct vm_area_struct *vma, unsigned long address, unsigned int flags,
unsigned int *page_mask)
@@ -442,7 +473,7 @@ static inline long hugetlb_change_protection(
return 0;
}

-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b99d215d2939..3510e2bf23da 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5301,9 +5301,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
return len + old_addr - old_end;
}

-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page, zap_flags_t zap_flags)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -5430,16 +5430,18 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
tlb_flush_mmu_tlbonly(tlb);
}

-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page,
- zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
{
+ adjust_range_if_pmd_sharing_possible(vma, start, end);
hugetlb_vma_lock_write(vma);
i_mmap_lock_write(vma->vm_file->f_mapping);
+}

- /* mmu notification performed in caller */
- __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ zap_flags_t zap_flags = details ? details->zap_flags : 0;

if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
/*
diff --git a/mm/memory.c b/mm/memory.c
index 6c264d2f969c..a07ae3b60530 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1683,7 +1683,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
if (vma->vm_file) {
zap_flags_t zap_flags = details ?
details->zap_flags : 0;
- __unmap_hugepage_range_final(tlb, vma, start, end,
+ __unmap_hugepage_range(tlb, vma, start, end,
NULL, zap_flags);
}
} else
@@ -1753,9 +1753,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
- if (is_vm_hugetlb_page(vma))
- adjust_range_if_pmd_sharing_possible(vma, &range.start,
- &range.end);
+ hugetlb_zap_begin(vma, &range.start, &range.end);
tlb_gather_mmu(&tlb, vma->vm_mm);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
@@ -1766,6 +1764,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unmap_single_vma(&tlb, vma, address, end, details, false);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb);
+ hugetlb_zap_end(vma, details);
}

/**
--
2.41.0

2023-09-20 04:18:18

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On Wed, 2023-09-20 at 04:57 +0100, Matthew Wilcox wrote:
> On Tue, Sep 19, 2023 at 10:16:09PM -0400, [email protected] wrote:
> > From: Rik van Riel <[email protected]>
> >
> > Extend the locking scheme used to protect shared hugetlb mappings
> > from truncate vs page fault races, in order to protect private
> > hugetlb mappings (with resv_map) against MADV_DONTNEED.
> >
> > Add a read-write semaphore to the resv_map data structure, and
> > use that from the hugetlb_vma_(un)lock_* functions, in preparation
> > for closing the race between MADV_DONTNEED and page faults.
>
> This feels an awful lot like the invalidate_lock in struct
> address_space
> which was recently added by Jan Kara.
>
Indeed it does.

It might be even nicer if we could replace the hugetlb_vma_lock
special logic with the invalidate_lock for hugetlbfs.

Mike, can you think of any reason why the hugetlb_vma_lock logic
should not be replaced with the invalidate_lock?

If not, I'd be happy to implement that.

--
All Rights Reversed.

2023-09-20 09:30:31

by Matthew Wilcox

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On Tue, Sep 19, 2023 at 10:16:09PM -0400, [email protected] wrote:
> From: Rik van Riel <[email protected]>
>
> Extend the locking scheme used to protect shared hugetlb mappings
> from truncate vs page fault races, in order to protect private
> hugetlb mappings (with resv_map) against MADV_DONTNEED.
>
> Add a read-write semaphore to the resv_map data structure, and
> use that from the hugetlb_vma_(un)lock_* functions, in preparation
> for closing the race between MADV_DONTNEED and page faults.

This feels an awful lot like the invalidate_lock in struct address_space
which was recently added by Jan Kara.

2023-09-20 15:36:19

by Rik van Riel

[permalink] [raw]
Subject: [RFC PATCH 3/3] hugetlbfs: replace hugetlb_vma_lock with invalidate_lock

On Wed, 20 Sep 2023 04:57:33 +0100
Matthew Wilcox <[email protected]> wrote:
> On Tue, Sep 19, 2023 at 10:16:09PM -0400, [email protected] wrote:
> > From: Rik van Riel <[email protected]>
> >
> > Extend the locking scheme used to protect shared hugetlb mappings
> > from truncate vs page fault races, in order to protect private
> > hugetlb mappings (with resv_map) against MADV_DONTNEED.

> This feels an awful lot like the invalidate_lock in struct address_space
> which was recently added by Jan Kara.
>

Sure enough, the invalidate_lock is looking really similar.
This has the potential to simplify the hugetlbfs code a bunch!

On the flip side, this is a large change, and it may make sense
to merge it separately to help with bisectability?

Thank you for the suggestion!

Mike, did I forget a spot somewhere? :)

---8<---

From 11ea3afbd836fff6d20ef946b52544162466cb9c Mon Sep 17 00:00:00 2001
From: Rik van Riel <[email protected]>
Date: Wed, 20 Sep 2023 10:54:10 -0400
Subject: [PATCH 3/3] hugetlbfs: replace hugetlb_vma_lock with invalidate_lock

Replace the custom hugetlbfs VMA locking code with the recently
introduced invalidate_lock. This greatly simplifies things.

However, this is a large enough change that it should probably go in
separately from the other changes.

Suggested-by: Matthew Wilcox <[email protected]>
Signed-off-by: Rik van Riel <[email protected]>
---
fs/hugetlbfs/inode.c | 68 +-----------
include/linux/fs.h | 6 ++
include/linux/hugetlb.h | 7 --
kernel/fork.c | 6 --
mm/hugetlb.c | 233 +++-------------------------------------
5 files changed, 26 insertions(+), 294 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316c4cebd3f3..5ff18b0933bc 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -485,7 +485,6 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
struct folio *folio, pgoff_t index)
{
struct rb_root_cached *root = &mapping->i_mmap;
- struct hugetlb_vma_lock *vma_lock;
struct page *page = &folio->page;
struct vm_area_struct *vma;
unsigned long v_start;
@@ -496,8 +495,8 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
end = (index + 1) * pages_per_huge_page(h);

i_mmap_lock_write(mapping);
-retry:
- vma_lock = NULL;
+ filemap_invalidate_lock(mapping);
+
vma_interval_tree_foreach(vma, root, start, end - 1) {
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
@@ -505,62 +504,13 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
if (!hugetlb_vma_maps_page(vma, v_start, page))
continue;

- if (!hugetlb_vma_trylock_write(vma)) {
- vma_lock = vma->vm_private_data;
- /*
- * If we can not get vma lock, we need to drop
- * immap_sema and take locks in order. First,
- * take a ref on the vma_lock structure so that
- * we can be guaranteed it will not go away when
- * dropping immap_sema.
- */
- kref_get(&vma_lock->refs);
- break;
- }
-
unmap_hugepage_range(vma, v_start, v_end, NULL,
ZAP_FLAG_DROP_MARKER);
hugetlb_vma_unlock_write(vma);
}

+ filemap_invalidate_unlock(mapping);
i_mmap_unlock_write(mapping);
-
- if (vma_lock) {
- /*
- * Wait on vma_lock. We know it is still valid as we have
- * a reference. We must 'open code' vma locking as we do
- * not know if vma_lock is still attached to vma.
- */
- down_write(&vma_lock->rw_sema);
- i_mmap_lock_write(mapping);
-
- vma = vma_lock->vma;
- if (!vma) {
- /*
- * If lock is no longer attached to vma, then just
- * unlock, drop our reference and retry looking for
- * other vmas.
- */
- up_write(&vma_lock->rw_sema);
- kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
- goto retry;
- }
-
- /*
- * vma_lock is still attached to vma. Check to see if vma
- * still maps page and if so, unmap.
- */
- v_start = vma_offset_start(vma, start);
- v_end = vma_offset_end(vma, end);
- if (hugetlb_vma_maps_page(vma, v_start, page))
- unmap_hugepage_range(vma, v_start, v_end, NULL,
- ZAP_FLAG_DROP_MARKER);
-
- kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
- hugetlb_vma_unlock_write(vma);
-
- goto retry;
- }
}

static void
@@ -578,20 +528,10 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
unsigned long v_start;
unsigned long v_end;

- if (!hugetlb_vma_trylock_write(vma))
- continue;
-
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);

unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);
-
- /*
- * Note that vma lock only exists for shared/non-private
- * vmas. Therefore, lock is not held when calling
- * unmap_hugepage_range for private vmas.
- */
- hugetlb_vma_unlock_write(vma);
}
}

@@ -726,9 +666,11 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)

i_size_write(inode, offset);
i_mmap_lock_write(mapping);
+ filemap_invalidate_lock(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
ZAP_FLAG_DROP_MARKER);
+ filemap_invalidate_unlock(mapping);
i_mmap_unlock_write(mapping);
remove_inode_hugepages(inode, offset, LLONG_MAX);
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4aeb3fa11927..b455a8913db4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -847,6 +847,12 @@ static inline void filemap_invalidate_lock(struct address_space *mapping)
down_write(&mapping->invalidate_lock);
}

+static inline int filemap_invalidate_trylock(
+ struct address_space *mapping)
+{
+ return down_write_trylock(&mapping->invalidate_lock);
+}
+
static inline void filemap_invalidate_unlock(struct address_space *mapping)
{
up_write(&mapping->invalidate_lock);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d9ec500cfef9..af60b67ed828 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,7 +60,6 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
- struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -107,12 +106,6 @@ struct file_region {
#endif
};

-struct hugetlb_vma_lock {
- struct kref refs;
- struct rw_semaphore rw_sema;
- struct vm_area_struct *vma;
-};
-
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

diff --git a/kernel/fork.c b/kernel/fork.c
index 3b6d20dfb9a8..42453437b615 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -743,12 +743,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
i_mmap_unlock_write(mapping);
}

- /*
- * Copy/update hugetlb private vma information.
- */
- if (is_vm_hugetlb_page(tmp))
- hugetlb_dup_vma_private(tmp);
-
/* Link the vma into the MT */
if (vma_iter_bulk_store(&vmi, tmp))
goto fail_nomem_vmi_store;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3510e2bf23da..3b97bd762049 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -92,9 +92,6 @@ struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
@@ -264,165 +261,41 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
*/
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- down_read(&vma_lock->rw_sema);
- } else if (__vma_private_lock(vma)) {
- struct resv_map *resv_map = vma_resv_map(vma);
-
- down_read(&resv_map->rw_sema);
- }
+ if (vma->vm_file)
+ filemap_invalidate_lock_shared(vma->vm_file->f_mapping);
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- up_read(&vma_lock->rw_sema);
- } else if (__vma_private_lock(vma)) {
- struct resv_map *resv_map = vma_resv_map(vma);
-
- up_read(&resv_map->rw_sema);
- }
+ if (vma->vm_file)
+ filemap_invalidate_unlock_shared(vma->vm_file->f_mapping);
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- down_write(&vma_lock->rw_sema);
- } else if (__vma_private_lock(vma)) {
- struct resv_map *resv_map = vma_resv_map(vma);
-
- down_write(&resv_map->rw_sema);
- }
+ if (vma->vm_file)
+ filemap_invalidate_lock(vma->vm_file->f_mapping);
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- up_write(&vma_lock->rw_sema);
- } else if (__vma_private_lock(vma)) {
- struct resv_map *resv_map = vma_resv_map(vma);
-
- up_write(&resv_map->rw_sema);
- }
+ if (vma->vm_file)
+ filemap_invalidate_unlock(vma->vm_file->f_mapping);
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{

- if (__vma_shareable_lock(vma)) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- return down_write_trylock(&vma_lock->rw_sema);
- } else if (__vma_private_lock(vma)) {
- struct resv_map *resv_map = vma_resv_map(vma);
-
- return down_write_trylock(&resv_map->rw_sema);
- }
+ if (vma->vm_file)
+ return filemap_invalidate_trylock(vma->vm_file->f_mapping);

return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
- if (__vma_shareable_lock(vma)) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- lockdep_assert_held(&vma_lock->rw_sema);
- } else if (__vma_private_lock(vma)) {
- struct resv_map *resv_map = vma_resv_map(vma);
-
- lockdep_assert_held(&resv_map->rw_sema);
- }
-}
-
-void hugetlb_vma_lock_release(struct kref *kref)
-{
- struct hugetlb_vma_lock *vma_lock = container_of(kref,
- struct hugetlb_vma_lock, refs);
-
- kfree(vma_lock);
-}
-
-static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
-{
- struct vm_area_struct *vma = vma_lock->vma;
-
- /*
- * vma_lock structure may or not be released as a result of put,
- * it certainly will no longer be attached to vma so clear pointer.
- * Semaphore synchronizes access to vma_lock->vma field.
- */
- vma_lock->vma = NULL;
- vma->vm_private_data = NULL;
- up_write(&vma_lock->rw_sema);
- kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
-}
-
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
-{
- if (__vma_shareable_lock(vma)) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- __hugetlb_vma_unlock_write_put(vma_lock);
- }
-}
-
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
-{
- /*
- * Only present in sharable vmas.
- */
- if (!vma || !__vma_shareable_lock(vma))
- return;
-
- if (vma->vm_private_data) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- down_write(&vma_lock->rw_sema);
- __hugetlb_vma_unlock_write_put(vma_lock);
- }
-}
-
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
-{
- struct hugetlb_vma_lock *vma_lock;
-
- /* Only establish in (flags) sharable vmas */
- if (!vma || !(vma->vm_flags & VM_MAYSHARE))
- return;
-
- /* Should never get here with non-NULL vm_private_data */
- if (vma->vm_private_data)
- return;
-
- vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
- if (!vma_lock) {
- /*
- * If we can not allocate structure, then vma can not
- * participate in pmd sharing. This is only a possible
- * performance enhancement and memory saving issue.
- * However, the lock is also used to synchronize page
- * faults with truncation. If the lock is not present,
- * unlikely races could leave pages in a file past i_size
- * until the file is removed. Warn in the unlikely case of
- * allocation failure.
- */
- pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
- return;
- }
-
- kref_init(&vma_lock->refs);
- init_rwsem(&vma_lock->rw_sema);
- vma_lock->vma = vma;
- vma->vm_private_data = vma_lock;
+ if (vma->vm_file)
+ lockdep_assert_held(&vma->vm_file->f_mapping->invalidate_lock);
}

/* Helper that removes a struct file_region from the resv_map cache and returns
@@ -1095,7 +968,6 @@ struct resv_map *resv_map_alloc(void)
kref_init(&resv_map->refs);
spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
- init_rwsem(&resv_map->rw_sema);

resv_map->adds_in_progress = 0;
/*
@@ -1185,30 +1057,6 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
return (get_vma_private_data(vma) & flag) != 0;
}

-void hugetlb_dup_vma_private(struct vm_area_struct *vma)
-{
- VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
- /*
- * Clear vm_private_data
- * - For shared mappings this is a per-vma semaphore that may be
- * allocated in a subsequent call to hugetlb_vm_op_open.
- * Before clearing, make sure pointer is not associated with vma
- * as this will leak the structure. This is the case when called
- * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
- * been called to allocate a new structure.
- * - For MAP_PRIVATE mappings, this is the reserve map which does
- * not apply to children. Faults generated by the children are
- * not guaranteed to succeed, even if read-only.
- */
- if (vma->vm_flags & VM_MAYSHARE) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- if (vma_lock && vma_lock->vma != vma)
- vma->vm_private_data = NULL;
- } else
- vma->vm_private_data = NULL;
-}
-
/*
* Reset and decrement one ref on hugepage private reservation.
* Called with mm->mmap_lock writer semaphore held.
@@ -1236,8 +1084,6 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
kref_put(&reservations->refs, resv_map_release);
}
-
- hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
@@ -4841,25 +4687,6 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
kref_get(&resv->refs);
}
-
- /*
- * vma_lock structure for sharable mappings is vma specific.
- * Clear old pointer (if copied via vm_area_dup) and allocate
- * new structure. Before clearing, make sure vma_lock is not
- * for this vma.
- */
- if (vma->vm_flags & VM_MAYSHARE) {
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
- if (vma_lock) {
- if (vma_lock->vma != vma) {
- vma->vm_private_data = NULL;
- hugetlb_vma_lock_alloc(vma);
- } else
- pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
- } else
- hugetlb_vma_lock_alloc(vma);
- }
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
@@ -4870,8 +4697,6 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
unsigned long reserve, start, end;
long gbl_reserve;

- hugetlb_vma_lock_free(vma);
-
resv = vma_resv_map(vma);
if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return;
@@ -5441,24 +5266,8 @@ void __hugetlb_zap_begin(struct vm_area_struct *vma,
void __hugetlb_zap_end(struct vm_area_struct *vma,
struct zap_details *details)
{
- zap_flags_t zap_flags = details ? details->zap_flags : 0;
-
- if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
- /*
- * Unlock and free the vma lock before releasing i_mmap_rwsem.
- * When the vma_lock is freed, this makes the vma ineligible
- * for pmd sharing. And, i_mmap_rwsem is required to set up
- * pmd sharing. This is important as page tables for this
- * unmapped range will be asynchrously deleted. If the page
- * tables are shared, there will be issues when accessed by
- * someone else.
- */
- __hugetlb_vma_unlock_write_free(vma);
- i_mmap_unlock_write(vma->vm_file->f_mapping);
- } else {
- i_mmap_unlock_write(vma->vm_file->f_mapping);
- hugetlb_vma_unlock_write(vma);
- }
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+ hugetlb_vma_unlock_write(vma);
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6701,12 +6510,6 @@ bool hugetlb_reserve_pages(struct inode *inode,
return false;
}

- /*
- * vma specific semaphore used for pmd sharing and fault/truncation
- * synchronization
- */
- hugetlb_vma_lock_alloc(vma);
-
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
@@ -6829,7 +6632,6 @@ bool hugetlb_reserve_pages(struct inode *inode,
hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
chg * pages_per_huge_page(h), h_cg);
out_err:
- hugetlb_vma_lock_free(vma);
if (!vma || vma->vm_flags & VM_MAYSHARE)
/* Only call region_abort if the region_chg succeeded but the
* region_add failed or didn't run.
@@ -6899,13 +6701,10 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
/*
* match the virtual addresses, permission and the alignment of the
* page table page.
- *
- * Also, vma_lock (vm_private_data) is required for sharing.
*/
if (pmd_index(addr) != pmd_index(saddr) ||
vm_flags != svm_flags ||
- !range_in_vma(svma, sbase, s_end) ||
- !svma->vm_private_data)
+ !range_in_vma(svma, sbase, s_end))
return 0;

return saddr;
@@ -6925,8 +6724,6 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
*/
if (!(vma->vm_flags & VM_MAYSHARE))
return false;
- if (!vma->vm_private_data) /* vma lock required for sharing */
- return false;
if (!range_in_vma(vma, start, end))
return false;
return true;
--
2.41.0

2023-09-21 02:48:57

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On 09/20/23 00:09, Rik van Riel wrote:
> On Wed, 2023-09-20 at 04:57 +0100, Matthew Wilcox wrote:
> > On Tue, Sep 19, 2023 at 10:16:09PM -0400, [email protected] wrote:
> > > From: Rik van Riel <[email protected]>
> > >
> > > Extend the locking scheme used to protect shared hugetlb mappings
> > > from truncate vs page fault races, in order to protect private
> > > hugetlb mappings (with resv_map) against MADV_DONTNEED.
> > >
> > > Add a read-write semaphore to the resv_map data structure, and
> > > use that from the hugetlb_vma_(un)lock_* functions, in preparation
> > > for closing the race between MADV_DONTNEED and page faults.
> >
> > This feels an awful lot like the invalidate_lock in struct
> > address_space
> > which was recently added by Jan Kara.
> >
> Indeed it does.
>
> It might be even nicer if we could replace the hugetlb_vma_lock
> special logic with the invalidate_lock for hugetlbfs.
>
> Mike, can you think of any reason why the hugetlb_vma_lock logic
> should not be replaced with the invalidate_lock?
>
> If not, I'd be happy to implement that.
>

Sorry Rik,

I have some other things that need immediate attention and have not had a
chance to take a close look here. I'll take a closer look later (my) today
or tomorrow.
--
Mike Kravetz

2023-09-22 00:42:24

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On Thu, 2023-09-21 at 15:42 -0700, Mike Kravetz wrote:
> On 09/19/23 22:16, [email protected] wrote:
> > From: Rik van Riel <[email protected]>
> >
> > Extend the locking scheme used to protect shared hugetlb mappings
> > from truncate vs page fault races, in order to protect private
> > hugetlb mappings (with resv_map) against MADV_DONTNEED.
> >
> > Add a read-write semaphore to the resv_map data structure, and
> > use that from the hugetlb_vma_(un)lock_* functions, in preparation
> > for closing the race between MADV_DONTNEED and page faults.
> >
> > Signed-off-by: Rik van Riel <[email protected]>
> > ---
> >  include/linux/hugetlb.h |  6 ++++++
> >  mm/hugetlb.c            | 36 ++++++++++++++++++++++++++++++++----
> >  2 files changed, 38 insertions(+), 4 deletions(-)
>
> This looks straight forward.
>
> However, I ran just this patch through libhugetlbfs test suite and it
> hung on
> misaligned_offset (2M: 32).
> https://github.com/libhugetlbfs/libhugetlbfs/blob/master/tests/misaligned_offset.c

Ah, so that's why I couldn't find hugetlbfs tests in the kernel
selftests directory. They're in libhugetlbfs.

I'll play around with those tests tomorrow. Let me see what's
going on.

--
All Rights Reversed.

2023-09-22 01:34:19

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH 0/2] hugetlbfs: close race between MADV_DONTNEED and page fault

On 09/19/23 22:16, [email protected] wrote:
> Malloc libraries, like jemalloc and tcmalloc, take decisions on when
> to call madvise independently from the code in the main application.
>
> This sometimes results in the application page faulting on an address,
> right after the malloc library has shot down the backing memory with
> MADV_DONTNEED.
>
> Usually this is harmless, because we always have some 4kB pages
> sitting around to satisfy a page fault. However, with hugetlbfs
> systems often allocate only the exact number of huge pages that
> the application wants.
>
> Due to TLB batching, hugetlbfs MADV_DONTNEED will free pages outside of
> any lock taken on the page fault path, which can open up the following
> race condition:
>
> CPU 1 CPU 2
>
> MADV_DONTNEED
> unmap page
> shoot down TLB entry
> page fault
> fail to allocate a huge page
> killed with SIGBUS
> free page

Hi Rik,

I think we discussed this before. Even with your changes there is no
guarantee that the free'ed hugetlb page can not be stolen by another
application. This is true even with hugetlb reservations as the
reservation is consumed by the first fault. After the MADV_DONTNEED
no reservation will exist, which allows another application to steal
the page.

This is VERY unlikely to actually happen. However, I do want to point
out that it is possible. Of course, the way the code is today you will
always fail if there is only one hugetlb page in the above scenario. So,
your changes will help tremendously and I support them moving forward.

I suspect you are already aware of this, but just want to make sure you
are aware there are no guarantees here.
--
Mike Kravetz

>
> Fix that race by extending the hugetlb_vma_lock locking scheme to also
> cover private hugetlb mappings (with resv_map), and pulling the locking
> from __unmap_hugepage_final_range into helper functions called from
> zap_page_range_single. This ensures page faults stay locked out of
> the MADV_DONTNEED VMA until the huge pages have actually been freed.

2023-09-22 06:46:21

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On 09/21/23 15:42, Mike Kravetz wrote:
> On 09/19/23 22:16, [email protected] wrote:
> > From: Rik van Riel <[email protected]>
> >
> > Extend the locking scheme used to protect shared hugetlb mappings
> > from truncate vs page fault races, in order to protect private
> > hugetlb mappings (with resv_map) against MADV_DONTNEED.
> >
> > Add a read-write semaphore to the resv_map data structure, and
> > use that from the hugetlb_vma_(un)lock_* functions, in preparation
> > for closing the race between MADV_DONTNEED and page faults.
> >
> > Signed-off-by: Rik van Riel <[email protected]>
> > ---
> > include/linux/hugetlb.h | 6 ++++++
> > mm/hugetlb.c | 36 ++++++++++++++++++++++++++++++++----
> > 2 files changed, 38 insertions(+), 4 deletions(-)
>
> This looks straight forward.
>
> However, I ran just this patch through libhugetlbfs test suite and it hung on
> misaligned_offset (2M: 32).
> https://github.com/libhugetlbfs/libhugetlbfs/blob/master/tests/misaligned_offset.c
>
> Added lock/semaphore debugging to the kernel and got:
> [ 38.094690] =========================
> [ 38.095517] WARNING: held lock freed!
> [ 38.096350] 6.6.0-rc2-next-20230921-dirty #4 Not tainted
> [ 38.097556] -------------------------
> [ 38.098439] mlock/1002 is freeing memory ffff8881eff8dc00-ffff8881eff8ddff, with a lock still held there!
> [ 38.100550] ffff8881eff8dce8 (&resv_map->rw_sema){++++}-{3:3}, at: __unmap_hugepage_range_final+0x29/0x120
> [ 38.103564] 2 locks held by mlock/1002:
> [ 38.104552] #0: ffff8881effa42a0 (&mm->mmap_lock){++++}-{3:3}, at: do_vmi_align_munmap+0x5c6/0x650
> [ 38.106611] #1: ffff8881eff8dce8 (&resv_map->rw_sema){++++}-{3:3}, at: __unmap_hugepage_range_final+0x29/0x120
> [ 38.108827]
> [ 38.108827] stack backtrace:
> [ 38.109929] CPU: 0 PID: 1002 Comm: mlock Not tainted 6.6.0-rc2-next-20230921-dirty #4
> [ 38.111812] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc37 04/01/2014
> [ 38.113784] Call Trace:
> [ 38.114456] <TASK>
> [ 38.115066] dump_stack_lvl+0x57/0x90
> [ 38.116001] debug_check_no_locks_freed+0x137/0x170
> [ 38.117193] ? remove_vma+0x28/0x70
> [ 38.118088] __kmem_cache_free+0x8f/0x2b0
> [ 38.119080] remove_vma+0x28/0x70
> [ 38.119960] do_vmi_align_munmap+0x3b1/0x650
> [ 38.121051] do_vmi_munmap+0xc9/0x1a0
> [ 38.122006] __vm_munmap+0xa4/0x190
> [ 38.122931] __ia32_sys_munmap+0x15/0x20
> [ 38.123926] __do_fast_syscall_32+0x68/0x100
> [ 38.125031] do_fast_syscall_32+0x2f/0x70
> [ 38.126060] entry_SYSENTER_compat_after_hwframe+0x7b/0x8d
> [ 38.127366] RIP: 0023:0xf7f05579
> [ 38.128198] Code: b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 00 00 00 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
> [ 38.132534] RSP: 002b:00000000fffa877c EFLAGS: 00000286 ORIG_RAX: 000000000000005b
> [ 38.135703] RAX: ffffffffffffffda RBX: 00000000f7a00000 RCX: 0000000000200000
> [ 38.137323] RDX: 00000000f7a00000 RSI: 0000000000200000 RDI: 0000000000000003
> [ 38.138965] RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000
> [ 38.140574] R10: 0000000000000000 R11: 0000000000000286 R12: 0000000000000000
> [ 38.142191] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
> [ 38.143865] </TASK>
>
> Something is not quite right. If you do not get to it first, I will take a
> look as time permits.

Just for grins I threw on patch 2 (with lock debugging) and ran the test
suite. It gets past misaligned_offset, but is spewing locking warnings
too fast to read. Something is certainly missing.
--
Mike Kravetz

2023-09-22 08:44:52

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On 09/19/23 22:16, [email protected] wrote:
> From: Rik van Riel <[email protected]>
>
> Extend the locking scheme used to protect shared hugetlb mappings
> from truncate vs page fault races, in order to protect private
> hugetlb mappings (with resv_map) against MADV_DONTNEED.
>
> Add a read-write semaphore to the resv_map data structure, and
> use that from the hugetlb_vma_(un)lock_* functions, in preparation
> for closing the race between MADV_DONTNEED and page faults.
>
> Signed-off-by: Rik van Riel <[email protected]>
> ---
> include/linux/hugetlb.h | 6 ++++++
> mm/hugetlb.c | 36 ++++++++++++++++++++++++++++++++----
> 2 files changed, 38 insertions(+), 4 deletions(-)

This looks straight forward.

However, I ran just this patch through libhugetlbfs test suite and it hung on
misaligned_offset (2M: 32).
https://github.com/libhugetlbfs/libhugetlbfs/blob/master/tests/misaligned_offset.c

Added lock/semaphore debugging to the kernel and got:
[ 38.094690] =========================
[ 38.095517] WARNING: held lock freed!
[ 38.096350] 6.6.0-rc2-next-20230921-dirty #4 Not tainted
[ 38.097556] -------------------------
[ 38.098439] mlock/1002 is freeing memory ffff8881eff8dc00-ffff8881eff8ddff, with a lock still held there!
[ 38.100550] ffff8881eff8dce8 (&resv_map->rw_sema){++++}-{3:3}, at: __unmap_hugepage_range_final+0x29/0x120
[ 38.103564] 2 locks held by mlock/1002:
[ 38.104552] #0: ffff8881effa42a0 (&mm->mmap_lock){++++}-{3:3}, at: do_vmi_align_munmap+0x5c6/0x650
[ 38.106611] #1: ffff8881eff8dce8 (&resv_map->rw_sema){++++}-{3:3}, at: __unmap_hugepage_range_final+0x29/0x120
[ 38.108827]
[ 38.108827] stack backtrace:
[ 38.109929] CPU: 0 PID: 1002 Comm: mlock Not tainted 6.6.0-rc2-next-20230921-dirty #4
[ 38.111812] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc37 04/01/2014
[ 38.113784] Call Trace:
[ 38.114456] <TASK>
[ 38.115066] dump_stack_lvl+0x57/0x90
[ 38.116001] debug_check_no_locks_freed+0x137/0x170
[ 38.117193] ? remove_vma+0x28/0x70
[ 38.118088] __kmem_cache_free+0x8f/0x2b0
[ 38.119080] remove_vma+0x28/0x70
[ 38.119960] do_vmi_align_munmap+0x3b1/0x650
[ 38.121051] do_vmi_munmap+0xc9/0x1a0
[ 38.122006] __vm_munmap+0xa4/0x190
[ 38.122931] __ia32_sys_munmap+0x15/0x20
[ 38.123926] __do_fast_syscall_32+0x68/0x100
[ 38.125031] do_fast_syscall_32+0x2f/0x70
[ 38.126060] entry_SYSENTER_compat_after_hwframe+0x7b/0x8d
[ 38.127366] RIP: 0023:0xf7f05579
[ 38.128198] Code: b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 00 00 00 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
[ 38.132534] RSP: 002b:00000000fffa877c EFLAGS: 00000286 ORIG_RAX: 000000000000005b
[ 38.135703] RAX: ffffffffffffffda RBX: 00000000f7a00000 RCX: 0000000000200000
[ 38.137323] RDX: 00000000f7a00000 RSI: 0000000000200000 RDI: 0000000000000003
[ 38.138965] RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000
[ 38.140574] R10: 0000000000000000 R11: 0000000000000286 R12: 0000000000000000
[ 38.142191] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
[ 38.143865] </TASK>

Something is not quite right. If you do not get to it first, I will take a
look as time permits.
--
Mike Kravetz

2023-09-22 14:51:24

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On Thu, 2023-09-21 at 15:42 -0700, Mike Kravetz wrote:
> On 09/19/23 22:16, [email protected] wrote:
> > From: Rik van Riel <[email protected]>
> >
> > Extend the locking scheme used to protect shared hugetlb mappings
> > from truncate vs page fault races, in order to protect private
> > hugetlb mappings (with resv_map) against MADV_DONTNEED.
> >
> > Add a read-write semaphore to the resv_map data structure, and
> > use that from the hugetlb_vma_(un)lock_* functions, in preparation
> > for closing the race between MADV_DONTNEED and page faults.
> >
> > Signed-off-by: Rik van Riel <[email protected]>
> > ---
> >  include/linux/hugetlb.h |  6 ++++++
> >  mm/hugetlb.c            | 36 ++++++++++++++++++++++++++++++++----
> >  2 files changed, 38 insertions(+), 4 deletions(-)
>
> This looks straight forward.
>
> However, I ran just this patch through libhugetlbfs test suite and it
> hung on
> misaligned_offset (2M: 32).
> https://github.com/libhugetlbfs/libhugetlbfs/blob/master/tests/misaligned_offset.c


Speaking of "looks straightforward", how do I compile the
libhugetlbfs code?

The __morecore variable, which is pointed at either the
THP or hugetlbfs morecore function, does not seem to be
defined anywhere in the sources.

Do I need to run some magic script (didn't find it) to
get a special header file set up before I can build
libhugetlbfs?



$ make
CC32 obj32/morecore.o
morecore.c: In function ‘__lh_hugetlbfs_setup_morecore’:
morecore.c:368:17: error: ‘__morecore’ undeclared (first use in this
function); did you mean ‘thp_morecore’?
368 | __morecore = &thp_morecore;
| ^~~~~~~~~~
| thp_morecore
morecore.c:368:17: note: each undeclared identifier is reported only
once for each function it appears in
make: *** [Makefile:292: obj32/morecore.o] Error 1
$ grep __morecore *.[ch]
morecore.c: __morecore = &thp_morecore;
morecore.c: __morecore = &hugetlbfs_morecore;



--
All Rights Reversed.

2023-09-22 17:02:43

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On Fri, 2023-09-22 at 09:44 -0700, Mike Kravetz wrote:
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index f906c5fa4d09..8f3d5895fffc 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -372,6 +372,11 @@ static void
> __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
>                 struct hugetlb_vma_lock *vma_lock = vma-
> >vm_private_data;
>  
>                 __hugetlb_vma_unlock_write_put(vma_lock);
> +       } else if (__vma_private_lock(vma)) {
> +               struct resv_map *resv_map = vma_resv_map(vma);
> +
> +               /* no free for anon vmas, but still need to unlock */
> +               up_write(&resv_map->rw_sema);
>         }
>  }
>

Nice catch. I'll add that.

I was still trying to reproduce the bug here.

The libhugetlbfs code compiles with the offending bits
commented out, but the misaligned_offset test wasn't
causing trouble on my test VM here.

Given the potential negative impact of moving from a
per-VMA lock to a per-backing-address_space lock, I'll
keep the 3 patches separate, and in the order they are
in now.

Let me go spin and test v2.

--
All Rights Reversed.

2023-09-22 17:02:45

by Mike Kravetz

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On 09/22/23 10:37, Rik van Riel wrote:
> On Thu, 2023-09-21 at 15:42 -0700, Mike Kravetz wrote:
> > On 09/19/23 22:16, [email protected] wrote:
> > > From: Rik van Riel <[email protected]>
> > >
> > > Extend the locking scheme used to protect shared hugetlb mappings
> > > from truncate vs page fault races, in order to protect private
> > > hugetlb mappings (with resv_map) against MADV_DONTNEED.
> > >
> > > Add a read-write semaphore to the resv_map data structure, and
> > > use that from the hugetlb_vma_(un)lock_* functions, in preparation
> > > for closing the race between MADV_DONTNEED and page faults.
> > >
> > > Signed-off-by: Rik van Riel <[email protected]>
> > > ---
> > >  include/linux/hugetlb.h |  6 ++++++
> > >  mm/hugetlb.c            | 36 ++++++++++++++++++++++++++++++++----
> > >  2 files changed, 38 insertions(+), 4 deletions(-)
> >
> > This looks straight forward.
> >
> > However, I ran just this patch through libhugetlbfs test suite and it
> > hung on
> > misaligned_offset (2M: 32).
> > https://github.com/libhugetlbfs/libhugetlbfs/blob/master/tests/misaligned_offset.c
>
>
> Speaking of "looks straightforward", how do I compile the
> libhugetlbfs code?
>
> The __morecore variable, which is pointed at either the
> THP or hugetlbfs morecore function, does not seem to be
> defined anywhere in the sources.
>
> Do I need to run some magic script (didn't find it) to
> get a special header file set up before I can build
> libhugetlbfs?

libhugetlbfs is a mess! Distros have dropped it. However, I still find
the test cases useful. I have a special VM with an old glibc just for
running the tests.

Sorry, can't give instructions for using tests on a recent glibc.

But, back to this patch ...
With the hints from the locking debug code, it came to me on my walk this
morning. We need to also have __hugetlb_vma_unlock_write_free() work
for private vmas as called from __unmap_hugepage_range_final. This
additional change (or something like it) is required in this patch.

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f906c5fa4d09..8f3d5895fffc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -372,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

__hugetlb_vma_unlock_write_put(vma_lock);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ /* no free for anon vmas, but still need to unlock */
+ up_write(&resv_map->rw_sema);
}
}


--
Mike Kravetz

2023-09-22 18:37:39

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 1/2] hugetlbfs: extend hugetlb_vma_lock to private VMAs

On Fri, 2023-09-22 at 09:44 -0700, Mike Kravetz wrote:
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index f906c5fa4d09..8f3d5895fffc 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -372,6 +372,11 @@ static void
> __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
>                 struct hugetlb_vma_lock *vma_lock = vma-
> >vm_private_data;
>  
>                 __hugetlb_vma_unlock_write_put(vma_lock);
> +       } else if (__vma_private_lock(vma)) {
> +               struct resv_map *resv_map = vma_resv_map(vma);
> +
> +               /* no free for anon vmas, but still need to unlock */
> +               up_write(&resv_map->rw_sema);
>         }
>  }
>  

That did the trick. The libhugetlbfs tests pass now, with
lockdep and KASAN enabled. Breno's MADV_DONTNEED test case
for hugetlbfs still passes, too.


--
All Rights Reversed.