2022-05-23 07:59:34

by Yang Shi

Subject: [mm-unstable PATCH 0/7] Cleanup transhuge_xxx helpers


This series is a follow-up to the discussion about cleaning up the transhuge_xxx
helpers at https://lore.kernel.org/linux-mm/[email protected]/.

THP has a bunch of helpers that perform VMA sanity checks on different paths.
They do similar checks at most callsites and carry a lot of duplicated code,
and it is confusing which helper should be used under which conditions.

This series reorganizes and cleans up the code so that all the checks are
consolidated into hugepage_vma_check().
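
All users end up going through the single consolidated helper, whose
prototype (as added to include/linux/huge_mm.h by patch 5/7 below) is:

	extern bool hugepage_vma_check(struct vm_area_struct *vma,
				       unsigned long vm_flags);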

transhuge_vma_enabled(), transparent_hugepage_active() and
__transparent_hugepage_enabled() are all killed by this series.

A transhuge_vma_size_ok() helper is added to remove some duplicated code.
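
The helper itself is introduced earlier in the series (that patch is not
included in this excerpt); its intent is a pure size check: the VMA must be
large enough to hold at least one aligned PMD-sized huge page. A minimal
sketch of such a check, illustrative rather than the patch's literal code:

	/*
	 * Illustrative only: true if an HPAGE_PMD_SIZE region, aligned to
	 * HPAGE_PMD_SIZE, fits inside [vm_start, vm_end).
	 */
	static inline bool transhuge_vma_size_ok(struct vm_area_struct *vma)
	{
		return round_up(vma->vm_start, HPAGE_PMD_SIZE) <
		       (vma->vm_end & HPAGE_PMD_MASK);
	}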


 b/fs/proc/task_mmu.c         |   4 +-
 b/include/linux/huge_mm.h    | 102 ++++++++++++++++++++++++++-----------------------------------------
 b/include/linux/khugepaged.h |  21 +------------
 b/mm/huge_memory.c           |  74 ++++++++++++++++++++++++++++++++++++------------
 b/mm/khugepaged.c            |  92 ++++++++++++++++--------------------------------------------
 b/mm/memory.c                |   6 ++-
 6 files changed, 129 insertions(+), 170 deletions(-)



2022-05-23 08:07:22

by Yang Shi

Subject: [PATCH 5/7] mm: thp: kill transparent_hugepage_active()

transparent_hugepage_active() was introduced to show the THP eligibility
bit in /proc/<pid>/smaps, and smaps is its only user. But it does much the
same check as hugepage_vma_check(), which is used by khugepaged. We
definitely don't have to maintain two similar checks, so kill
transparent_hugepage_active().
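
(For context: the bit appears in /proc/<pid>/smaps as a per-VMA line such as
"THPeligible: 1". This patch changes which helper computes the value, not
the output format.)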

Also move hugepage_vma_check() to huge_memory.c and huge_mm.h since it is
no longer only for khugepaged.

Signed-off-by: Yang Shi <[email protected]>
---
 fs/proc/task_mmu.c         |  2 +-
 include/linux/huge_mm.h    | 14 +++++++------
 include/linux/khugepaged.h |  2 --
 mm/huge_memory.c           | 41 ++++++++++++++++++++++++++++++------
 mm/khugepaged.c            | 43 --------------------------------------
 5 files changed, 44 insertions(+), 58 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f9c9abb50bb7..ab4849d07a1f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -863,7 +863,7 @@ static int show_smap(struct seq_file *m, void *v)
 	__show_smap(m, &mss, false);
 
 	seq_printf(m, "THPeligible: %d\n",
-		   transparent_hugepage_active(vma));
+		   hugepage_vma_check(vma, vma->vm_flags));
 
 	if (arch_pkeys_enabled())
 		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 79d5919beb83..2da52ca8fd52 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -209,7 +209,8 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
 	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
 }
 
-bool transparent_hugepage_active(struct vm_area_struct *vma);
+extern bool hugepage_vma_check(struct vm_area_struct *vma,
+			       unsigned long vm_flags);
 
 #define transparent_hugepage_use_zero_page()				\
 	(transparent_hugepage_flags &					\
@@ -358,11 +359,6 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 	return false;
 }
 
-static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
-{
-	return false;
-}
-
 static inline bool transhuge_vma_size_ok(struct vm_area_struct *vma)
 {
 	return false;
@@ -380,6 +376,12 @@ static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
 	return false;
 }
 
+static inline bool hugepage_vma_check(struct vm_area_struct *vma,
+				      unsigned long vm_flags)
+{
+	return false;
+}
+
 static inline void prep_transhuge_page(struct page *page) {}
 
 #define transparent_hugepage_flags 0UL
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 392d34c3c59a..795c8d2170b0 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -10,8 +10,6 @@ extern struct attribute_group khugepaged_attr_group;
 extern int khugepaged_init(void);
 extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
-extern bool hugepage_vma_check(struct vm_area_struct *vma,
-			       unsigned long vm_flags);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter_vma(struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d633f97452c1..d9ec82892b05 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -69,18 +69,47 @@ static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
 unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
-bool transparent_hugepage_active(struct vm_area_struct *vma)
+bool hugepage_vma_check(struct vm_area_struct *vma,
+			unsigned long vm_flags)
 {
+	if (!transhuge_vma_enabled(vma, vm_flags))
+		return false;
+
+	if (vm_flags & VM_NO_KHUGEPAGED)
+		return false;
+
+	/* Don't run khugepaged against DAX vma */
+	if (vma_is_dax(vma))
+		return false;
+
+	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
+				vma->vm_pgoff, HPAGE_PMD_NR))
+		return false;
+
 	if (!transhuge_vma_size_ok(vma))
 		return false;
-	if (vma_is_anonymous(vma))
-		return __transparent_hugepage_enabled(vma);
-	if (vma_is_shmem(vma))
+
+	/* Enabled via shmem mount options or sysfs settings. */
+	if (shmem_file(vma->vm_file))
 		return shmem_huge_enabled(vma);
-	if (transhuge_vma_enabled(vma, vma->vm_flags) && file_thp_enabled(vma))
+
+	if (!khugepaged_enabled())
+		return false;
+
+	/* THP settings require madvise. */
+	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
+		return false;
+
+	/* Only regular file is valid */
+	if (file_thp_enabled(vma))
 		return true;
 
-	return false;
+	if (!vma->anon_vma || !vma_is_anonymous(vma))
+		return false;
+	if (vma_is_temporary_stack(vma))
+		return false;
+
+	return true;
 }
 
 static bool get_huge_zero_page(void)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 51f0e6ea3977..a7ac1ae418f8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -437,49 +437,6 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
-bool hugepage_vma_check(struct vm_area_struct *vma,
-			unsigned long vm_flags)
-{
-	if (!transhuge_vma_enabled(vma, vm_flags))
-		return false;
-
-	if (vm_flags & VM_NO_KHUGEPAGED)
-		return false;
-
-	/* Don't run khugepaged against DAX vma */
-	if (vma_is_dax(vma))
-		return false;
-
-	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
-				vma->vm_pgoff, HPAGE_PMD_NR))
-		return false;
-
-	if (!transhuge_vma_size_ok(vma))
-		return false;
-
-	/* Enabled via shmem mount options or sysfs settings. */
-	if (shmem_file(vma->vm_file))
-		return shmem_huge_enabled(vma);
-
-	if (!khugepaged_enabled())
-		return false;
-
-	/* THP settings require madvise. */
-	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
-		return false;
-
-	/* Only regular file is valid */
-	if (file_thp_enabled(vma))
-		return true;
-
-	if (!vma->anon_vma || !vma_is_anonymous(vma))
-		return false;
-	if (vma_is_temporary_stack(vma))
-		return false;
-
-	return true;
-}
-
 void __khugepaged_enter(struct mm_struct *mm)
 {
 	struct mm_slot *mm_slot;
--
2.26.3
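
For context, a caller-side sketch of how the consolidated helper is used.
This is modeled on khugepaged_enter_vma() in mm/khugepaged.c and is
illustrative only; the real function carries additional checks not shown:

	void khugepaged_enter_vma(struct vm_area_struct *vma,
				  unsigned long vm_flags)
	{
		/* Register the mm with khugepaged only for eligible VMAs. */
		if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
		    hugepage_vma_check(vma, vm_flags))
			__khugepaged_enter(vma->vm_mm);
	}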