2023-01-09 21:35:46

by SeongJae Park

Subject: [PATCH 0/6] mm/damon/{v,p}addr: misc fixups for folio usage

DAMON's monitoring operations sets for the virtual and the physical
address spaces use folios now, but some of the code does not reflect
the fact.  Further clean up the code for folio usage.

SeongJae Park (6):
mm/damon/vaddr: rename 'damon_young_walk_private->page_sz' to
'folio_sz'
mm/damon/vaddr: support folio of neither HPAGE_PMD_SIZE nor PAGE_SIZE
mm/damon/vaddr: record appropriate folio size when the access is not
found
mm/damon/paddr: rename 'damon_pa_access_chk_result->page_sz' to
'folio_sz'
mm/damon/paddr: remove folio_sz field from damon_pa_access_chk_result
mm/damon/paddr: remove damon_pa_access_chk_result struct

mm/damon/paddr.c | 44 +++++++++++++++++---------------------------
mm/damon/vaddr.c | 30 ++++++++++++++----------------
2 files changed, 31 insertions(+), 43 deletions(-)

--
2.25.1


2023-01-09 21:44:06

by SeongJae Park

Subject: [PATCH 6/6] mm/damon/paddr: remove damon_pa_access_chk_result struct

The 'damon_pa_access_chk_result' struct now contains only one field.
Pass a plain variable instead.
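
For illustration, a minimal sketch of the change (the full diff is
below): the one-field wrapper struct is dropped, and a plain bool is
passed through 'rmap_walk_control->arg', which the rmap_one callback
casts back:

	/* before: a single-field struct passed via rwc.arg */
	struct damon_pa_access_chk_result {
		bool accessed;
	};

	/* after: pass the flag directly */
	bool accessed = false;
	struct rmap_walk_control rwc = {
		.arg = &accessed,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};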

Signed-off-by: SeongJae Park <[email protected]>
---
mm/damon/paddr.c | 28 +++++++++++-----------------
1 file changed, 11 insertions(+), 17 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index b51606519bbd..b4df9b9bcc0a 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -79,50 +79,44 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
}
}

-struct damon_pa_access_chk_result {
- bool accessed;
-};
-
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
- struct damon_pa_access_chk_result *result = arg;
+ bool *accessed = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

- result->accessed = false;
+ *accessed = false;
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
- result->accessed = pte_young(*pvmw.pte) ||
+ *accessed = pte_young(*pvmw.pte) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- result->accessed = pmd_young(*pvmw.pmd) ||
+ *accessed = pmd_young(*pvmw.pmd) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
#else
WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
- if (result->accessed) {
+ if (*accessed) {
page_vma_mapped_walk_done(&pvmw);
break;
}
}

/* If accessed, stop walking */
- return !result->accessed;
+ return *accessed == false;
}

static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
- struct damon_pa_access_chk_result result = {
- .accessed = false,
- };
+ bool accessed = false;
struct rmap_walk_control rwc = {
- .arg = &result,
+ .arg = &accessed,
.rmap_one = __damon_pa_young,
.anon_lock = folio_lock_anon_vma_read,
};
@@ -133,9 +127,9 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)

if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
if (folio_test_idle(folio))
- result.accessed = false;
+ accessed = false;
else
- result.accessed = true;
+ accessed = true;
folio_put(folio);
goto out;
}
@@ -154,7 +148,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)

out:
*folio_sz = folio_size(folio);
- return result.accessed;
+ return accessed;
}

static void __damon_pa_check_access(struct damon_region *r)
--
2.25.1

2023-01-09 21:52:22

by SeongJae Park

Subject: [PATCH 1/6] mm/damon/vaddr: rename 'damon_young_walk_private->page_sz' to 'folio_sz'

DAMON's virtual address space monitoring operations set is using
folios now.  Rename 'damon_young_walk_private->page_sz' to reflect
the fact.

Signed-off-by: SeongJae Park <[email protected]>
---
mm/damon/vaddr.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 9d92c5eb3a1f..d6cb1fca1769 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -422,7 +422,8 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
}

struct damon_young_walk_private {
- unsigned long *page_sz;
+ /* size of the folio for the access checked virtual memory address */
+ unsigned long *folio_sz;
bool young;
};

@@ -452,7 +453,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
if (pmd_young(*pmd) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm,
addr)) {
- *priv->page_sz = HPAGE_PMD_SIZE;
+ *priv->folio_sz = HPAGE_PMD_SIZE;
priv->young = true;
}
folio_put(folio);
@@ -474,7 +475,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
goto out;
if (pte_young(*pte) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = PAGE_SIZE;
+ *priv->folio_sz = PAGE_SIZE;
priv->young = true;
}
folio_put(folio);
@@ -504,7 +505,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,

if (pte_young(entry) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = huge_page_size(h);
+ *priv->folio_sz = huge_page_size(h);
priv->young = true;
}

@@ -524,10 +525,10 @@ static const struct mm_walk_ops damon_young_ops = {
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
- unsigned long *page_sz)
+ unsigned long *folio_sz)
{
struct damon_young_walk_private arg = {
- .page_sz = page_sz,
+ .folio_sz = folio_sz,
.young = false,
};

@@ -547,18 +548,18 @@ static void __damon_va_check_access(struct mm_struct *mm,
struct damon_region *r, bool same_target)
{
static unsigned long last_addr;
- static unsigned long last_page_sz = PAGE_SIZE;
+ static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;

/* If the region is in the last checked page, reuse the result */
- if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
- ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+ if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
+ ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
if (last_accessed)
r->nr_accesses++;
return;
}

- last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+ last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
if (last_accessed)
r->nr_accesses++;

--
2.25.1

2023-01-09 21:53:48

by SeongJae Park

Subject: [PATCH 3/6] mm/damon/vaddr: record appropriate folio size when the access is not found

DAMON's virtual address space monitoring operations set doesn't
record the folio size of the access checked address when no access is
found.  It could result in unnecessary and inefficient repeated
checks.  Appropriately set the size regardless of the access check
result.
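
For context, the consumer of the recorded size is the result-reuse
check in __damon_va_check_access() (shown in patch 1/6 of this
series).  The last result can be reused only when the new sampling
address falls in the same folio, so a size left stale on a "not
accessed" result misplaces the reuse window and forces needless page
table walks:

	/* reuse the last result if the region is in the same folio */
	if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
			ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}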

Signed-off-by: SeongJae Park <[email protected]>
---
mm/damon/vaddr.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index c7b192006fe6..1fec16d7263e 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -452,10 +452,9 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
goto huge_out;
if (pmd_young(*pmd) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm,
- addr)) {
- *priv->folio_sz = HPAGE_PMD_SIZE;
+ addr))
priv->young = true;
- }
+ *priv->folio_sz = HPAGE_PMD_SIZE;
folio_put(folio);
huge_out:
spin_unlock(ptl);
@@ -474,10 +473,9 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
if (!folio)
goto out;
if (pte_young(*pte) || !folio_test_idle(folio) ||
- mmu_notifier_test_young(walk->mm, addr)) {
- *priv->folio_sz = folio_size(folio);
+ mmu_notifier_test_young(walk->mm, addr))
priv->young = true;
- }
+ *priv->folio_sz = folio_size(folio);
folio_put(folio);
out:
pte_unmap_unlock(pte, ptl);
@@ -504,10 +502,9 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
folio_get(folio);

if (pte_young(entry) || !folio_test_idle(folio) ||
- mmu_notifier_test_young(walk->mm, addr)) {
- *priv->folio_sz = huge_page_size(h);
+ mmu_notifier_test_young(walk->mm, addr))
priv->young = true;
- }
+ *priv->folio_sz = huge_page_size(h);

folio_put(folio);

--
2.25.1

2023-01-09 21:54:39

by SeongJae Park

Subject: [PATCH 2/6] mm/damon/vaddr: support folio of neither HPAGE_PMD_SIZE nor PAGE_SIZE

DAMON's virtual address space monitoring operations set treats folios
of any size other than HPAGE_PMD_SIZE as having PAGE_SIZE.  Use the
exact size of the folio instead.
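
As a concrete example (assuming a 4K PAGE_SIZE), for a PTE-mapped 64K
folio the old code recorded only PAGE_SIZE, so the access check
result could be reused for just a 4K window around the sampling
address.  Recording the real size widens the reuse window to the
whole folio:

	/* record the exact folio size, not PAGE_SIZE */
	*priv->folio_sz = folio_size(folio);	/* e.g., 65536 for a 64K folio */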

Signed-off-by: SeongJae Park <[email protected]>
---
mm/damon/vaddr.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index d6cb1fca1769..c7b192006fe6 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -475,7 +475,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
goto out;
if (pte_young(*pte) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm, addr)) {
- *priv->folio_sz = PAGE_SIZE;
+ *priv->folio_sz = folio_size(folio);
priv->young = true;
}
folio_put(folio);
--
2.25.1

2023-01-09 22:41:23

by SeongJae Park

Subject: [PATCH 4/6] mm/damon/paddr: rename 'damon_pa_access_chk_result->page_sz' to 'folio_sz'

DAMON's physical address space monitoring operations set is using
folios now.  Rename 'damon_pa_access_chk_result->page_sz' to reflect
the fact.

Signed-off-by: SeongJae Park <[email protected]>
---
mm/damon/paddr.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 99d4c357ef2b..65c1e0f91535 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -80,7 +80,8 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
}

struct damon_pa_access_chk_result {
- unsigned long page_sz;
+ /* size of the folio for the access checked physical memory address */
+ unsigned long folio_sz;
bool accessed;
};

@@ -91,7 +92,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

result->accessed = false;
- result->page_sz = PAGE_SIZE;
+ result->folio_sz = PAGE_SIZE;
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
@@ -103,7 +104,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
result->accessed = pmd_young(*pvmw.pmd) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
- result->page_sz = HPAGE_PMD_SIZE;
+ result->folio_sz = HPAGE_PMD_SIZE;
#else
WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -118,11 +119,11 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
return !result->accessed;
}

-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
struct damon_pa_access_chk_result result = {
- .page_sz = PAGE_SIZE,
+ .folio_sz = PAGE_SIZE,
.accessed = false,
};
struct rmap_walk_control rwc = {
@@ -157,25 +158,25 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
folio_put(folio);

out:
- *page_sz = result.page_sz;
+ *folio_sz = result.folio_sz;
return result.accessed;
}

static void __damon_pa_check_access(struct damon_region *r)
{
static unsigned long last_addr;
- static unsigned long last_page_sz = PAGE_SIZE;
+ static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;

/* If the region is in the last checked page, reuse the result */
- if (ALIGN_DOWN(last_addr, last_page_sz) ==
- ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
+ if (ALIGN_DOWN(last_addr, last_folio_sz) ==
+ ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
if (last_accessed)
r->nr_accesses++;
return;
}

- last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
+ last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
if (last_accessed)
r->nr_accesses++;

--
2.25.1

2023-01-09 22:42:12

by SeongJae Park

Subject: [PATCH 5/6] mm/damon/paddr: remove folio_sz field from damon_pa_access_chk_result

DAMON's physical address space monitoring operations set gets and
saves the size of the folio for a given physical address inside its
rmap walks, but the size can be directly calculated outside of the
walks.  Remove the 'folio_sz' field from the
'damon_pa_access_chk_result' struct and calculate the size outside of
the walks instead.
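
A minimal sketch of the resulting flow: damon_pa_young() already
holds a reference on the folio, so the size can be read once after
the rmap walk rather than stored on every rmap_one invocation:

	/* in damon_pa_young(), after the rmap walk */
	*folio_sz = folio_size(folio);
	return result.accessed;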

Signed-off-by: SeongJae Park <[email protected]>
---
mm/damon/paddr.c | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 65c1e0f91535..b51606519bbd 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -80,8 +80,6 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
}

struct damon_pa_access_chk_result {
- /* size of the folio for the access checked physical memory address */
- unsigned long folio_sz;
bool accessed;
};

@@ -92,7 +90,6 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

result->accessed = false;
- result->folio_sz = PAGE_SIZE;
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
@@ -104,7 +101,6 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
result->accessed = pmd_young(*pvmw.pmd) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
- result->folio_sz = HPAGE_PMD_SIZE;
#else
WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -123,7 +119,6 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
struct damon_pa_access_chk_result result = {
- .folio_sz = PAGE_SIZE,
.accessed = false,
};
struct rmap_walk_control rwc = {
@@ -158,7 +153,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
folio_put(folio);

out:
- *folio_sz = result.folio_sz;
+ *folio_sz = folio_size(folio);
return result.accessed;
}

--
2.25.1