Hi,
While working with CXL memory, I have been facing migration overhead,
especially TLB shootdown on promotion or demotion between different tiers.
Most TLB shootdowns on migration through hinting faults can be avoided
thanks to Huang Ying's work, commit 4d4b6d66db ("mm,unmap: avoid
flushing TLB in batch if PTE is inaccessible").
However, that only covers migrations triggered by hinting faults. It
would be much better to have a general mechanism that reduces the
number of TLB flushes and can be applied to any type of migration,
though I have tried it only for tiering migration for now.
I'm suggesting a mechanism that reduces TLB flushes by keeping both the
source and destination folios of a migration around until all the
required TLB flushes have been done, but only if none of those folios
are mapped by any writable PTE entry.
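As an illustration only, here is a small userspace sketch of the core
condition (hypothetical names, not the kernel code): a migration's TLB
flush may be deferred only when no PTE mapping the folio is writable;
otherwise it has to be flushed immediately as today.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of a PTE; only the write permission matters here. */
struct pte {
	bool writable;
};

/* Defer the TLB flush only if no mapping of the folio is writable. */
static bool can_defer_tlb_flush(const struct pte *ptes, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (ptes[i].writable)
			return false;
	return true;
}

int main(void)
{
	struct pte ro_only[] = { { false }, { false } };
	struct pte mixed[]   = { { false }, { true  } };

	printf("read-only folio: defer=%d\n", can_defer_tlb_flush(ro_only, 2));
	printf("writable folio:  defer=%d\n", can_defer_tlb_flush(mixed, 2));
	return 0;
}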
I saw the number of full TLB flushes reduced by over 50%, and the
performance improved a little, though not by much, with the workload I
tested, XSBench. However, I believe it would help more with other or
real-world workloads. I'd appreciate it if you could point out anything
I'm missing.
Byungchul
---
Changes from RFC:
1. Fix a bug triggered when a folio that was the destination of
a previous migration becomes the source of the next migration
before it has been handled properly, so that the folio can take
part in another migration. There was an inconsistency in the
folio's state; it is fixed now.
2. Split the patch set into more pieces so that folks can review
it more easily. (Feedback from Nadav Amit)
3. Fix wrong usage of barriers, e.g. smp_mb__after_atomic().
(Feedback from Nadav Amit)
4. Add more comments to explain the patch set better.
(Feedback from Nadav Amit)
Byungchul Park (6):
mm/rmap: Recognize non-writable TLB entries during TLB batch flush
mm: Defer TLB flush by keeping both src and dst folios at migration
mm, migrc: Skip TLB flushes at the CPUs that already have been done
mm, migrc: Adjust __zone_watermark_ok() with the amount of pending
folios
mm, migrc: Add a sysctl knob to enable/disable MIGRC mechanism
mm, migrc: Implement internal allocator to minimize impact onto vm
arch/x86/include/asm/tlbflush.h | 9 +
arch/x86/mm/tlb.c | 67 ++++++
include/linux/mm.h | 30 +++
include/linux/mm_types.h | 47 ++++
include/linux/mm_types_task.h | 4 +-
include/linux/mmzone.h | 6 +
include/linux/sched.h | 5 +
init/Kconfig | 13 ++
mm/internal.h | 14 ++
mm/memory.c | 17 +-
mm/migrate.c | 381 +++++++++++++++++++++++++++++++-
mm/mm_init.c | 1 +
mm/page_alloc.c | 19 ++
mm/rmap.c | 133 ++++++++++-
14 files changed, 734 insertions(+), 12 deletions(-)
--
2.17.1
A requested TLB flush can be skipped if an equivalent flush has already
been performed for any reason, not necessarily by a migration. This is
tracked by keeping a generation timestamp (migrc_gen) both when a flush
is requested and when a flush is actually performed on each CPU.
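The decision is essentially a wraparound-safe comparison between the
generation recorded when the flush was requested and the generation a
CPU had already flushed up to. A minimal userspace model of that check,
mirroring the before() helper below (the numbers are made up):

#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a happened before b" for generation counters. */
static bool before(int a, int b)
{
	return a - b < 0;
}

int main(void)
{
	int req_gen = 42;	/* generation when the flush was requested */
	int cpu_done = 45;	/* generation this CPU had flushed up to */

	/*
	 * If the CPU has already flushed at or after the requested
	 * generation, the pending request can be dropped for that CPU.
	 */
	if (!before(cpu_done, req_gen))
		printf("flush already covered, skip this CPU\n");
	else
		printf("flush still required on this CPU\n");
	return 0;
}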
Signed-off-by: Byungchul Park <[email protected]>
---
arch/x86/include/asm/tlbflush.h | 6 ++++
arch/x86/mm/tlb.c | 55 +++++++++++++++++++++++++++++++++
mm/migrate.c | 10 ++++++
mm/rmap.c | 1 +
4 files changed, 72 insertions(+)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 752d72ea209b..da987c15049e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -283,6 +283,12 @@ extern void arch_tlbbatch_clean(struct arch_tlbflush_unmap_batch *batch);
extern void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
struct arch_tlbflush_unmap_batch *bsrc);
+#ifdef CONFIG_MIGRC
+extern void arch_migrc_adj(struct arch_tlbflush_unmap_batch *batch, int gen);
+#else
+static inline void arch_migrc_adj(struct arch_tlbflush_unmap_batch *batch, int gen) {}
+#endif
+
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
bool ignore_access)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2dabf0f340fb..913cad013979 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1210,9 +1210,48 @@ STATIC_NOPV void native_flush_tlb_local(void)
native_write_cr3(__native_read_cr3());
}
+#ifdef CONFIG_MIGRC
+DEFINE_PER_CPU(int, migrc_done);
+
+static inline int migrc_tlb_local_begin(void)
+{
+ int ret = atomic_read(&migrc_gen);
+
+ /*
+ * XXX: barrier() would be sufficient if the architecture
+ * guarantees the order between memory access and TLB flush.
+ */
+ smp_mb();
+ return ret;
+}
+
+static inline void migrc_tlb_local_end(int gen)
+{
+ /*
+ * XXX: barrier() would be sufficient if the architecture
+ * guarantees the order between TLB flush and memory access.
+ */
+ smp_mb();
+ WRITE_ONCE(*this_cpu_ptr(&migrc_done), gen);
+}
+#else
+static inline int migrc_tlb_local_begin(void)
+{
+ return 0;
+}
+
+static inline void migrc_tlb_local_end(int gen)
+{
+}
+#endif
+
void flush_tlb_local(void)
{
+ int gen;
+
+ gen = migrc_tlb_local_begin();
__flush_tlb_local();
+ migrc_tlb_local_end(gen);
}
/*
@@ -1237,6 +1276,22 @@ void __flush_tlb_all(void)
}
EXPORT_SYMBOL_GPL(__flush_tlb_all);
+#ifdef CONFIG_MIGRC
+static inline bool before(int a, int b)
+{
+ return a - b < 0;
+}
+
+void arch_migrc_adj(struct arch_tlbflush_unmap_batch *batch, int gen)
+{
+ int cpu;
+
+ for_each_cpu(cpu, &batch->cpumask)
+ if (!before(READ_ONCE(*per_cpu_ptr(&migrc_done, cpu)), gen))
+ cpumask_clear_cpu(cpu, &batch->cpumask);
+}
+#endif
+
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
struct flush_tlb_info *info;
diff --git a/mm/migrate.c b/mm/migrate.c
index f9446f5b312a..c7b72d275b2a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2053,6 +2053,16 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
stats->nr_thp_failed += thp_retry;
stats->nr_failed_pages += nr_retry_pages;
move:
+ /*
+ * This should come before try_to_unmap_flush() so that
+ * migrc_try_flush_free_folios(), called later, can benefit
+ * from the TLB flushes done in try_to_unmap_flush().
+ *
+ * migrc_req_end() stores the timestamp of the pending request,
+ * and each TLB flush stores its own timestamp, so that
+ * unnecessary TLB flushes can be skipped using this timing
+ * information.
+ */
if (migrc_cond1)
migrc_req_end();
diff --git a/mm/rmap.c b/mm/rmap.c
index 0652d25206ee..2ae1b1324f84 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -627,6 +627,7 @@ static bool __migrc_try_flush_free_folios(struct llist_head *h)
llist_for_each_entry_safe(req, req2, reqs, llnode) {
struct llist_node *n;
+ arch_migrc_adj(&req->arch, req->gen);
arch_tlbbatch_fold(&arch, &req->arch);
n = llist_del_all(&req->pages);
--
2.17.1
Functionally, no change. This is a preparation for CONFIG_MIGRC, which
requires recognizing non-writable TLB entries and makes use of them to
batch more aggressively or even skip TLB flushes.
While at it, change struct tlbflush_unmap_batch's ->flush_required
(boolean) to ->nr_flush_required (int) in order to track not only
whether a flush has been requested, but also the exact number of
requests. That will be used in the CONFIG_MIGRC implementation.
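Conceptually, each unmapped PTE gets routed into one of two batches
depending on write permission, and the non-writable batch can be folded
back into the normal one whenever the deferral has to be given up. A
rough userspace model of that flow (hypothetical types, not the kernel
structures):

#include <stdbool.h>
#include <stdio.h>

struct batch {
	int nr_flush_required;	/* number of flushes requested, not a bool */
	bool writable;		/* a dirty PTE was seen in this batch */
};

static void add_pending_flush(struct batch *wr, struct batch *nowr,
			      bool pte_writable, bool pte_dirty)
{
	/* Non-writable PTEs go to the separate batch that MIGRC may defer. */
	struct batch *b = pte_writable ? wr : nowr;

	b->nr_flush_required += 1;
	b->writable = b->writable || pte_dirty;
}

/* Folding the non-writable batch back gives up the deferral. */
static void fold_nowr(struct batch *wr, struct batch *nowr)
{
	wr->nr_flush_required += nowr->nr_flush_required;
	wr->writable = wr->writable || nowr->writable;
	nowr->nr_flush_required = 0;
	nowr->writable = false;
}

int main(void)
{
	struct batch tlb_ubc = { 0 }, tlb_ubc_nowr = { 0 };

	add_pending_flush(&tlb_ubc, &tlb_ubc_nowr, true, true);
	add_pending_flush(&tlb_ubc, &tlb_ubc_nowr, false, false);
	fold_nowr(&tlb_ubc, &tlb_ubc_nowr);
	printf("flushes required: %d\n", tlb_ubc.nr_flush_required);
	return 0;
}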
Signed-off-by: Byungchul Park <[email protected]>
---
arch/x86/include/asm/tlbflush.h | 2 ++
arch/x86/mm/tlb.c | 7 +++++++
include/linux/mm_types_task.h | 4 ++--
include/linux/sched.h | 1 +
mm/internal.h | 4 ++++
mm/rmap.c | 29 ++++++++++++++++++++++++-----
6 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 75bfaa421030..63504cde364b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -279,6 +279,8 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc);
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 267acf27480a..69d145f1fff1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1265,6 +1265,13 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
put_cpu();
}
+void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc)
+{
+ cpumask_or(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
+ cpumask_clear(&bsrc->cpumask);
+}
+
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5414b5c6a103..6f3bb757eb46 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -59,8 +59,8 @@ struct tlbflush_unmap_batch {
*/
struct arch_tlbflush_unmap_batch arch;
- /* True if a flush is needed. */
- bool flush_required;
+ /* The number of flushes requested. */
+ int nr_flush_required;
/*
* If true then the PTE was dirty when unmapped. The entry must be
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eed5d65b8d1f..2232b2cdfce8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1322,6 +1322,7 @@ struct task_struct {
#endif
struct tlbflush_unmap_batch tlb_ubc;
+ struct tlbflush_unmap_batch tlb_ubc_nowr;
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
diff --git a/mm/internal.h b/mm/internal.h
index 68410c6d97ac..b90d516ad41f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -840,6 +840,7 @@ extern struct workqueue_struct *mm_percpu_wq;
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
+void fold_ubc_nowr(void);
#else
static inline void try_to_unmap_flush(void)
{
@@ -850,6 +851,9 @@ static inline void try_to_unmap_flush_dirty(void)
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
+static inline void fold_ubc_nowr(void)
+{
+}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
extern const struct trace_print_flags pageflag_names[];
diff --git a/mm/rmap.c b/mm/rmap.c
index 19392e090bec..d18460a48485 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -605,6 +605,22 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
}
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+
+void fold_ubc_nowr(void)
+{
+ struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
+
+ if (!tlb_ubc_nowr->nr_flush_required)
+ return;
+
+ arch_tlbbatch_fold(&tlb_ubc->arch, &tlb_ubc_nowr->arch);
+ tlb_ubc->writable = tlb_ubc->writable || tlb_ubc_nowr->writable;
+ tlb_ubc->nr_flush_required += tlb_ubc_nowr->nr_flush_required;
+ tlb_ubc_nowr->nr_flush_required = 0;
+ tlb_ubc_nowr->writable = false;
+}
+
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
* important if a PTE was dirty when it was unmapped that it's flushed
@@ -615,11 +631,12 @@ void try_to_unmap_flush(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
- if (!tlb_ubc->flush_required)
+ fold_ubc_nowr();
+ if (!tlb_ubc->nr_flush_required)
return;
arch_tlbbatch_flush(&tlb_ubc->arch);
- tlb_ubc->flush_required = false;
+ tlb_ubc->nr_flush_required = 0;
tlb_ubc->writable = false;
}
@@ -627,8 +644,9 @@ void try_to_unmap_flush(void)
void try_to_unmap_flush_dirty(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
- if (tlb_ubc->writable)
+ if (tlb_ubc->writable || tlb_ubc_nowr->writable)
try_to_unmap_flush();
}
@@ -644,15 +662,16 @@ void try_to_unmap_flush_dirty(void)
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval)
{
- struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+ struct tlbflush_unmap_batch *tlb_ubc;
int batch;
bool writable = pte_dirty(pteval);
if (!pte_accessible(mm, pteval))
return;
+ tlb_ubc = pte_write(pteval) ? &current->tlb_ubc : &current->tlb_ubc_nowr;
arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
- tlb_ubc->flush_required = true;
+ tlb_ubc->nr_flush_required += 1;
/*
* Ensure compiler does not re-order the setting of tlb_flush_batched
--
2.17.1
Implementation of CONFIG_MIGRC, which stands for 'Migration Read Copy'.
While working with tiered memory, e.g. CXL memory, we always face
migration overhead at either promotion or demotion, and TLB shootdown
turned out to be a big part of it that should be eliminated if possible.
Fortunately, the TLB flush can be deferred or even skipped if both the
source and destination folios of a migration are kept until all the
required TLB flushes have been done, but only if the target PTE entries
have read-only permission, or more precisely, don't have write
permission. Otherwise, the folio could of course get corrupted.
To achieve that:
1. For folios mapped only by non-writable TLB entries, avoid the
TLB flush at migration by keeping both the source and destination
folios, which will be handled later at a better time.
2. When any non-writable TLB entry is about to become writable,
e.g. through the fault handler, give up the CONFIG_MIGRC
mechanism and perform the required TLB flush right away (see the
sketch after this list).
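Conceptually, the write-fault path must drain the pending MIGRC work
before the PTE is allowed to become writable, otherwise a stale
read-only TLB entry could survive a writable mapping. A rough userspace
model of that give-up path (hypothetical names, not do_wp_page() itself):

#include <stdbool.h>
#include <stdio.h>

static bool folio_migrc_pending = true;	/* src/dst kept, flush deferred */

static void migrc_flush_and_free(void)
{
	/* Perform the deferred TLB flush, then release the kept folios. */
	folio_migrc_pending = false;
	printf("deferred flush performed, kept folios released\n");
}

static void handle_write_fault(void)
{
	/*
	 * The folio is about to be mapped writable. A stale read-only
	 * TLB entry elsewhere would now be dangerous, so give up the
	 * deferral and flush right away.
	 */
	if (folio_migrc_pending)
		migrc_flush_and_free();

	printf("PTE made writable\n");
}

int main(void)
{
	handle_write_fault();
	return 0;
}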
The measurement result:
Architecture - x86_64
QEMU - kvm enabled, host cpu, 2nodes {(4cpus, 2GB), (cpuless, 6GB)}
Linux Kernel - v6.4, numa balancing tiering on, demotion enabled
Benchmark - XSBench with no parameter changed
run 'perf stat' using events:
1) itlb.itlb_flush
2) tlb_flush.dtlb_thread
3) tlb_flush.stlb_any
run 'cat /proc/vmstat' and pick up:
1) pgdemote_kswapd
2) numa_pages_migrated
3) pgmigrate_success
4) nr_tlb_remote_flush
5) nr_tlb_remote_flush_received
6) nr_tlb_local_flush_all
7) nr_tlb_local_flush_one
BEFORE - mainline v6.4
------------------------------------------
$ perf stat -e itlb.itlb_flush,tlb_flush.dtlb_thread,tlb_flush.stlb_any ./XSBench
Performance counter stats for './XSBench':
426856 itlb.itlb_flush
6900414 tlb_flush.dtlb_thread
7303137 tlb_flush.stlb_any
33.500486566 seconds time elapsed
92.852128000 seconds user
10.526718000 seconds sys
$ cat /proc/vmstat
...
pgdemote_kswapd 1052596
numa_pages_migrated 1052359
pgmigrate_success 2161846
nr_tlb_remote_flush 72370
nr_tlb_remote_flush_received 213711
nr_tlb_local_flush_all 3385
nr_tlb_local_flush_one 198679
...
AFTER - mainline v6.4 + CONFIG_MIGRC
------------------------------------------
$ perf stat -e itlb.itlb_flush,tlb_flush.dtlb_thread,tlb_flush.stlb_any ./XSBench
Performance counter stats for './XSBench':
179537 itlb.itlb_flush
6131135 tlb_flush.dtlb_thread
6920979 tlb_flush.stlb_any
30.396700625 seconds time elapsed
80.331252000 seconds user
10.303761000 seconds sys
$ cat /proc/vmstat
...
pgdemote_kswapd 1044602
numa_pages_migrated 1044202
pgmigrate_success 2157808
nr_tlb_remote_flush 30453
nr_tlb_remote_flush_received 88840
nr_tlb_local_flush_all 3039
nr_tlb_local_flush_one 198875
...
Signed-off-by: Byungchul Park <[email protected]>
---
arch/x86/include/asm/tlbflush.h | 1 +
arch/x86/mm/tlb.c | 5 +
include/linux/mm.h | 28 +++
include/linux/mm_types.h | 47 +++++
include/linux/mmzone.h | 3 +
include/linux/sched.h | 4 +
init/Kconfig | 13 ++
mm/internal.h | 10 ++
mm/memory.c | 17 +-
mm/migrate.c | 296 +++++++++++++++++++++++++++++++-
mm/mm_init.c | 1 +
mm/page_alloc.c | 3 +
mm/rmap.c | 103 +++++++++++
13 files changed, 526 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 63504cde364b..752d72ea209b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -279,6 +279,7 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
}
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+extern void arch_tlbbatch_clean(struct arch_tlbflush_unmap_batch *batch);
extern void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
struct arch_tlbflush_unmap_batch *bsrc);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 69d145f1fff1..2dabf0f340fb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1265,6 +1265,11 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
put_cpu();
}
+void arch_tlbbatch_clean(struct arch_tlbflush_unmap_batch *batch)
+{
+ cpumask_clear(&batch->cpumask);
+}
+
void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
struct arch_tlbflush_unmap_batch *bsrc)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 27ce77080c79..1ceec7f3591e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3816,4 +3816,32 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
}
#endif
+#ifdef CONFIG_MIGRC
+void migrc_init_page(struct page *p);
+bool migrc_pending(struct folio *f);
+void migrc_shrink(struct llist_head *h);
+void migrc_req_start(void);
+void migrc_req_end(void);
+bool migrc_req_processing(void);
+bool migrc_try_flush_free_folios(void);
+void migrc_try_flush_free_folios_dirty(void);
+struct migrc_req *fold_ubc_nowr_to_migrc(void);
+void free_migrc_req(struct migrc_req *req);
+
+extern atomic_t migrc_gen;
+extern struct llist_head migrc_reqs;
+extern struct llist_head migrc_reqs_dirty;
+#else
+static inline void migrc_init_page(struct page *p) {}
+static inline bool migrc_pending(struct folio *f) { return false; }
+static inline void migrc_shrink(struct llist_head *h) {}
+static inline void migrc_req_start(void) {}
+static inline void migrc_req_end(void) {}
+static inline bool migrc_req_processing(void) { return false; }
+static inline bool migrc_try_flush_free_folios(void) { return false; }
+static inline void migrc_try_flush_free_folios_dirty(void) {}
+static inline struct migrc_req *fold_ubc_nowr_to_migrc(void) { return NULL; }
+static inline void free_migrc_req(struct migrc_req *req) {}
+#endif
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 306a3d1a0fa6..56011670a6fe 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -228,6 +228,23 @@ struct page {
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
+#ifdef CONFIG_MIGRC
+ /*
+ * XXX: Need to get rid of the following additional fields.
+ */
+
+ /*
+ * for hanging onto a request(struct migrc_req), waiting for TLB
+ * flushes to free up this page
+ */
+ struct llist_node migrc_node;
+
+ /*
+ * for keeping a state of this page e.g. whether pending for TLB
+ * flushes, whether duplicated or whether in the initial state
+ */
+ unsigned int migrc_state;
+#endif
} _struct_page_alignment;
/*
@@ -1255,4 +1272,34 @@ enum {
/* See also internal only FOLL flags in mm/internal.h */
};
+#ifdef CONFIG_MIGRC
+struct migrc_req {
+ /*
+ * pages hung onto this, pending for TLB flush
+ */
+ struct llist_head pages;
+
+ /*
+ * llist_node of the last page in 'pages'
+ */
+ struct llist_node *last;
+
+ /*
+ * for hanging onto the global llist, 'migrc_reqs'
+ */
+ struct llist_node llnode;
+
+ /*
+ * architecture specific batch data
+ */
+ struct arch_tlbflush_unmap_batch arch;
+
+ /*
+ * when this hung onto the global llist, 'migrc_reqs'
+ */
+ int gen;
+};
+#else
+struct migrc_req {};
+#endif
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a4889c9d4055..6d645beaf7a6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1371,6 +1371,9 @@ typedef struct pglist_data {
#ifdef CONFIG_MEMORY_FAILURE
struct memory_failure_stats mf_stats;
#endif
+#ifdef CONFIG_MIGRC
+ atomic_t migrc_pending_nr;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2232b2cdfce8..d0a46089959d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1323,6 +1323,10 @@ struct task_struct {
struct tlbflush_unmap_batch tlb_ubc;
struct tlbflush_unmap_batch tlb_ubc_nowr;
+#ifdef CONFIG_MIGRC
+ struct migrc_req *mreq;
+ struct migrc_req *mreq_dirty;
+#endif
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
diff --git a/init/Kconfig b/init/Kconfig
index 32c24950c4ce..9f9d0f7e15d2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -907,6 +907,19 @@ config NUMA_BALANCING_DEFAULT_ENABLED
If set, automatic NUMA balancing will be enabled if running on a NUMA
machine.
+config MIGRC
+ bool "Deferring TLB flush by keeping read copies on migration"
+ depends on ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ depends on NUMA_BALANCING
+ default n
+ help
+ A TLB flush is necessary when a PTE changes due to migration.
+ However, the flush can be deferred if both the src page and the
+ dst page are kept until the TLB flush, as long as they are
+ non-writable. System performance will improve especially when
+ promotion and demotion migrations happen heavily.
+
menuconfig CGROUPS
bool "Control Group support"
select KERNFS
diff --git a/mm/internal.h b/mm/internal.h
index b90d516ad41f..a8e3168614d6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -841,6 +841,8 @@ void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
void fold_ubc_nowr(void);
+int nr_flush_required(void);
+int nr_flush_required_nowr(void);
#else
static inline void try_to_unmap_flush(void)
{
@@ -854,6 +856,14 @@ static inline void flush_tlb_batched_pending(struct mm_struct *mm)
static inline void fold_ubc_nowr(void)
{
}
+static inline int nr_flush_required(void)
+{
+ return 0;
+}
+static inline int nr_flush_required_nowr(void)
+{
+ return 0;
+}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
extern const struct trace_print_flags pageflag_names[];
diff --git a/mm/memory.c b/mm/memory.c
index f69fbc251198..066b7d5b7217 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3345,6 +3345,20 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+ if (vmf->page)
+ folio = page_folio(vmf->page);
+
+ /*
+ * This folio has a read copy kept to prevent inconsistency while
+ * deferring TLB flushes. However, a problem might arise if it's
+ * about to become writable.
+ *
+ * To prevent that, give up deferring the TLB flushes and perform
+ * the TLB flush right away.
+ */
+ if (folio && migrc_pending(folio))
+ migrc_try_flush_free_folios();
+
/*
* Shared mapping: we are guaranteed to have VM_WRITE and
* FAULT_FLAG_WRITE set at this point.
@@ -3362,9 +3376,6 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
return wp_page_shared(vmf);
}
- if (vmf->page)
- folio = page_folio(vmf->page);
-
/*
* Private mapping: create an exclusive anonymous page copy if reuse
* is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
diff --git a/mm/migrate.c b/mm/migrate.c
index 01cac26a3127..f9446f5b312a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -58,6 +58,230 @@
#include "internal.h"
+#ifdef CONFIG_MIGRC
+
+/*
+ * TODO: Yeah, it's an arbitrary magic number. This simple value
+ * manages to work conservatively anyway. However, the value needs
+ * to be tuned and adjusted based on the internal state of the
+ * memory management subsystem later.
+ *
+ * Let's start with a simple value for now.
+ */
+static const int migrc_pending_max = 512; /* unit: page */
+
+atomic_t migrc_gen;
+LLIST_HEAD(migrc_reqs);
+LLIST_HEAD(migrc_reqs_dirty);
+
+enum {
+ MIGRC_STATE_NONE,
+ MIGRC_SRC_PENDING,
+ MIGRC_DST_PENDING,
+};
+
+static struct migrc_req *alloc_migrc_req(void)
+{
+ return kmalloc(sizeof(struct migrc_req), GFP_KERNEL);
+}
+
+void free_migrc_req(struct migrc_req *req)
+{
+ kfree(req);
+}
+
+static bool migrc_is_full(int nid)
+{
+ struct pglist_data *node = NODE_DATA(nid);
+
+ if (migrc_pending_max == -1)
+ return false;
+
+ return atomic_read(&node->migrc_pending_nr) >= migrc_pending_max;
+}
+
+void migrc_init_page(struct page *p)
+{
+ WRITE_ONCE(p->migrc_state, MIGRC_STATE_NONE);
+}
+
+/*
+ * The list should be isolated before.
+ */
+void migrc_shrink(struct llist_head *h)
+{
+ struct page *p, *p2;
+ struct llist_node *n;
+
+ n = llist_del_all(h);
+ llist_for_each_entry_safe(p, p2, n, migrc_node) {
+ if (p->migrc_state == MIGRC_SRC_PENDING) {
+ struct pglist_data *node;
+
+ node = NODE_DATA(page_to_nid(p));
+ atomic_dec(&node->migrc_pending_nr);
+ }
+
+ if (WARN_ON(!migrc_pending(page_folio(p))))
+ continue;
+
+ WRITE_ONCE(p->migrc_state, MIGRC_STATE_NONE);
+
+ /*
+ * Ensure the folio is in the initial state once it has
+ * been freed and then allocated.
+ */
+ smp_wmb();
+
+ folio_put(page_folio(p));
+ }
+}
+
+static inline bool migrc_src_pending(struct folio *f)
+{
+ /*
+ * For the case called from page fault handler, make sure the
+ * order between seeing updated PTE and reading migrc_state.
+ *
+ * Or should be able to see the initial state if no one has
+ * touched the state since allocation.
+ */
+ smp_rmb();
+ return READ_ONCE(f->page.migrc_state) == MIGRC_SRC_PENDING;
+}
+
+static inline bool migrc_dst_pending(struct folio *f)
+{
+ /*
+ * For the case called from page fault handler, make sure the
+ * order between seeing updated PTE and reading migrc_state.
+ *
+ * Or should be able to see the initial state if no one has
+ * touched the state since allocation.
+ */
+ smp_rmb();
+ return READ_ONCE(f->page.migrc_state) == MIGRC_DST_PENDING;
+}
+
+bool migrc_pending(struct folio *f)
+{
+ return migrc_src_pending(f) || migrc_dst_pending(f);
+}
+
+static void migrc_expand_req(struct folio *fsrc, struct folio *fdst)
+{
+ struct migrc_req *req;
+ struct pglist_data *node;
+
+ req = fold_ubc_nowr_to_migrc();
+ if (!req)
+ return;
+
+ folio_get(fsrc);
+ WRITE_ONCE(fsrc->page.migrc_state, MIGRC_SRC_PENDING);
+ WRITE_ONCE(fdst->page.migrc_state, MIGRC_DST_PENDING);
+
+ /*
+ * Keep the order between migrc_state update and PTE update.
+ */
+ smp_wmb();
+
+ if (llist_add(&fsrc->page.migrc_node, &req->pages))
+ req->last = &fsrc->page.migrc_node;
+
+ node = NODE_DATA(folio_nid(fsrc));
+ atomic_inc(&node->migrc_pending_nr);
+
+ if (migrc_is_full(folio_nid(fsrc)))
+ migrc_try_flush_free_folios();
+}
+
+/*
+ * To start to gather pages pending for TLB flushes, try to allocate
+ * objects needed, initialize them and make them ready.
+ */
+void migrc_req_start(void)
+{
+ struct migrc_req *req;
+ struct migrc_req *req_dirty;
+
+ if (WARN_ON(current->mreq || current->mreq_dirty))
+ return;
+
+ req = alloc_migrc_req();
+ req_dirty = alloc_migrc_req();
+
+ if (!req || !req_dirty)
+ goto fail;
+
+ arch_tlbbatch_clean(&req->arch);
+ init_llist_head(&req->pages);
+ req->last = NULL;
+ current->mreq = req;
+
+ /*
+ * Gather pages having a mapping, pte_dirty() == true, in a
+ * separate request to handle try_to_unmap_flush_dirty().
+ */
+ arch_tlbbatch_clean(&req_dirty->arch);
+ init_llist_head(&req_dirty->pages);
+ req_dirty->last = NULL;
+ current->mreq_dirty = req_dirty;
+ return;
+fail:
+ if (req_dirty)
+ free_migrc_req(req_dirty);
+ if (req)
+ free_migrc_req(req);
+}
+
+/*
+ * Hang the request with the collected pages onto the global llist,
+ * 'migrc_reqs', which will be referred when performing TLB flush via
+ * migrc_try_flush_free_folios().
+ */
+void migrc_req_end(void)
+{
+ struct migrc_req *req = current->mreq;
+ struct migrc_req *req_dirty = current->mreq_dirty;
+
+ WARN_ON((!req && req_dirty) || (req && !req_dirty));
+
+ if (!req || !req_dirty)
+ return;
+
+ if (llist_empty(&req->pages)) {
+ free_migrc_req(req);
+ } else {
+ req->gen = atomic_inc_return(&migrc_gen);
+ llist_add(&req->llnode, &migrc_reqs);
+ }
+ current->mreq = NULL;
+
+ /*
+ * Gather pages having a mapping, pte_dirty() == true, in a
+ * separate request to handle try_to_unmap_flush_dirty().
+ */
+ if (llist_empty(&req_dirty->pages)) {
+ free_migrc_req(req_dirty);
+ } else {
+ req_dirty->gen = atomic_inc_return(&migrc_gen);
+ llist_add(&req_dirty->llnode, &migrc_reqs_dirty);
+ }
+ current->mreq_dirty = NULL;
+}
+
+bool migrc_req_processing(void)
+{
+ return current->mreq && current->mreq_dirty;
+}
+#else
+static inline bool migrc_src_pending(struct folio *f) { return false; }
+static inline bool migrc_dst_pending(struct folio *f) { return false; }
+static inline bool migrc_is_full(int nid) { return true; }
+static inline void migrc_expand_req(struct folio *fsrc, struct folio *fdst) {}
+#endif
+
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
struct folio *folio = folio_get_nontail_page(page);
@@ -383,6 +607,9 @@ static int folio_expected_refs(struct address_space *mapping,
struct folio *folio)
{
int refs = 1;
+
+ refs += migrc_src_pending(folio) ? 1 : 0;
+
if (!mapping)
return refs;
@@ -1060,6 +1287,12 @@ static void migrate_folio_undo_src(struct folio *src,
bool locked,
struct list_head *ret)
{
+ /*
+ * TODO: There might be folios already pending for migrc.
+ * However, there's no way to cancel those on failure for now.
+ * Let's reflect the requirement when needed.
+ */
+
if (page_was_mapped)
remove_migration_ptes(src, src, false);
/* Drop an anon_vma reference if we took one */
@@ -1627,10 +1860,17 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
LIST_HEAD(unmap_folios);
LIST_HEAD(dst_folios);
bool nosplit = (reason == MR_NUMA_MISPLACED);
+ bool migrc_cond1;
+ bool need_migrc_flush = false;
VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
!list_empty(from) && !list_is_singular(from));
+ migrc_cond1 = (reason == MR_DEMOTION && current_is_kswapd()) ||
+ (reason == MR_NUMA_MISPLACED);
+
+ if (migrc_cond1)
+ migrc_req_start();
for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
retry = 0;
large_retry = 0;
@@ -1638,6 +1878,10 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
nr_retry_pages = 0;
list_for_each_entry_safe(folio, folio2, from, lru) {
+ int nr_required;
+ bool migrc_cond2;
+ bool migrc;
+
/*
* Large folio statistics is based on the source large
* folio. Capture required information that might get
@@ -1671,8 +1915,33 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
continue;
}
+ nr_required = nr_flush_required();
rc = migrate_folio_unmap(get_new_page, put_new_page, private,
folio, &dst, mode, reason, ret_folios);
+ migrc_cond2 = nr_required == nr_flush_required() &&
+ nr_flush_required_nowr() &&
+ !migrc_is_full(folio_nid(folio));
+ migrc = migrc_cond1 && migrc_cond2;
+
+ /*
+ * This folio has already taken part in the migrc
+ * mechanism previously.
+ *
+ * If it was the destination of that migration, a
+ * TLB flush is needed to keep consistency because
+ * the folio is about to move somewhere else again.
+ *
+ * It cannot have been the source of that migration,
+ * because such a folio was isolated at that time
+ * and couldn't be here; warn if that ever happens.
+ */
+ if (migrc_dst_pending(folio))
+ need_migrc_flush = true;
+ else if (migrc_src_pending(folio))
+ WARN_ON(1);
+
/*
* The rules are:
* Success: folio will be freed
@@ -1722,9 +1991,11 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
nr_large_failed += large_retry;
stats->nr_thp_failed += thp_retry;
rc_saved = rc;
- if (list_empty(&unmap_folios))
+ if (list_empty(&unmap_folios)) {
+ if (migrc_cond1)
+ migrc_req_end();
goto out;
- else
+ } else
goto move;
case -EAGAIN:
if (is_large) {
@@ -1742,6 +2013,13 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
case MIGRATEPAGE_UNMAP:
list_move_tail(&folio->lru, &unmap_folios);
list_add_tail(&dst->lru, &dst_folios);
+
+ if (migrc)
+ /*
+ * XXX: On migration failure,
+ * extra TLB flush might happen.
+ */
+ migrc_expand_req(folio, dst);
break;
default:
/*
@@ -1760,6 +2038,14 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
stats->nr_failed_pages += nr_pages;
break;
}
+
+ /*
+ * Setting up TLB batch information for this
+ * folio is done. It's about to move on to the
+ * next folio. Fold the remaining TLB batch
+ * information if it exists.
+ */
+ fold_ubc_nowr();
}
}
nr_failed += retry;
@@ -1767,9 +2053,15 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
stats->nr_thp_failed += thp_retry;
stats->nr_failed_pages += nr_retry_pages;
move:
+ if (migrc_cond1)
+ migrc_req_end();
+
/* Flush TLBs for all unmapped folios */
try_to_unmap_flush();
+ if (need_migrc_flush)
+ migrc_try_flush_free_folios();
+
retry = 1;
for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
retry = 0;
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 7f7f9c677854..87cbddc7d780 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -558,6 +558,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
page_mapcount_reset(page);
page_cpupid_reset_last(page);
page_kasan_tag_reset(page);
+ migrc_init_page(page);
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 47421bedc12b..c51cbdb45d86 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1730,6 +1730,9 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
set_page_owner(page, order, gfp_flags);
page_table_check_alloc(page, order);
+
+ for (i = 0; i != 1 << order; ++i)
+ migrc_init_page(page + i);
}
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
diff --git a/mm/rmap.c b/mm/rmap.c
index d18460a48485..0652d25206ee 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -606,6 +606,97 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#ifdef CONFIG_MIGRC
+static bool __migrc_try_flush_free_folios(struct llist_head *h)
+{
+ struct arch_tlbflush_unmap_batch arch;
+ struct llist_node *reqs;
+ struct migrc_req *req;
+ struct migrc_req *req2;
+ LLIST_HEAD(pages);
+
+ reqs = llist_del_all(h);
+ if (!reqs)
+ return false;
+
+ arch_tlbbatch_clean(&arch);
+
+ /*
+ * TODO: Optimize the time complexity.
+ */
+ llist_for_each_entry_safe(req, req2, reqs, llnode) {
+ struct llist_node *n;
+
+ arch_tlbbatch_fold(&arch, &req->arch);
+
+ n = llist_del_all(&req->pages);
+ llist_add_batch(n, req->last, &pages);
+ free_migrc_req(req);
+ }
+
+ arch_tlbbatch_flush(&arch);
+ migrc_shrink(&pages);
+ return true;
+}
+
+bool migrc_try_flush_free_folios(void)
+{
+ bool ret;
+
+ /*
+ * If building up a request collecting pages is in progress,
+ * the request needs to be finalized and hung onto the global
+ * llist, 'migrc_reqs', so that it can be used in migrc's TLB
+ * flush routine, i.e. __migrc_try_flush_free_folios().
+ */
+ if (migrc_req_processing()) {
+ migrc_req_end();
+ migrc_req_start();
+ }
+ ret = __migrc_try_flush_free_folios(&migrc_reqs);
+ ret = __migrc_try_flush_free_folios(&migrc_reqs_dirty) || ret;
+
+ return ret;
+}
+
+void migrc_try_flush_free_folios_dirty(void)
+{
+ /*
+ * If building up a request collecting pages is in progress,
+ * the request needs to be finalized and hung onto the global
+ * llist, 'migrc_reqs', so that it can be used in migrc's TLB
+ * flush routine, i.e. __migrc_try_flush_free_folios().
+ */
+ if (migrc_req_processing()) {
+ migrc_req_end();
+ migrc_req_start();
+ }
+ __migrc_try_flush_free_folios(&migrc_reqs_dirty);
+}
+
+struct migrc_req *fold_ubc_nowr_to_migrc(void)
+{
+ struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
+ struct migrc_req *req;
+ bool dirty;
+
+ if (!tlb_ubc_nowr->nr_flush_required)
+ return NULL;
+
+ dirty = tlb_ubc_nowr->writable;
+ req = dirty ? current->mreq_dirty : current->mreq;
+ if (!req) {
+ fold_ubc_nowr();
+ return NULL;
+ }
+
+ arch_tlbbatch_fold(&req->arch, &tlb_ubc_nowr->arch);
+ tlb_ubc_nowr->nr_flush_required = 0;
+ tlb_ubc_nowr->writable = false;
+ return req;
+}
+#endif
+
void fold_ubc_nowr(void)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
@@ -621,6 +712,16 @@ void fold_ubc_nowr(void)
tlb_ubc_nowr->writable = false;
}
+int nr_flush_required(void)
+{
+ return current->tlb_ubc.nr_flush_required;
+}
+
+int nr_flush_required_nowr(void)
+{
+ return current->tlb_ubc_nowr.nr_flush_required;
+}
+
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
* important if a PTE was dirty when it was unmapped that it's flushed
@@ -648,6 +749,8 @@ void try_to_unmap_flush_dirty(void)
if (tlb_ubc->writable || tlb_ubc_nowr->writable)
try_to_unmap_flush();
+
+ migrc_try_flush_free_folios_dirty();
}
/*
--
2.17.1
(I'm not sure whether this patch is meaningful. Ignore it if not.)
Signed-off-by: Byungchul Park <[email protected]>
---
mm/migrate.c | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index c57536a0b2a6..6b5113d5a1e2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -122,14 +122,33 @@ enum {
MIGRC_DST_PENDING,
};
+#define MAX_MIGRC_REQ_NR 4096
+static struct migrc_req migrc_req_pool_static[MAX_MIGRC_REQ_NR];
+static atomic_t migrc_req_pool_idx = ATOMIC_INIT(-1);
+static LLIST_HEAD(migrc_req_pool_llist);
+static DEFINE_SPINLOCK(migrc_req_pool_lock);
+
static struct migrc_req *alloc_migrc_req(void)
{
- return kmalloc(sizeof(struct migrc_req), GFP_KERNEL);
+ int idx = atomic_read(&migrc_req_pool_idx);
+ struct llist_node *n;
+
+ if (idx < MAX_MIGRC_REQ_NR - 1) {
+ idx = atomic_inc_return(&migrc_req_pool_idx);
+ if (idx < MAX_MIGRC_REQ_NR)
+ return migrc_req_pool_static + idx;
+ }
+
+ spin_lock(&migrc_req_pool_lock);
+ n = llist_del_first(&migrc_req_pool_llist);
+ spin_unlock(&migrc_req_pool_lock);
+
+ return n ? llist_entry(n, struct migrc_req, llnode) : NULL;
}
void free_migrc_req(struct migrc_req *req)
{
- kfree(req);
+ llist_add(&req->llnode, &migrc_req_pool_llist);
}
static bool migrc_is_full(int nid)
--
2.17.1