From: Aaron Tomlin <[email protected]>
Add a CPU-specific variable, vmstat_dirty, to indicate whether a vmstat
imbalance is present for a given CPU, so that all of the remaining
differentials can be folded at the appropriate time. This speeds up
quiet_vmstat() when no per-CPU differentials exist.
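
In outline, each vmstat modifier sets a per-CPU flag and quiet_vmstat()
tests it, replacing the need_update() scan with a single flag read. A
minimal userspace sketch of the pattern (not the kernel code; NR_CPUS
and the plain array are stand-ins for DEFINE_PER_CPU_ALIGNED() and the
this_cpu_*() accessors):

  #include <stdbool.h>

  #define NR_CPUS 8

  /* One flag per CPU, set whenever that CPU's vmstat diffs change. */
  static bool vmstat_dirty[NR_CPUS];

  /* Called from every vmstat modifier, e.g. __mod_zone_page_state(). */
  static void mark_vmstat_dirty(int cpu)
  {
          vmstat_dirty[cpu] = true;
  }

  /* quiet_vmstat() fast path: nothing to fold if the flag is clear. */
  static void quiet_vmstat(int cpu)
  {
          if (!vmstat_dirty[cpu])
                  return;
          vmstat_dirty[cpu] = false;  /* clear first, then fold */
          /* fold the remaining per-CPU differentials here */
  }

  int main(void)
  {
          mark_vmstat_dirty(0);
          quiet_vmstat(0);  /* flag set: folds */
          quiet_vmstat(0);  /* flag clear: returns immediately */
          return 0;
  }

The fast path is then a single flag test instead of need_update()'s
memchr_inv() scan over every zone's and node's diff arrays.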
Based on:
https://lore.kernel.org/lkml/[email protected]/

Signed-off-by: Aaron Tomlin <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
---
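The ordering in refresh_cpu_vm_stats() matters: vmstat_dirty is cleared
before the percpu diffs are read, so a racing update can at worst
re-mark the CPU dirty and trigger another fold, never be lost. A
schematic interleaving (illustrative only, not part of the diff below):

  /*
   *   fold path (refresh_cpu_vm_stats)      racing updater
   *   --------------------------------      --------------
   *   this_cpu_write(vmstat_dirty, false);
   *                                         __this_cpu_write(*p, x);
   *                                         mark_vmstat_dirty();
   *   read and fold vm_stat_diff[];
   *
   * The racing diff may be missed by this fold, but vmstat_dirty is
   * true again, so vmstat_shepherd/quiet_vmstat() will fold it later.
   * Clearing the flag after the fold could instead lose that mark.
   */
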
mm/vmstat.c | 54 ++++++++++++++++++++----------------------------------
1 file changed, 20 insertions(+), 34 deletions(-)
Index: linux-2.6/mm/vmstat.c
===================================================================
--- linux-2.6.orig/mm/vmstat.c
+++ linux-2.6/mm/vmstat.c
@@ -195,6 +195,12 @@ void fold_vm_numa_events(void)
#endif

#ifdef CONFIG_SMP
+static DEFINE_PER_CPU_ALIGNED(bool, vmstat_dirty);
+
+static inline void mark_vmstat_dirty(void)
+{
+ this_cpu_write(vmstat_dirty, true);
+}

int calculate_pressure_threshold(struct zone *zone)
{
@@ -367,6 +373,7 @@ void __mod_zone_page_state(struct zone *
x = 0;
}
__this_cpu_write(*p, x);
+ mark_vmstat_dirty();

if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
@@ -405,6 +412,7 @@ void __mod_node_page_state(struct pglist
x = 0;
}
__this_cpu_write(*p, x);
+ mark_vmstat_dirty();

if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
@@ -603,6 +611,7 @@ static inline void mod_zone_state(struct
if (z)
zone_page_state_add(z, zone, item);
+ mark_vmstat_dirty();
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
@@ -671,6 +680,7 @@ static inline void mod_node_state(struct
if (z)
node_page_state_add(z, pgdat, item);
+ mark_vmstat_dirty();
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
@@ -825,6 +835,14 @@ static int refresh_cpu_vm_stats(bool do_
int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
int changes = 0;

+ /*
+ * Clear vmstat_dirty before clearing the percpu vmstats.
+ * If interrupts are enabled, it is possible that an interrupt
+ * or another task modifies a percpu vmstat, which will
+ * set vmstat_dirty to true.
+ */
+ this_cpu_write(vmstat_dirty, false);
+
for_each_populated_zone(zone) {
struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
#ifdef CONFIG_NUMA
@@ -1949,35 +1967,6 @@ static void vmstat_update(struct work_st
}

/*
- * Check if the diffs for a certain cpu indicate that
- * an update is needed.
- */
-static bool need_update(int cpu)
-{
- pg_data_t *last_pgdat = NULL;
- struct zone *zone;
-
- for_each_populated_zone(zone) {
- struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
- struct per_cpu_nodestat *n;
-
- /*
- * The fast way of checking if there are any vmstat diffs.
- */
- if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
- return true;
-
- if (last_pgdat == zone->zone_pgdat)
- continue;
- last_pgdat = zone->zone_pgdat;
- n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
- if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
- return true;
- }
- return false;
-}
-
-/*
* Switch off vmstat processing and then fold all the remaining differentials
* until the diffs stay at zero. The function is used by NOHZ and can only be
* invoked when tick processing is not active.
@@ -1987,10 +1976,7 @@ void quiet_vmstat(void)
if (system_state != SYSTEM_RUNNING)
return;

- if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
- return;
-
- if (!need_update(smp_processor_id()))
+ if (!__this_cpu_read(vmstat_dirty))
return;

/*
@@ -2021,7 +2007,7 @@ static void vmstat_shepherd(struct work_
for_each_online_cpu(cpu) {
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

- if (!delayed_work_pending(dw) && need_update(cpu))
+ if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

cond_resched();