This series contains a fix for an edge case in my earlier protection
calculation patches, and a patch to make the area a little more robust
overall, to hopefully help avoid similar issues in the future.
Chris Down (1):
mm, memcg: Decouple e{low,min} state mutations from protection checks
Yafang Shao (1):
mm: memcontrol: memory.low reclaim fix & cleanup
include/linux/memcontrol.h | 48 +++++++++++++++++++++++++++++---------
mm/memcontrol.c | 43 ++++++++++++++++------------------
mm/vmscan.c | 17 ++++----------
3 files changed, 61 insertions(+), 47 deletions(-)
--
2.26.2
mem_cgroup_protected is currently used both to set effective low and min
and to return a mem_cgroup_protection based on the result. As a user, this
can be a little unexpected: it appears to be a simple predicate
function, if not for the big warning in the comment above about the
order in which it must be executed.
This change separates the state mutations from the
actual protection checks, which makes it more obvious where we need to
be careful mutating internal state, and where we are simply checking and
don't need to worry about that.
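To illustrate the resulting calling convention, here is a minimal
userspace sketch (the names mirror the patch, but the stub body and
the values are hypothetical placeholders rather than kernel code):

#include <stdbool.h>
#include <stdio.h>

struct memcg { unsigned long usage, emin, elow; };

/* The one place that writes effective protection state. */
static void calculate_protection(struct memcg *m)
{
	/* placeholder values; the kernel derives these from the tree */
	m->emin = 1024;
	m->elow = 2048;
}

/* Pure predicates: no side effects, safe to call in any order. */
static bool below_min(const struct memcg *m) { return m->emin >= m->usage; }
static bool below_low(const struct memcg *m) { return m->elow >= m->usage; }

int main(void)
{
	struct memcg m = { .usage = 1500 };

	calculate_protection(&m);	/* mutate once, up front */

	if (below_min(&m))
		puts("hard protection");
	else if (below_low(&m))
		puts("soft protection");
	else
		puts("unprotected");
	return 0;
}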
Signed-off-by: Chris Down <[email protected]>
Suggested-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Yafang Shao <[email protected]>
---
include/linux/memcontrol.h | 48 +++++++++++++++++++++++++++++---------
mm/memcontrol.c | 30 +++++++-----------------
mm/vmscan.c | 17 ++++----------
3 files changed, 49 insertions(+), 46 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d630af1a4e17..88576b1235b0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -50,12 +50,6 @@ enum memcg_memory_event {
MEMCG_NR_MEMORY_EVENTS,
};
-enum mem_cgroup_protection {
- MEMCG_PROT_NONE,
- MEMCG_PROT_LOW,
- MEMCG_PROT_MIN,
-};
-
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
unsigned int generation;
@@ -357,8 +351,26 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
READ_ONCE(memcg->memory.elow));
}
-enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
- struct mem_cgroup *memcg);
+void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
+
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return false;
+
+ return READ_ONCE(memcg->memory.elow) >=
+ page_counter_read(&memcg->memory);
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return false;
+
+ return READ_ONCE(memcg->memory.emin) >=
+ page_counter_read(&memcg->memory);
+}
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp,
@@ -838,13 +850,27 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
bool in_low_reclaim)
{
	return 0;
}

+static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+						   struct mem_cgroup *memcg)
+{
+}
+
-static inline enum mem_cgroup_protection mem_cgroup_protected(
- struct mem_cgroup *root, struct mem_cgroup *memcg)
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
- return MEMCG_PROT_NONE;
+ return false;
}
static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b0374be44e9e..317dbbaac603 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6368,27 +6368,21 @@ static unsigned long effective_protection(unsigned long usage,
}
/**
- * mem_cgroup_protected - check if memory consumption is in the normal range
+ * mem_cgroup_calculate_protection - calculate and cache effective low and min
* @root: the top ancestor of the sub-tree being checked
* @memcg: the memory cgroup to check
*
* WARNING: This function is not stateless! It can only be used as part
* of a top-down tree iteration, not for isolated queries.
- *
- * Returns one of the following:
- * MEMCG_PROT_NONE: cgroup memory is not protected
- * MEMCG_PROT_LOW: cgroup memory is protected as long there is
- * an unprotected supply of reclaimable memory from other cgroups.
- * MEMCG_PROT_MIN: cgroup memory is protected
*/
-enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
+void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
{
unsigned long usage, parent_usage;
struct mem_cgroup *parent;
if (mem_cgroup_disabled())
- return MEMCG_PROT_NONE;
+ return;
if (!root)
root = root_mem_cgroup;
@@ -6403,22 +6397,22 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
*/
WRITE_ONCE(memcg->memory.emin, 0);
WRITE_ONCE(memcg->memory.elow, 0);
- return MEMCG_PROT_NONE;
+ return;
}
usage = page_counter_read(&memcg->memory);
if (!usage)
- return MEMCG_PROT_NONE;
+ return;
parent = parent_mem_cgroup(memcg);
/* No parent means a non-hierarchical mode on v1 memcg */
if (!parent)
- return MEMCG_PROT_NONE;
+ return;
if (parent == root) {
memcg->memory.emin = READ_ONCE(memcg->memory.min);
memcg->memory.elow = memcg->memory.low;
- goto out;
+ return;
}
parent_usage = page_counter_read(&parent->memory);
@@ -6431,14 +6425,6 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
memcg->memory.low, READ_ONCE(parent->memory.elow),
atomic_long_read(&parent->memory.children_low_usage)));
-
-out:
- if (usage <= memcg->memory.emin)
- return MEMCG_PROT_MIN;
- else if (usage <= memcg->memory.elow)
- return MEMCG_PROT_LOW;
- else
- return MEMCG_PROT_NONE;
}
/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 72ac38eb8c29..e913c4652341 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2645,14 +2645,15 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
unsigned long reclaimed;
unsigned long scanned;
- switch (mem_cgroup_protected(target_memcg, memcg)) {
- case MEMCG_PROT_MIN:
+ mem_cgroup_calculate_protection(target_memcg, memcg);
+
+ if (mem_cgroup_below_min(memcg)) {
/*
* Hard protection.
* If there is no reclaimable memory, OOM.
*/
continue;
- case MEMCG_PROT_LOW:
+ } else if (mem_cgroup_below_low(memcg)) {
/*
* Soft protection.
* Respect the protection only as long as
@@ -2664,16 +2665,6 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
continue;
}
memcg_memory_event(memcg, MEMCG_LOW);
- break;
- case MEMCG_PROT_NONE:
- /*
- * All protection thresholds breached. We may
- * still choose to vary the scan pressure
- * applied based on by how much the cgroup in
- * question has exceeded its protection
- * thresholds (see get_scan_count).
- */
- break;
}
reclaimed = sc->nr_reclaimed;
--
2.26.2
From: Yafang Shao <[email protected]>
A cgroup can have both memory protection and a memory limit to isolate
it from its siblings in both directions - for example, to prevent it
from being shrunk below 2G under high pressure from outside, but also
from growing beyond 4G under low pressure.
Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
implemented proportional scan pressure so that multiple siblings in
excess of their protection settings don't get reclaimed equally but
instead in accordance with their unprotected portion.
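As a rough illustration of that proportionality, here is a simplified
standalone sketch (the kernel's actual get_scan_count() logic differs
in details such as rounding and clamping):

#include <stdio.h>

/* Scan pressure scales with the unprotected share of usage. */
static unsigned long proportional_scan(unsigned long lruvec_size,
				       unsigned long protection,
				       unsigned long usage)
{
	if (!usage || protection >= usage)
		return 0;	/* fully protected: no pressure */
	return lruvec_size - lruvec_size * protection / usage;
}

int main(void)
{
	/* a sibling using 4G with 1G of effective protection takes
	   three quarters of normal scan pressure (units of GiB) */
	printf("%lu\n", proportional_scan(1000, 1, 4));	/* prints 750 */
	return 0;
}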
During limit reclaim, this proportionality shouldn't apply of course:
there is no competition, all pressure is from within the cgroup and
should be applied as such. Reclaim should operate at full efficiency.
However, mem_cgroup_protected() never expected anybody to look at the
effective protection values when it indicated that the cgroup is above
its protection. As a result, a query during limit reclaim may return
stale protection values that were calculated by a previous reclaim cycle
in which the cgroup did have siblings.
When this happens, reclaim is unnecessarily hesitant and potentially
slow to meet the desired limit. In theory this could lead to premature
OOM kills, although it's not obvious this has occurred in practice.
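For reference, the query side that consumes these cached values looks
roughly like this at this point in the series (reconstructed from the
header context elsewhere in this series; treat it as a sketch, not as
verbatim source):

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}

Since get_scan_count() trusts whatever emin/elow are cached here, stale
values from an earlier cycle translate directly into reduced scan
pressure during limit reclaim.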
Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
Signed-off-by: Yafang Shao <[email protected]>
Signed-off-by: Chris Down <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Roman Gushchin <[email protected]>
[[email protected]: rework code comment]
[[email protected]: changelog]
[[email protected]: fix store tear]
[[email protected]: retitle]
---
mm/memcontrol.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0be00826b832..b0374be44e9e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6392,8 +6392,19 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
if (!root)
root = root_mem_cgroup;
- if (memcg == root)
+ if (memcg == root) {
+ /*
+ * The cgroup is the reclaim root in this reclaim
+ * cycle, and therefore not protected. But it may have
+ * stale effective protection values from previous
+ * cycles in which it was not the reclaim root - for
+ * example, global reclaim followed by limit reclaim.
+ * Reset these values for mem_cgroup_protection().
+ */
+ WRITE_ONCE(memcg->memory.emin, 0);
+ WRITE_ONCE(memcg->memory.elow, 0);
return MEMCG_PROT_NONE;
+ }
usage = page_counter_read(&memcg->memory);
if (!usage)
--
2.26.2
On Tue, Apr 28, 2020 at 07:26:47PM +0100, Chris Down wrote:
> From: Yafang Shao <[email protected]>
>
> A cgroup can have both memory protection and a memory limit to isolate
> it from its siblings in both directions - for example, to prevent it
> from being shrunk below 2G under high pressure from outside, but also
> from growing beyond 4G under low pressure.
>
> Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> implemented proportional scan pressure so that multiple siblings in
> excess of their protection settings don't get reclaimed equally but
> instead in accordance with their unprotected portion.
>
> During limit reclaim, this proportionality shouldn't apply of course:
> there is no competition, all pressure is from within the cgroup and
> should be applied as such. Reclaim should operate at full efficiency.
>
> However, mem_cgroup_protected() never expected anybody to look at the
> effective protection values when it indicated that the cgroup is above
> its protection. As a result, a query during limit reclaim may return
> stale protection values that were calculated by a previous reclaim cycle
> in which the cgroup did have siblings.
>
> When this happens, reclaim is unnecessarily hesitant and potentially
> slow to meet the desired limit. In theory this could lead to premature
> OOM kills, although it's not obvious this has occurred in practice.
>
> Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> Signed-off-by: Yafang Shao <[email protected]>
> Signed-off-by: Chris Down <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Roman Gushchin <[email protected]>
>
> [[email protected]: rework code comment]
> [[email protected]: changelog]
> [[email protected]: fix store tear]
> [[email protected]: retitle]
Acked-by: Johannes Weiner <[email protected]>
On Tue, Apr 28, 2020 at 07:27:00PM +0100, Chris Down wrote:
> mem_cgroup_protected is currently used both to set effective low and min
> and to return a mem_cgroup_protection based on the result. As a user, this
> can be a little unexpected: it appears to be a simple predicate
> function, if not for the big warning in the comment above about the
> order in which it must be executed.
>
> This change separates the state mutations from the
> actual protection checks, which makes it more obvious where we need to
> be careful mutating internal state, and where we are simply checking and
> don't need to worry about that.
>
> Signed-off-by: Chris Down <[email protected]>
> Suggested-by: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Roman Gushchin <[email protected]>
> Cc: Yafang Shao <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
On Tue 28-04-20 19:27:00, Chris Down wrote:
> mem_cgroup_protected is currently used both to set effective low and min
> and to return a mem_cgroup_protection based on the result. As a user, this
> can be a little unexpected: it appears to be a simple predicate
> function, if not for the big warning in the comment above about the
> order in which it must be executed.
>
> This change separates the state mutations from the
> actual protection checks, which makes it more obvious where we need to
> be careful mutating internal state, and where we are simply checking and
> don't need to worry about that.
>
> Signed-off-by: Chris Down <[email protected]>
> Suggested-by: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Roman Gushchin <[email protected]>
> Cc: Yafang Shao <[email protected]>
Acked-by: Michal Hocko <[email protected]>
> ---
> include/linux/memcontrol.h | 48 +++++++++++++++++++++++++++++---------
> mm/memcontrol.c | 30 +++++++-----------------
> mm/vmscan.c | 17 ++++----------
> 3 files changed, 49 insertions(+), 46 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index d630af1a4e17..88576b1235b0 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -50,12 +50,6 @@ enum memcg_memory_event {
> MEMCG_NR_MEMORY_EVENTS,
> };
>
> -enum mem_cgroup_protection {
> - MEMCG_PROT_NONE,
> - MEMCG_PROT_LOW,
> - MEMCG_PROT_MIN,
> -};
> -
> struct mem_cgroup_reclaim_cookie {
> pg_data_t *pgdat;
> unsigned int generation;
> @@ -357,8 +351,26 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
> READ_ONCE(memcg->memory.elow));
> }
>
> -enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
> - struct mem_cgroup *memcg);
> +void mem_cgroup_calculate_protection(struct mem_cgroup *root,
> + struct mem_cgroup *memcg);
> +
> +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
> +{
> + if (mem_cgroup_disabled())
> + return false;
> +
> + return READ_ONCE(memcg->memory.elow) >=
> + page_counter_read(&memcg->memory);
> +}
> +
> +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
> +{
> + if (mem_cgroup_disabled())
> + return false;
> +
> + return READ_ONCE(memcg->memory.emin) >=
> + page_counter_read(&memcg->memory);
> +}
>
> int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
> gfp_t gfp_mask, struct mem_cgroup **memcgp,
> @@ -838,13 +850,27 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
> static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
> bool in_low_reclaim)
> {
> 	return 0;
> }
>
> +static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
> +						   struct mem_cgroup *memcg)
> +{
> +}
> +
>
> -static inline enum mem_cgroup_protection mem_cgroup_protected(
> - struct mem_cgroup *root, struct mem_cgroup *memcg)
> +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
> +{
> + return false;
> +}
> +
> +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
> {
> - return MEMCG_PROT_NONE;
> + return false;
> }
>
> static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index b0374be44e9e..317dbbaac603 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -6368,27 +6368,21 @@ static unsigned long effective_protection(unsigned long usage,
> }
>
> /**
> - * mem_cgroup_protected - check if memory consumption is in the normal range
> + * mem_cgroup_calculate_protection - calculate and cache effective low and min
> * @root: the top ancestor of the sub-tree being checked
> * @memcg: the memory cgroup to check
> *
> * WARNING: This function is not stateless! It can only be used as part
> * of a top-down tree iteration, not for isolated queries.
> - *
> - * Returns one of the following:
> - * MEMCG_PROT_NONE: cgroup memory is not protected
> - * MEMCG_PROT_LOW: cgroup memory is protected as long there is
> - * an unprotected supply of reclaimable memory from other cgroups.
> - * MEMCG_PROT_MIN: cgroup memory is protected
> */
> -enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
> - struct mem_cgroup *memcg)
> +void mem_cgroup_calculate_protection(struct mem_cgroup *root,
> + struct mem_cgroup *memcg)
> {
> unsigned long usage, parent_usage;
> struct mem_cgroup *parent;
>
> if (mem_cgroup_disabled())
> - return MEMCG_PROT_NONE;
> + return;
>
> if (!root)
> root = root_mem_cgroup;
> @@ -6403,22 +6397,22 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
> */
> WRITE_ONCE(memcg->memory.emin, 0);
> WRITE_ONCE(memcg->memory.elow, 0);
> - return MEMCG_PROT_NONE;
> + return;
> }
>
> usage = page_counter_read(&memcg->memory);
> if (!usage)
> - return MEMCG_PROT_NONE;
> + return;
>
> parent = parent_mem_cgroup(memcg);
> /* No parent means a non-hierarchical mode on v1 memcg */
> if (!parent)
> - return MEMCG_PROT_NONE;
> + return;
>
> if (parent == root) {
> memcg->memory.emin = READ_ONCE(memcg->memory.min);
> memcg->memory.elow = memcg->memory.low;
> - goto out;
> + return;
> }
>
> parent_usage = page_counter_read(&parent->memory);
> @@ -6431,14 +6425,6 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
> WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
> memcg->memory.low, READ_ONCE(parent->memory.elow),
> atomic_long_read(&parent->memory.children_low_usage)));
> -
> -out:
> - if (usage <= memcg->memory.emin)
> - return MEMCG_PROT_MIN;
> - else if (usage <= memcg->memory.elow)
> - return MEMCG_PROT_LOW;
> - else
> - return MEMCG_PROT_NONE;
> }
>
> /**
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 72ac38eb8c29..e913c4652341 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2645,14 +2645,15 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
> unsigned long reclaimed;
> unsigned long scanned;
>
> - switch (mem_cgroup_protected(target_memcg, memcg)) {
> - case MEMCG_PROT_MIN:
> + mem_cgroup_calculate_protection(target_memcg, memcg);
> +
> + if (mem_cgroup_below_min(memcg)) {
> /*
> * Hard protection.
> * If there is no reclaimable memory, OOM.
> */
> continue;
> - case MEMCG_PROT_LOW:
> + } else if (mem_cgroup_below_low(memcg)) {
> /*
> * Soft protection.
> * Respect the protection only as long as
> @@ -2664,16 +2665,6 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
> continue;
> }
> memcg_memory_event(memcg, MEMCG_LOW);
> - break;
> - case MEMCG_PROT_NONE:
> - /*
> - * All protection thresholds breached. We may
> - * still choose to vary the scan pressure
> - * applied based on by how much the cgroup in
> - * question has exceeded its protection
> - * thresholds (see get_scan_count).
> - */
> - break;
> }
>
> reclaimed = sc->nr_reclaimed;
> --
> 2.26.2
--
Michal Hocko
SUSE Labs
On Tue 28-04-20 19:26:47, Chris Down wrote:
> From: Yafang Shao <[email protected]>
>
> A cgroup can have both memory protection and a memory limit to isolate
> it from its siblings in both directions - for example, to prevent it
> from being shrunk below 2G under high pressure from outside, but also
> from growing beyond 4G under low pressure.
>
> Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> implemented proportional scan pressure so that multiple siblings in
> excess of their protection settings don't get reclaimed equally but
> instead in accordance with their unprotected portion.
>
> During limit reclaim, this proportionality shouldn't apply of course:
> there is no competition, all pressure is from within the cgroup and
> should be applied as such. Reclaim should operate at full efficiency.
>
> However, mem_cgroup_protected() never expected anybody to look at the
> effective protection values when it indicated that the cgroup is above
> its protection. As a result, a query during limit reclaim may return
> stale protection values that were calculated by a previous reclaim cycle
> in which the cgroup did have siblings.
>
> When this happens, reclaim is unnecessarily hesitant and potentially
> slow to meet the desired limit. In theory this could lead to premature
> OOM kills, although it's not obvious this has occurred in practice.
Thanks, this describes the underlying problem. I would also be explicit
that the issue should be visible only on tail memcgs which have both
max/high and protection configured and the effect depends on the
difference between the two (the smaller it is, the larger the effect).
There is no mention of the fix. The patch resets effective values for
the reclaim root and I've had some concerns about that
http://lkml.kernel.org/r/[email protected].
Johannes has argued that other races are possible and I didn't get to
think about it thoroughly. But this patch is introducing a new
possibility of breaking protection. If we want to have a quick and
simple fix that would be easier to backport to older kernels then I
would feel much better if we simply worked around the problem as
suggested earlier http://lkml.kernel.org/r/[email protected]
We can rework the effective values calculation to be more robust against
races on top of that because this is likely a more tricky thing to do.
> Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> Signed-off-by: Yafang Shao <[email protected]>
> Signed-off-by: Chris Down <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Roman Gushchin <[email protected]>
>
> [[email protected]: rework code comment]
> [[email protected]: changelog]
> [[email protected]: fix store tear]
> [[email protected]: retitle]
> ---
> mm/memcontrol.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 0be00826b832..b0374be44e9e 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -6392,8 +6392,19 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
>
> if (!root)
> root = root_mem_cgroup;
> - if (memcg == root)
> + if (memcg == root) {
> + /*
> + * The cgroup is the reclaim root in this reclaim
> + * cycle, and therefore not protected. But it may have
> + * stale effective protection values from previous
> + * cycles in which it was not the reclaim root - for
> + * example, global reclaim followed by limit reclaim.
> + * Reset these values for mem_cgroup_protection().
> + */
> + WRITE_ONCE(memcg->memory.emin, 0);
> + WRITE_ONCE(memcg->memory.elow, 0);
> return MEMCG_PROT_NONE;
> + }
>
> usage = page_counter_read(&memcg->memory);
> if (!usage)
> --
> 2.26.2
--
Michal Hocko
SUSE Labs
On Wed, Apr 29, 2020 at 6:15 PM Michal Hocko <[email protected]> wrote:
>
> On Tue 28-04-20 19:26:47, Chris Down wrote:
> > From: Yafang Shao <[email protected]>
> >
> > A cgroup can have both memory protection and a memory limit to isolate
> > it from its siblings in both directions - for example, to prevent it
> > from being shrunk below 2G under high pressure from outside, but also
> > from growing beyond 4G under low pressure.
> >
> > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > implemented proportional scan pressure so that multiple siblings in
> > excess of their protection settings don't get reclaimed equally but
> > instead in accordance with their unprotected portion.
> >
> > During limit reclaim, this proportionality shouldn't apply of course:
> > there is no competition, all pressure is from within the cgroup and
> > should be applied as such. Reclaim should operate at full efficiency.
> >
> > However, mem_cgroup_protected() never expected anybody to look at the
> > effective protection values when it indicated that the cgroup is above
> > its protection. As a result, a query during limit reclaim may return
> > stale protection values that were calculated by a previous reclaim cycle
> > in which the cgroup did have siblings.
> >
> > When this happens, reclaim is unnecessarily hesitant and potentially
> > slow to meet the desired limit. In theory this could lead to premature
> > OOM kills, although it's not obvious this has occurred in practice.
>
> Thanks, this describes the underlying problem. I would also be explicit
> that the issue should be visible only on tail memcgs which have both
> max/high and protection configured and the effect depends on the
> difference between the two (the smaller it is, the larger the effect).
>
> There is no mention of the fix. The patch resets effective values for
> the reclaim root and I've had some concerns about that
> http://lkml.kernel.org/r/[email protected].
> Johannes has argued that other races are possible and I didn't get to
> think about it thoroughly. But this patch is introducing a new
> possibility of breaking protection.
Agreed with Michal that more writes will cause more bugs.
We should operate on the volatile emin and elow as little as possible.
> If we want to have a quick and
> simple fix that would be easier to backport to older kernels then I
> would feel much better if we simply worked around the problem as
> suggested earlier http://lkml.kernel.org/r/[email protected]
+1
This should be the right workaround to fix the current issue and it is
worth backporting to the stable kernel.
> We can rework the effective values calculation to be more robust against
> races on top of that because this is likely a more tricky thing to do.
>
> > Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > Signed-off-by: Yafang Shao <[email protected]>
> > Signed-off-by: Chris Down <[email protected]>
> > Cc: Johannes Weiner <[email protected]>
> > Cc: Michal Hocko <[email protected]>
> > Cc: Roman Gushchin <[email protected]>
> >
> > [[email protected]: rework code comment]
> > [[email protected]: changelog]
> > [[email protected]: fix store tear]
> > [[email protected]: retitle]
> > ---
> > mm/memcontrol.c | 13 ++++++++++++-
> > 1 file changed, 12 insertions(+), 1 deletion(-)
> >
> > diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> > index 0be00826b832..b0374be44e9e 100644
> > --- a/mm/memcontrol.c
> > +++ b/mm/memcontrol.c
> > @@ -6392,8 +6392,19 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
> >
> > if (!root)
> > root = root_mem_cgroup;
> > - if (memcg == root)
> > + if (memcg == root) {
> > + /*
> > + * The cgroup is the reclaim root in this reclaim
> > + * cycle, and therefore not protected. But it may have
> > + * stale effective protection values from previous
> > + * cycles in which it was not the reclaim root - for
> > + * example, global reclaim followed by limit reclaim.
> > + * Reset these values for mem_cgroup_protection().
> > + */
> > + WRITE_ONCE(memcg->memory.emin, 0);
> > + WRITE_ONCE(memcg->memory.elow, 0);
> > return MEMCG_PROT_NONE;
> > + }
> >
> > usage = page_counter_read(&memcg->memory);
> > if (!usage)
> > --
> > 2.26.2
>
> --
> Michal Hocko
> SUSE Labs
--
Thanks
Yafang
On Wed, Apr 29, 2020 at 12:15:10PM +0200, Michal Hocko wrote:
> On Tue 28-04-20 19:26:47, Chris Down wrote:
> > From: Yafang Shao <[email protected]>
> >
> > A cgroup can have both memory protection and a memory limit to isolate
> > it from its siblings in both directions - for example, to prevent it
> > from being shrunk below 2G under high pressure from outside, but also
> > from growing beyond 4G under low pressure.
> >
> > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > implemented proportional scan pressure so that multiple siblings in
> > excess of their protection settings don't get reclaimed equally but
> > instead in accordance with their unprotected portion.
> >
> > During limit reclaim, this proportionality shouldn't apply of course:
> > there is no competition, all pressure is from within the cgroup and
> > should be applied as such. Reclaim should operate at full efficiency.
> >
> > However, mem_cgroup_protected() never expected anybody to look at the
> > effective protection values when it indicated that the cgroup is above
> > its protection. As a result, a query during limit reclaim may return
> > stale protection values that were calculated by a previous reclaim cycle
> > in which the cgroup did have siblings.
> >
> > When this happens, reclaim is unnecessarily hesitant and potentially
> > slow to meet the desired limit. In theory this could lead to premature
> > OOM kills, although it's not obvious this has occurred in practice.
>
> Thanks, this describes the underlying problem. I would also be explicit
> that the issue should be visible only on tail memcgs which have both
> max/high and protection configured and the effect depends on the
> difference between the two (the smaller it is, the larger the effect).
>
> There is no mention of the fix. The patch resets effective values for
> the reclaim root and I've had some concerns about that
> http://lkml.kernel.org/r/[email protected].
> Johannes has argued that other races are possible and I didn't get to
> think about it thoroughly. But this patch is introducing a new
> possibility of breaking protection. If we want to have a quick and
> simple fix that would be easier to backport to older kernels then I
> would feel much better if we simply worked around the problem as
> suggested earlier http://lkml.kernel.org/r/[email protected]
> We can rework the effective values calculation to be more robust against
> races on top of that because this is likely a more tricky thing to do.
Well, can you please *do* think more thoroughly about what I wrote,
instead of pushing for an alternative patch on gut feeling alone?
Especially when you imply that this should be a stable patch.
Not only does your alternative patch not protect against the race you
are worried about, the race itself doesn't matter. Racing reclaimers
will write their competing views of the world into the shared state on
all other levels anyway.
And that's okay. If the configuration and memory usage is such that
there is at least one reclaimer that scans without any protection
(like a limit reclaimer), it's not a problem when a second reclaimer
that meant to do protected global reclaim will also do one iteration
without protection. It's no different than if a second thread had
entered limit reclaim through another internal allocation.
There is no semantical violation with the race in your patch or the
race in this patch. Any effective protection that becomes visible is
1) permitted by the configuration, but 2) also triggered *right now*
by an acute need to reclaim memory with these parameters.
The *right now* part is important. That's what's broken before either
patch, and that's what we're fixing: to see really, really *old* stale
that might not be representative of the config semantics anymore.
Since you haven't linked to my email, here is my counter argument to
the alternative patch "fixing" this race somehow.
A reclaim:
root
`- A (low=2G, max=3G -> elow=0)
`- A1 (low=0G -> elow=0)
Global reclaim:
root
`- A (low=2G, max=3G -> elow=2G)
`- A1 (low=0G -> elow=2G)
During global reclaim, A1 is supposed to have 2G effective low
protection. If A limit reclaim races, it can set A1's elow to
0. Global reclaim will now query mem_cgroup_protection(root, A1), the
root == memcg check you insist we add will fail and it'll reclaim A1
without protection.
The alternative patch is nothing except slightly worse code.
On Wed, Apr 29, 2020 at 10:03 PM Johannes Weiner <[email protected]> wrote:
>
> On Wed, Apr 29, 2020 at 12:15:10PM +0200, Michal Hocko wrote:
> > On Tue 28-04-20 19:26:47, Chris Down wrote:
> > > From: Yafang Shao <[email protected]>
> > >
> > > A cgroup can have both memory protection and a memory limit to isolate
> > > it from its siblings in both directions - for example, to prevent it
> > > from being shrunk below 2G under high pressure from outside, but also
> > > from growing beyond 4G under low pressure.
> > >
> > > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > > implemented proportional scan pressure so that multiple siblings in
> > > excess of their protection settings don't get reclaimed equally but
> > > instead in accordance with their unprotected portion.
> > >
> > > During limit reclaim, this proportionality shouldn't apply of course:
> > > there is no competition, all pressure is from within the cgroup and
> > > should be applied as such. Reclaim should operate at full efficiency.
> > >
> > > However, mem_cgroup_protected() never expected anybody to look at the
> > > effective protection values when it indicated that the cgroup is above
> > > its protection. As a result, a query during limit reclaim may return
> > > stale protection values that were calculated by a previous reclaim cycle
> > > in which the cgroup did have siblings.
> > >
> > > When this happens, reclaim is unnecessarily hesitant and potentially
> > > slow to meet the desired limit. In theory this could lead to premature
> > > OOM kills, although it's not obvious this has occurred in practice.
> >
> > Thanks, this describes the underlying problem. I would also be explicit
> > that the issue should be visible only on tail memcgs which have both
> > max/high and protection configured and the effect depends on the
> > difference between the two (the smaller it is, the larger the effect).
> >
> > There is no mention of the fix. The patch resets effective values for
> > the reclaim root and I've had some concerns about that
> > http://lkml.kernel.org/r/[email protected].
> > Johannes has argued that other races are possible and I didn't get to
> > think about it thoroughly. But this patch is introducing a new
> > possibility of breaking protection. If we want to have a quick and
> > simple fix that would be easier to backport to older kernels then I
> > would feel much better if we simply worked around the problem as
> > suggested earlier http://lkml.kernel.org/r/[email protected]
> > We can rework the effective values calculation to be more robust against
> > races on top of that because this is likely a more tricky thing to do.
>
> Well, can you please *do* think more thoroughly about what I wrote,
> instead of pushing for an alternative patch on gut feeling alone?
>
> Especially when you imply that this should be a stable patch.
>
> Not only does your alternative patch not protect against the race you
> are worried about, the race itself doesn't matter. Racing reclaimers
> will write their competing views of the world into the shared state on
> all other levels anyway.
>
> And that's okay. If the configuration and memory usage is such that
> there is at least one reclaimer that scans without any protection
> (like a limit reclaimer), it's not a problem when a second reclaimer
> that meant to do protected global reclaim will also do one iteration
> without protection. It's no different than if a second thread had
> entered limit reclaim through another internal allocation.
>
> There is no semantical violation with the race in your patch or the
> race in this patch. Any effective protection that becomes visible is
> 1) permitted by the configuration, but 2) also triggered *right now*
> by an acute need to reclaim memory with these parameters.
>
> The *right now* part is important. That's what's broken before either
> patch, and that's what we're fixing: to see really, really *old* stale state
> that might not be representative of the config semantics anymore.
>
> Since you haven't linked to my email, here is my counter argument to
> the alternative patch "fixing" this race somehow.
>
> A reclaim:
>
> root
> `- A (low=2G, max=3G -> elow=0)
> `- A1 (low=0G -> elow=0)
>
> Global reclaim:
>
> root
> `- A (low=2G, max=3G -> elow=2G)
> `- A1 (low=0G -> elow=2G)
>
> During global reclaim, A1 is supposed to have 2G effective low
> protection. If A limit reclaim races, it can set A1's elow to
> 0.
Before commit 8a931f801340c2be ("mm: memcontrol: recursive
memory.low protection"), A1's elow should be 0, while after this
commit A1's elow is 2G.
That is a behavior change.
Then this case gives us another example of why accessing emin and elow in
the very deep reclaiming code (get_scan_count) is the root of ALL
EVIL.
> Global reclaim will now query mem_cgroup_protection(root, A1), the
> root == memcg check you insist we add will fail and it'll reclaim A1
> without protection.
>
> The alternative patch is nothing except slightly worse code.
--
Thanks
Yafang
On Wed, Apr 29, 2020 at 06:53:03PM +0800, Yafang Shao wrote:
> On Wed, Apr 29, 2020 at 6:15 PM Michal Hocko <[email protected]> wrote:
> >
> > On Tue 28-04-20 19:26:47, Chris Down wrote:
> > > From: Yafang Shao <[email protected]>
> > >
> > > A cgroup can have both memory protection and a memory limit to isolate
> > > it from its siblings in both directions - for example, to prevent it
> > > from being shrunk below 2G under high pressure from outside, but also
> > > from growing beyond 4G under low pressure.
> > >
> > > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > > implemented proportional scan pressure so that multiple siblings in
> > > excess of their protection settings don't get reclaimed equally but
> > > instead in accordance with their unprotected portion.
> > >
> > > During limit reclaim, this proportionality shouldn't apply of course:
> > > there is no competition, all pressure is from within the cgroup and
> > > should be applied as such. Reclaim should operate at full efficiency.
> > >
> > > However, mem_cgroup_protected() never expected anybody to look at the
> > > effective protection values when it indicated that the cgroup is above
> > > its protection. As a result, a query during limit reclaim may return
> > > stale protection values that were calculated by a previous reclaim cycle
> > > in which the cgroup did have siblings.
> > >
> > > When this happens, reclaim is unnecessarily hesitant and potentially
> > > slow to meet the desired limit. In theory this could lead to premature
> > > OOM kills, although it's not obvious this has occurred in practice.
> >
> > Thanks, this describes the underlying problem. I would also be explicit
> > that the issue should be visible only on tail memcgs which have both
> > max/high and protection configured and the effect depends on the
> > difference between the two (the smaller it is, the larger the effect).
> >
> > There is no mention of the fix. The patch resets effective values for
> > the reclaim root and I've had some concerns about that
> > http://lkml.kernel.org/r/[email protected].
> > Johannes has argued that other races are possible and I didn't get to
> > think about it thoroughly. But this patch is introducing a new
> > possibility of breaking protection.
>
> Agreed with Michal that more writes will cause more bugs.
> We should operate on the volatile emin and elow as little as possible.
That's not a technical argument.
If races are a problem, it doesn't matter that they're rare. If
they're not a problem, it doesn't matter that they're frequent.
> > If we want to have a quick and
> > simple fix that would be easier to backport to older kernels then I
> > would feel much better if we simply worked around the problem as
> > suggested earlier http://lkml.kernel.org/r/[email protected]
>
> +1
>
> This should be the right workaround to fix the current issue and it is
> worth backporting to the stable kernel.
From Documentation/process/stable-kernel-rules.rst:
- It must fix a real bug that bothers people (not a, "This could be a
problem..." type thing).
There hasn't been a mention of this affecting real workloads in the
submission history of this patch, so it doesn't qualify for -stable.
On Wed, Apr 29, 2020 at 10:17:21PM +0800, Yafang Shao wrote:
> On Wed, Apr 29, 2020 at 10:03 PM Johannes Weiner <[email protected]> wrote:
> >
> > On Wed, Apr 29, 2020 at 12:15:10PM +0200, Michal Hocko wrote:
> > > On Tue 28-04-20 19:26:47, Chris Down wrote:
> > > > From: Yafang Shao <[email protected]>
> > > >
> > > > A cgroup can have both memory protection and a memory limit to isolate
> > > > it from its siblings in both directions - for example, to prevent it
> > > > from being shrunk below 2G under high pressure from outside, but also
> > > > from growing beyond 4G under low pressure.
> > > >
> > > > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > > > implemented proportional scan pressure so that multiple siblings in
> > > > excess of their protection settings don't get reclaimed equally but
> > > > instead in accordance with their unprotected portion.
> > > >
> > > > During limit reclaim, this proportionality shouldn't apply of course:
> > > > there is no competition, all pressure is from within the cgroup and
> > > > should be applied as such. Reclaim should operate at full efficiency.
> > > >
> > > > However, mem_cgroup_protected() never expected anybody to look at the
> > > > effective protection values when it indicated that the cgroup is above
> > > > its protection. As a result, a query during limit reclaim may return
> > > > stale protection values that were calculated by a previous reclaim cycle
> > > > in which the cgroup did have siblings.
> > > >
> > > > When this happens, reclaim is unnecessarily hesitant and potentially
> > > > slow to meet the desired limit. In theory this could lead to premature
> > > > OOM kills, although it's not obvious this has occurred in practice.
> > >
> > > Thanks, this describes the underlying problem. I would also be explicit
> > > that the issue should be visible only on tail memcgs which have both
> > > max/high and protection configured and the effect depends on the
> > > difference between the two (the smaller it is, the larger the effect).
> > >
> > > There is no mention of the fix. The patch resets effective values for
> > > the reclaim root and I've had some concerns about that
> > > http://lkml.kernel.org/r/[email protected].
> > > Johannes has argued that other races are possible and I didn't get to
> > > think about it thoroughly. But this patch is introducing a new
> > > possibility of breaking protection. If we want to have a quick and
> > > simple fix that would be easier to backport to older kernels then I
> > > would feel much better if we simply worked around the problem as
> > > suggested earlier http://lkml.kernel.org/r/[email protected]
> > > We can rework the effective values calculation to be more robust against
> > > races on top of that because this is likely a more tricky thing to do.
> >
> > Well, can you please *do* think more thoroughly about what I wrote,
> > instead of pushing for an alternative patch on gut feeling alone?
> >
> > Especially when you imply that this should be a stable patch.
> >
> > Not only does your alternative patch not protect against the race you
> > are worried about, the race itself doesn't matter. Racing reclaimers
> > will write their competing views of the world into the shared state on
> > all other levels anyway.
> >
> > And that's okay. If the configuration and memory usage is such that
> > there is at least one reclaimer that scans without any protection
> > (like a limit reclaimer), it's not a problem when a second reclaimer
> > that meant to do protected global reclaim will also do one iteration
> > without protection. It's no different than if a second thread had
> > entered limit reclaim through another internal allocation.
> >
> > There is no semantical violation with the race in your patch or the
> > race in this patch. Any effective protection that becomes visible is
> > 1) permitted by the configuration, but 2) also triggered *right now*
> > by an acute need to reclaim memory with these parameters.
> >
> > The *right now* part is important. That's what's broken before either
> > patch, and that's what we're fixing: to see really, really *old* stale state
> > that might not be representative of the config semantics anymore.
> >
> > Since you haven't linked to my email, here is my counter argument to
> > the alternative patch "fixing" this race somehow.
> >
> > A reclaim:
> >
> > root
> > `- A (low=2G, max=3G -> elow=0)
> > `- A1 (low=0G -> elow=0)
> >
> > Global reclaim:
> >
> > root
> > `- A (low=2G, max=3G -> elow=2G)
> > `- A1 (low=0G -> elow=2G)
> >
> > During global reclaim, A1 is supposed to have 2G effective low
> > protection. If A limit reclaim races, it can set A1's elow to
> > 0.
>
> Before commit 8a931f801340c2be ("mm: memcontrol: recursive
> memory.low protection"), A1's elow should be 0, while after this
> commit A1's elow is 2G.
> That is a behavior change.
Yes, that was an intentional change around the inheritance rules.
And your alternative patch doesn't fix the race you are (wrongly)
worried about under these rules.
What's your point, exactly?
> Then this case gives us another example of why accessing emin and elow in
> the very deep reclaiming code (get_scan_count) is the root of ALL
> EVIL.
You must be confusing this software engineering list with a witch
doctor conference.
On Wed, Apr 29, 2020 at 10:27 PM Johannes Weiner <[email protected]> wrote:
>
> On Wed, Apr 29, 2020 at 10:17:21PM +0800, Yafang Shao wrote:
> > On Wed, Apr 29, 2020 at 10:03 PM Johannes Weiner <[email protected]> wrote:
> > >
> > > On Wed, Apr 29, 2020 at 12:15:10PM +0200, Michal Hocko wrote:
> > > > On Tue 28-04-20 19:26:47, Chris Down wrote:
> > > > > From: Yafang Shao <[email protected]>
> > > > >
> > > > > A cgroup can have both memory protection and a memory limit to isolate
> > > > > it from its siblings in both directions - for example, to prevent it
> > > > > from being shrunk below 2G under high pressure from outside, but also
> > > > > from growing beyond 4G under low pressure.
> > > > >
> > > > > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > > > > implemented proportional scan pressure so that multiple siblings in
> > > > > excess of their protection settings don't get reclaimed equally but
> > > > > instead in accordance with their unprotected portion.
> > > > >
> > > > > During limit reclaim, this proportionality shouldn't apply of course:
> > > > > there is no competition, all pressure is from within the cgroup and
> > > > > should be applied as such. Reclaim should operate at full efficiency.
> > > > >
> > > > > However, mem_cgroup_protected() never expected anybody to look at the
> > > > > effective protection values when it indicated that the cgroup is above
> > > > > its protection. As a result, a query during limit reclaim may return
> > > > > stale protection values that were calculated by a previous reclaim cycle
> > > > > in which the cgroup did have siblings.
> > > > >
> > > > > When this happens, reclaim is unnecessarily hesitant and potentially
> > > > > slow to meet the desired limit. In theory this could lead to premature
> > > > > OOM kills, although it's not obvious this has occurred in practice.
> > > >
> > > > Thanks, this describes the underlying problem. I would also be explicit
> > > > that the issue should be visible only on tail memcgs which have both
> > > > max/high and protection configured and the effect depends on the
> > > > difference between the two (the smaller it is, the larger the effect).
> > > >
> > > > There is no mention of the fix. The patch resets effective values for
> > > > the reclaim root and I've had some concerns about that
> > > > http://lkml.kernel.org/r/[email protected].
> > > > Johannes has argued that other races are possible and I didn't get to
> > > > think about it thoroughly. But this patch is introducing a new
> > > > possibility of breaking protection. If we want to have a quick and
> > > > simple fix that would be easier to backport to older kernels then I
> > > > would feel much better if we simply worked around the problem as
> > > > suggested earlier http://lkml.kernel.org/r/[email protected]
> > > > We can rework the effective values calculation to be more robust against
> > > > races on top of that because this is likely a more tricky thing to do.
> > >
> > > Well, can you please *do* think more thoroughly about what I wrote,
> > > instead of pushing for an alternative patch on gut feeling alone?
> > >
> > > Especially when you imply that this should be a stable patch.
> > >
> > > Not only does your alternative patch not protect against the race you
> > > are worried about, the race itself doesn't matter. Racing reclaimers
> > > will write their competing views of the world into the shared state on
> > > all other levels anyway.
> > >
> > > And that's okay. If the configuration and memory usage is such that
> > > there is at least one reclaimer that scans without any protection
> > > (like a limit reclaimer), it's not a problem when a second reclaimer
> > > that meant to do protected global reclaim will also do one iteration
> > > without protection. It's no different than if a second thread had
> > > entered limit reclaim through another internal allocation.
> > >
> > > There is no semantical violation with the race in your patch or the
> > > race in this patch. Any effective protection that becomes visible is
> > > 1) permitted by the configuration, but 2) also triggered *right now*
> > > by an acute need to reclaim memory with these parameters.
> > >
> > > The *right now* part is important. That's what's broken before either
> > > patch, and that's what we're fixing: to see really, really *old* stale state
> > > that might not be representative of the config semantics anymore.
> > >
> > > Since you haven't linked to my email, here is my counter argument to
> > > the alternative patch "fixing" this race somehow.
> > >
> > > A reclaim:
> > >
> > > root
> > > `- A (low=2G, max=3G -> elow=0)
> > > `- A1 (low=0G -> elow=0)
> > >
> > > Global reclaim:
> > >
> > > root
> > > `- A (low=2G, max=3G -> elow=2G)
> > > `- A1 (low=0G -> elow=2G)
> > >
> > > During global reclaim, A1 is supposed to have 2G effective low
> > > protection. If A limit reclaim races, it can set A1's elow to
> > > 0.
> >
> > Before commit 8a931f801340c2be ("mm: memcontrol: recursive
> > memory.low protection"), A1's elow should be 0, while after this
> > commit A1's elow is 2G.
> > That is a behavior change.
>
> Yes, that was an intentional change around the inheritance rules.
>
> And your alternative patch doesn't fix the race you are (wrongly)
> worried about under these rules.
>
> What's your point, exactly?
>
No point, really.
> > Then this case gives us another example of why accessing emin and elow in
> > the very deep reclaiming code (get_scan_count) is the root of ALL
> > EVIL.
>
> You must be confusing this software engineering list with a witch
> doctor conference.
No, I didn't consider you as a witch doctor.
--
Thanks
Yafang
On Wed 29-04-20 10:03:30, Johannes Weiner wrote:
> On Wed, Apr 29, 2020 at 12:15:10PM +0200, Michal Hocko wrote:
> > On Tue 28-04-20 19:26:47, Chris Down wrote:
> > > From: Yafang Shao <[email protected]>
> > >
> > > A cgroup can have both memory protection and a memory limit to isolate
> > > it from its siblings in both directions - for example, to prevent it
> > > from being shrunk below 2G under high pressure from outside, but also
> > > from growing beyond 4G under low pressure.
> > >
> > > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > > implemented proportional scan pressure so that multiple siblings in
> > > excess of their protection settings don't get reclaimed equally but
> > > instead in accordance with their unprotected portion.
> > >
> > > During limit reclaim, this proportionality shouldn't apply of course:
> > > there is no competition, all pressure is from within the cgroup and
> > > should be applied as such. Reclaim should operate at full efficiency.
> > >
> > > However, mem_cgroup_protected() never expected anybody to look at the
> > > effective protection values when it indicated that the cgroup is above
> > > its protection. As a result, a query during limit reclaim may return
> > > stale protection values that were calculated by a previous reclaim cycle
> > > in which the cgroup did have siblings.
> > >
> > > When this happens, reclaim is unnecessarily hesitant and potentially
> > > slow to meet the desired limit. In theory this could lead to premature
> > > OOM kills, although it's not obvious this has occurred in practice.
> >
> > Thanks, this describes the underlying problem. I would also be explicit
> > that the issue should be visible only on tail memcgs which have both
> > max/high and protection configured and the effect depends on the
> > difference between the two (the smaller it is, the larger the effect).
> >
> > There is no mention of the fix. The patch resets effective values for
> > the reclaim root and I've had some concerns about that
> > http://lkml.kernel.org/r/[email protected].
> > Johannes has argued that other races are possible and I didn't get to
> > think about it thoroughly. But this patch is introducing a new
> > possibility of breaking protection. If we want to have a quick and
> > simple fix that would be easier to backport to older kernels then I
> > would feel much better if we simply worked around the problem as
> > suggested earlier http://lkml.kernel.org/r/[email protected]
> > We can rework the effective values calculation to be more robust against
> > races on top of that because this is likely a more tricky thing to do.
>
> Well, can you please *do* think more thoroughly about what I wrote,
> instead of pushing for an alternative patch on gut feeling alone?
>
> Especially when you imply that this should be a stable patch.
The patch has a Fixes tag and so it is not unrealistic to assume that it
will hit older trees. I wasn't really implying a stable tree backport and
I do not think this is stable material.
All I was arguing here is that a fix/workaround which doesn't add new
side effects is a safer option.
> Not only does your alternative patch not protect against the race you
> are worried about, the race itself doesn't matter. Racing reclaimers
> will write their competing views of the world into the shared state on
> all other levels anyway.
>
> And that's okay. If the configuration and memory usage is such that
> there is at least one reclaimer that scans without any protection
> (like a limit reclaimer), it's not a problem when a second reclaimer
> that meant to do protected global reclaim will also do one iteration
> without protection. It's no different than if a second thread had
> entered limit reclaim through another internal allocation.
Yes I do agree here.
> There is no semantical violation with the race in your patch or the
> race in this patch. Any effective protection that becomes visible is
> 1) permitted by the configuration, but 2) also triggered *right now*
> by an acute need to reclaim memory with these parameters.
>
> The *right now* part is important. That's what's broken before either
> patch, and that's what we're fixing: to see really, really *old* stale state
> that might not be representative of the config semantics anymore.
No disagreement here either. But please remember that the example I've
given shows a clear violation of the protection. Let me paste it here so
that we have both examples in one email:
: Let's have global and A's reclaim in parallel:
: |
: A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
: |\
: | C (low = 1G, usage = 2.5G)
: B (low = 1G, usage = 0.5G)
:
: for A reclaim we have
: B.elow = B.low
: C.elow = C.low
:
: For the global reclaim
: A.elow = A.low
: B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
: C.elow = min(C.usage, C.low)
:
: With the effective values reseting we have A reclaim
: A.elow = 0
: B.elow = B.low
: C.elow = C.low
:
: and global reclaim could see the above and then
: B.elow = C.elow = 0 because children_low_usage > A.elow
I hope we both agree that B shouldn't be reclaimed whether the reclaim
comes from A or above A. The race is not possible with the patch
working around the problem in mem_cgroup_protection().
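(For concreteness, the workaround referred to above is a check at the
query site, roughly of the following shape; this is a sketch of the
idea rather than the exact proposed patch:)

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

	/*
	 * The reclaim root is not protected against itself, so never
	 * report possibly-stale emin/elow for it.
	 */
	if (root == memcg)
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}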
> Since you haven't linked to my email, here is my counter argument to
> the alternative patch "fixing" this race somehow.
>
> A reclaim:
>
> root
> `- A (low=2G, max=3G -> elow=0)
> `- A1 (low=0G -> elow=0)
>
> Global reclaim:
>
> root
> `- A (low=2G, max=3G -> elow=2G)
> `- A1 (low=0G -> elow=2G)
>
> During global reclaim, A1 is supposed to have 2G effective low
> protection. If A limit reclaim races, it can set A1's elow to
> 0. Global reclaim will now query mem_cgroup_protection(root, A1), the
> root == memcg check you insist we add will fail and it'll reclaim A1
> without protection.
You are right that hooking into mem_cgroup_protection wouldn't prevent
the race in this example. But in this example the race really doesn't
matter because the overall protection is not violated. A1 would get
reclaimed by A anyway. But in my example there is a protected memcg
which shouldn't get reclaimed.
--
Michal Hocko
SUSE Labs
On Wed, Apr 29, 2020 at 05:04:14PM +0200, Michal Hocko wrote:
> On Wed 29-04-20 10:03:30, Johannes Weiner wrote:
> > On Wed, Apr 29, 2020 at 12:15:10PM +0200, Michal Hocko wrote:
> > > On Tue 28-04-20 19:26:47, Chris Down wrote:
> > > > From: Yafang Shao <[email protected]>
> > > >
> > > > A cgroup can have both memory protection and a memory limit to isolate
> > > > it from its siblings in both directions - for example, to prevent it
> > > > from being shrunk below 2G under high pressure from outside, but also
> > > > from growing beyond 4G under low pressure.
> > > >
> > > > Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> > > > implemented proportional scan pressure so that multiple siblings in
> > > > excess of their protection settings don't get reclaimed equally but
> > > > instead in accordance to their unprotected portion.
> > > >
> > > > During limit reclaim, this proportionality shouldn't apply of course:
> > > > there is no competition, all pressure is from within the cgroup and
> > > > should be applied as such. Reclaim should operate at full efficiency.
> > > >
> > > > However, mem_cgroup_protected() never expected anybody to look at the
> > > > effective protection values when it indicated that the cgroup is above
> > > > its protection. As a result, a query during limit reclaim may return
> > > > stale protection values that were calculated by a previous reclaim cycle
> > > > in which the cgroup did have siblings.
> > > >
> > > > When this happens, reclaim is unnecessarily hesitant and potentially
> > > > slow to meet the desired limit. In theory this could lead to premature
> > > > OOM kills, although it's not obvious this has occurred in practice.
> > >
> > > Thanks, this describes the underlying problem. I would also be explicit
> > > that the issue should be visible only on tail memcgs which have both
> > > max/high and protection configured, and that the effect depends on the
> > > difference between the two (the smaller it is, the larger the effect).
> > >
> > > There is no mention of the fix. The patch resets effective values for
> > > the reclaim root and I've had some concerns about that
> > > http://lkml.kernel.org/r/[email protected].
> > > Johannes has argued that other races are possible and I didn't get to
> > > think about it thoroughly. But this patch is introducing a new
> > > possibility of breaking protection. If we want to have a quick and
> > > simple fix that would be easier to backport to older kernels then I
> > > would feel much better if we simply worked around the problem as
> > > suggested earlier http://lkml.kernel.org/r/[email protected]
> > > We can rework the effective values calculation to be more robust against
> > > races on top of that because this is likely a more tricky thing to do.
> >
> > Well, can you please *do* think more thoroughly about what I wrote,
> > instead of pushing for an alternative patch on gut feeling alone?
> >
> > Especially when you imply that this should be a stable patch.
>
> The patch has a Fixes tag and so it is not unrealistic to assume that it
> will hit older trees. I wasn't really implying stable tree backport and
> I do not think this is stable material.
Okay, thanks for clarifying.
> > Not only does your alternative patch not protect against the race you
> > are worried about, the race itself doesn't matter. Racing reclaimers
> > will write their competing views of the world into the shared state on
> > all other levels anyway.
> >
> > And that's okay. If the configuration and memory usage is such that
> > there is at least one reclaimer that scans without any protection
> > (like a limit reclaimer), it's not a problem when a second reclaimer
> > that meant to do protected global reclaim will also do one iteration
> > without protection. It's no different than if a second thread had
> > entered limit reclaim through another internal allocation.
>
> Yes I do agree here.
Okay.
> > There is no semantic violation with the race in your patch or the
> > race in this patch. Any effective protection that becomes visible is
> > 1) permitted by the configuration, but 2) also triggered *right now*
> > by an acute need to reclaim memory with these parameters.
> >
> > The *right now* part is important. That's what's broken before either
> > patch, and that's what we're fixing: to see really, really *old* stale
> > state that might not be representative of the config semantics anymore.
>
> No disagreement here either. But please remember that the example I've
> given is a clear violation of the protection. Let me paste it here so
> that we have both examples in one email:
> : Let's have global and A's reclaim in parallel:
> : |
> : A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
> : |\
> : | C (low = 1G, usage = 2.5G)
> : B (low = 1G, usage = 0.5G)
> :
> : for A reclaim we have
> : B.elow = B.low
> : C.elow = C.low
> :
> : For the global reclaim
> : A.elow = A.low
> : B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
> : C.elow = min(C.usage, C.low)
> :
> : With the effective values resetting we have A reclaim
> : A.elow = 0
> : B.elow = B.low
> : C.elow = C.low
> :
> : and global reclaim could see the above and then
> : B.elow = C.elow = 0 because children_low_usage > A.elow
>
> I hope we both agree that B shouldn't be reclaimed whether the reclaim
> comes from A or above A. The race is not possible with the patch
> working around the problem in mem_cgroup_protection().
Okay, I misread this the first time.
The problem is that in limit reclaim we reset A.elow in anticipation
of treating B and C as the top-level groups of our scan cycle and will
be using their B.low and C.low verbatim. Global reclaim can then visit
them before us and propagate A.elow=0 down to them.
But doesn't this problem cut both ways? Say you have the following
subtree:
A (memory.max=10G, memory.low=2G)
`- A1 (memory.low=max)
`- A2 (memory.low=max)
`- A3 (memory.low=0)
A similar race can give A1 and A2 absolute exemption from global
reclaim instead of proportional distribution of the parental 2G.
The chances of that happening could be boosted maliciously by
triggering many short limit reclaim invocations, like ioless cache
from sparse files, to keep overwriting A1.elow and A2.elow to max.
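To sketch that interleaving (assuming the reset-on-root semantics of
this patch, with ~0UL standing in for low=max; an illustration, not
kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long A1_elow, A2_elow;

        /* t0: A's limit reclaim treats A as the reclaim root, so A1
         * and A2 get their configured low ("max") taken verbatim. */
        A1_elow = ~0UL;
        A2_elow = ~0UL;

        /* t1: global reclaim visits A1/A2 before recomputing their
         * protection and consumes the stale values: both children
         * look absolutely exempt instead of proportionally sharing
         * A's 2G. */
        printf("global reclaim sees A1.elow=%lx A2.elow=%lx\n",
               A1_elow, A2_elow);
        return 0;
}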
I think to address this, we need a more comprehensive solution and
introduce some form of serialization. I'm not sure yet how that would
look.
I'm still not sure it's worth having a somewhat ugly workaround in
mem_cgroup_protection() to protect against half of the bug. If you
think so, the full problem should at least be documented and marked
XXX or something.
In practice, I doubt this matters all that much because limit reclaim
and global reclaim tend to occur in complementary
containerization/isolation strategies, not heavily simultaneously.
On Wed, Apr 29, 2020 at 2:26 AM Chris Down <[email protected]> wrote:
>
> From: Yafang Shao <[email protected]>
>
> A cgroup can have both memory protection and a memory limit to isolate
> it from its siblings in both directions - for example, to prevent it
> from being shrunk below 2G under high pressure from outside, but also
> from growing beyond 4G under low pressure.
>
> Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> implemented proportional scan pressure so that multiple siblings in
> excess of their protection settings don't get reclaimed equally but
> instead in accordance to their unprotected portion.
>
> During limit reclaim, this proportionality shouldn't apply of course:
> there is no competition, all pressure is from within the cgroup and
> should be applied as such. Reclaim should operate at full efficiency.
>
> However, mem_cgroup_protected() never expected anybody to look at the
> effective protection values when it indicated that the cgroup is above
> its protection. As a result, a query during limit reclaim may return
> stale protection values that were calculated by a previous reclaim cycle
> in which the cgroup did have siblings.
>
> When this happens, reclaim is unnecessarily hesitant and potentially
> slow to meet the desired limit. In theory this could lead to premature
> OOM kills, although it's not obvious this has occurred in practice.
>
> Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> Signed-off-by: Yafang Shao <[email protected]>
> Signed-off-by: Chris Down <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Roman Gushchin <[email protected]>
>
> [[email protected]: rework code comment]
> [[email protected]: changelog]
> [[email protected]: fix store tear]
> [[email protected]: retitle]
> ---
> mm/memcontrol.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 0be00826b832..b0374be44e9e 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -6392,8 +6392,19 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
>
> if (!root)
> root = root_mem_cgroup;
> - if (memcg == root)
> + if (memcg == root) {
> + /*
> + * The cgroup is the reclaim root in this reclaim
> + * cycle, and therefore not protected. But it may have
> + * stale effective protection values from previous
> + * cycles in which it was not the reclaim root - for
> + * example, global reclaim followed by limit reclaim.
> + * Reset these values for mem_cgroup_protection().
> + */
> + WRITE_ONCE(memcg->memory.emin, 0);
> + WRITE_ONCE(memcg->memory.elow, 0);
Hi Chris,
Would you please add some comments above these newly added WRITE_ONCE()?
E.g.:
What are they meant to fix?
Why must we add WRITE_ONCE() and READ_ONCE() here and there all over
the memcg protection code?
Otherwise, it may be harder for others to understand.
> return MEMCG_PROT_NONE;
> + }
>
> usage = page_counter_read(&memcg->memory);
> if (!usage)
> --
> 2.26.2
>
--
Thanks
Yafang
Hi Yafang,
Yafang Shao writes:
>Would you please add some comments above these newly added WRITE_ONCE()?
>E.g.:
>What are they meant to fix?
>Why must we add WRITE_ONCE() and READ_ONCE() here and there all over
>the memcg protection code?
>Otherwise, it may be harder for others to understand.
There is already discussion in the changelogs for previous store tear
improvements. For example, b3a7822e5e75 ("mm, memcg: prevent
mem_cgroup_protected store tearing").
WRITE_ONCE and READ_ONCE are standard compiler barriers, used in this case to avoid
store tears from writes in another thread (effective protection caching is
designed by its very nature to permit racing, but tearing is non-ideal).
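To illustrate, here is a simplified userspace rendition of the idiom.
The macro definitions below are stand-ins (the kernel's versions also
handle different access sizes), but they show why a single volatile
access rules out tearing:

#include <stdio.h>

/* Simplified userspace stand-ins for the kernel macros. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static unsigned long elow;      /* shared, racily accessed field */

int main(void)
{
        /* A plain "elow = val" store may legally be split by the
         * compiler into several smaller stores, so a racing reader
         * could observe a half-written value.  The volatile access
         * forces one full-width store/load; the race itself is still
         * permitted, a torn value is not. */
        WRITE_ONCE(elow, 2UL << 30);
        printf("elow=%lu\n", READ_ONCE(elow));
        return 0;
}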
You can find out more about them in the "COMPILER BARRIER" section in
Documentation/memory-barriers.txt. I'm not really seeing the value of adding an
extra comment about this specific use of them, unless you have some more
explicit concern.
On Thu, Apr 30, 2020 at 9:16 AM Chris Down <[email protected]> wrote:
>
> Hi Yafang,
>
> Yafang Shao writes:
> >Would you please add some comments above these newly added WRITE_ONCE()?
> >E.g.:
> >What are they meant to fix?
> >Why must we add WRITE_ONCE() and READ_ONCE() here and there all over
> >the memcg protection code?
> >Otherwise, it may be harder for others to understand.
>
> There is already discussion in the changelogs for previous store tear
> improvements. For example, b3a7822e5e75 ("mm, memcg: prevent
> mem_cgroup_protected store tearing").
>
I'm sorry that I missed the changelog in the other one.
So it would be better to add that changelog or a comment to this one as well.
> WRITE_ONCE and READ_ONCE are standard compiler barriers, used in this case to avoid
> store tears from writes in another thread (effective protection caching is
> designed by its very nature to permit racing, but tearing is non-ideal).
>
> You can find out more about them in the "COMPILER BARRIER" section in
> Documentation/memory-barriers.txt. I'm not really seeing the value of adding an
> extra comment about this specific use of them, unless you have some more
> explicit concern.
My concern is why we add these barriers to the memcg protection fields
specifically but don't add them to the other members, like
memcg->oom_group, which have the same issue.
What is the difference between these members and those?
--
Thanks
Yafang
Yafang Shao writes:
>My concern is why we add these barriers to the memcg protection fields
>specifically but don't add them to the other members, like
>memcg->oom_group, which have the same issue.
>What is the difference between these members and those?
There are certainly more missing cases -- I didn't look at oom_group
specifically, but it sounds likely if there are no other mitigating factors.
Most of us have just been busy and haven't had time to comprehensively fix all
the potential store and load tears.
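For example, if memcg->oom_group is indeed read and written without
other synchronization, the conversion would presumably look something
like this (an illustrative sketch with made-up helper names, not an
actual patch):

/* Hypothetical accessors, sketched for illustration only. */
static inline bool mem_cgroup_oom_group_read(struct mem_cgroup *memcg)
{
        /* Avoid a torn load if a writer races with us. */
        return READ_ONCE(memcg->oom_group);
}

static inline void mem_cgroup_oom_group_write(struct mem_cgroup *memcg,
                                              bool val)
{
        /* Single full-width store; racing readers see old or new. */
        WRITE_ONCE(memcg->oom_group, val);
}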
Tearing is another case of something that would be nice to fix once and for all
in the memcg code, but isn't causing any significant issues for the time being.
We should certainly aim to avoid introducing any new tearing opportunities,
though :-)
So the answer is just that improvement is incremental and we've not had the
time to track down and fix them all. If you find more cases, feel free to send
out the patches and I'll be happy to take a look.
On Thu, Apr 30, 2020 at 9:46 AM Chris Down <[email protected]> wrote:
>
> Yafang Shao writes:
> >My concern is why we add these barriers to the memcg protection fields
> >specifically but don't add them to the other members, like
> >memcg->oom_group, which have the same issue.
> >What is the difference between these members and those?
>
> There are certainly more missing cases -- I didn't look at oom_group
> specifically, but it sounds likely if there are no other mitigating factors.
> Most of us have just been busy and haven't had time to comprehensively fix all
> the potential store and load tears.
>
> Tearing is another case of something that would be nice to fix once and for all
> in the memcg code, but isn't causing any significant issues for the time being.
> We should certainly aim to avoid introducing any new tearing opportunities,
> though :-)
>
> So the answer is just that improvement is incremental and we've not had the
> time to track down and fix them all. If you find more cases, feel free to send
> out the patches and I'll be happy to take a look.
Thanks for your suggestion.
I'm planning to add these barriers all over the memory cgroup code.
--
Thanks
Yafang
On Wed 29-04-20 12:56:27, Johannes Weiner wrote:
[...]
> I think to address this, we need a more comprehensive solution and
> introduce some form of serialization. I'm not sure yet how that would
> look.
Yeah, that is what I've tried to express earlier and that is why I would
rather go with an uglier workaround for now and think about a more
robust effective values calculation on top.
> I'm still not sure it's worth having a somewhat ugly workaround in
> mem_cgroup_protection() to protect against half of the bug. If you
> think so, the full problem should at least be documented and marked
> XXX or something.
Yes, this makes sense to me. What about the following?
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1b4150ff64be..50ffbc17cdd8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -350,6 +350,42 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
if (mem_cgroup_disabled())
return 0;
+ /*
+ * There is no reclaim protection applied to a targeted reclaim.
+ * We are special casing this specific case here because
+ * mem_cgroup_protected calculation is not robust enough to keep
+ * the protection invariant for calculated effective values for
+ * parallel reclaimers with different reclaim targets. This is
+ * especially a problem for tail memcgs (as they have pages on LRU)
+ * which would want to have effective values 0 for targeted reclaim
+ * but a different value for external reclaim.
+ *
+ * Example
+ * Let's have global and A's reclaim in parallel:
+ * |
+ * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+ * |\
+ * | C (low = 1G, usage = 2.5G)
+ * B (low = 1G, usage = 0.5G)
+ *
+ * For the global reclaim
+ * A.elow = A.low
+ * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+ * C.elow = min(C.usage, C.low)
+ *
+ * With the effective values resetting we have A reclaim
+ * A.elow = 0
+ * B.elow = B.low
+ * C.elow = C.low
+ *
+ * If the global reclaim races with A's reclaim then
+ * B.elow = C.elow = 0 because children_low_usage > A.elow
+ * is possible and reclaiming B would be violating the protection.
+ *
+ */
+ if (memcg == root)
+ return 0;
+
if (in_low_reclaim)
return READ_ONCE(memcg->memory.emin);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 05b4ec2c6499..df88a22f09bc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6385,6 +6385,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
if (!root)
root = root_mem_cgroup;
+
+ /*
+ * Effective values of the reclaim targets are ignored so they
+ * can be stale. Have a look at mem_cgroup_protection for more
+ * details.
+ * TODO: calculation should be more robust so that we do not need
+ * that special casing.
+ */
if (memcg == root)
return MEMCG_PROT_NONE;
> In practice, I doubt this matters all that much because limit reclaim
> and global reclaim tend to occur in complementary
> containerization/isolation strategies, not heavily simultaneously.
I would expect that as well but this is always hard to tell.
--
Michal Hocko
SUSE Labs
On Thu, Apr 30, 2020 at 04:57:21PM +0200, Michal Hocko wrote:
> On Wed 29-04-20 12:56:27, Johannes Weiner wrote:
> [...]
> > I think to address this, we need a more comprehensive solution and
> > introduce some form of serialization. I'm not sure yet how that would
> > look.
>
> Yeah, that is what I've tried to express earlier and that is why I would
> rather go with an uglier workaround for now and think about a more
> robust effective values calculation on top.
>
> > I'm still not sure it's worth having a somewhat ugly workaround in
> > mem_cgroup_protection() to protect against half of the bug. If you
> > think so, the full problem should at least be documented and marked
> > XXX or something.
>
> Yes, this makes sense to me. What about the following?
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 1b4150ff64be..50ffbc17cdd8 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -350,6 +350,42 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
> if (mem_cgroup_disabled())
> return 0;
>
> + /*
> + * There is no reclaim protection applied to a targeted reclaim.
> + * We are special casing this specific case here because
> + * mem_cgroup_protected calculation is not robust enough to keep
> + * the protection invariant for calculated effective values for
> + * parallel reclaimers with different reclaim targets. This is
> + * especially a problem for tail memcgs (as they have pages on LRU)
> + * which would want to have effective values 0 for targeted reclaim
> + * but a different value for external reclaim.
> + *
> + * Example
> + * Let's have global and A's reclaim in parallel:
> + * |
> + * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
> + * |\
> + * | C (low = 1G, usage = 2.5G)
> + * B (low = 1G, usage = 0.5G)
> + *
> + * For the global reclaim
> + * A.elow = A.low
> + * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
> + * C.elow = min(C.usage, C.low)
> + *
> + * With the effective values resetting we have A reclaim
> + * A.elow = 0
> + * B.elow = B.low
> + * C.elow = C.low
> + *
> + * If the global reclaim races with A's reclaim then
> + * B.elow = C.elow = 0 because children_low_usage > A.elow
> + * is possible and reclaiming B would be violating the protection.
> + *
> + */
> + if (memcg == root)
> + return 0;
> +
> if (in_low_reclaim)
> return READ_ONCE(memcg->memory.emin);
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 05b4ec2c6499..df88a22f09bc 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -6385,6 +6385,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
>
> if (!root)
> root = root_mem_cgroup;
> +
> + /*
> + * Effective values of the reclaim targets are ignored so they
> + * can be stale. Have a look at mem_cgroup_protection for more
> + * details.
> + * TODO: calculation should be more robust so that we do not need
> + * that special casing.
> + */
> if (memcg == root)
> return MEMCG_PROT_NONE;
Acked-by: Roman Gushchin <[email protected]>
Thanks!
On Thu, Apr 30, 2020 at 10:57 PM Michal Hocko <[email protected]> wrote:
>
> On Wed 29-04-20 12:56:27, Johannes Weiner wrote:
> [...]
> > I think to address this, we need a more comprehensive solution and
> > introduce some form of serialization. I'm not sure yet how that would
> > look.
>
> Yeah, that is what I've tried to express earlier and that is why I would
> rather go with an uglier workaround for now and think about a more
> robust effective values calculation on top.
>
Agreed.
If there's a more robust effective values calculation on top, then we
don't need to hack it here and there.
> > I'm still not sure it's worth having a somewhat ugly workaround in
> > mem_cgroup_protection() to protect against half of the bug. If you
> > think so, the full problem should at least be documented and marked
> > XXX or something.
>
> Yes, this makes sense to me. What about the following?
Many thanks for the explanation on this workaround.
With this explanation, I think the others will have a clear idea why
we must add this ugly workaround here.
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 1b4150ff64be..50ffbc17cdd8 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -350,6 +350,42 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
> if (mem_cgroup_disabled())
> return 0;
>
> + /*
> + * There is no reclaim protection applied to a targeted reclaim.
> + * We are special casing this specific case here because
> + * mem_cgroup_protected calculation is not robust enough to keep
> + * the protection invariant for calculated effective values for
> + * parallel reclaimers with different reclaim targets. This is
> + * especially a problem for tail memcgs (as they have pages on LRU)
> + * which would want to have effective values 0 for targeted reclaim
> + * but a different value for external reclaim.
> + *
> + * Example
> + * Let's have global and A's reclaim in parallel:
> + * |
> + * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
> + * |\
> + * | C (low = 1G, usage = 2.5G)
> + * B (low = 1G, usage = 0.5G)
> + *
> + * For the global reclaim
> + * A.elow = A.low
> + * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
> + * C.elow = min(C.usage, C.low)
> + *
> + * With the effective values resetting we have A reclaim
> + * A.elow = 0
> + * B.elow = B.low
> + * C.elow = C.low
> + *
> + * If the global reclaim races with A's reclaim then
> + * B.elow = C.elow = 0 because children_low_usage > A.elow
> + * is possible and reclaiming B would be violating the protection.
> + *
> + */
> + if (memcg == root)
> + return 0;
> +
> if (in_low_reclaim)
> return READ_ONCE(memcg->memory.emin);
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 05b4ec2c6499..df88a22f09bc 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -6385,6 +6385,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
>
> if (!root)
> root = root_mem_cgroup;
> +
> + /*
> + * Effective values of the reclaim targets are ignored so they
> + * can be stale. Have a look at mem_cgroup_protection for more
> + * details.
> + * TODO: calculation should be more robust so that we do not need
> + * that special casing.
> + */
> if (memcg == root)
> return MEMCG_PROT_NONE;
>
>
> > In practice, I doubt this matters all that much because limit reclaim
> > and global reclaim tend to occur in complementary
> > containerization/isolation strategies, not heavily simultaneously.
>
> I would expect that as well but this is always hard to tell.
>
> --
> Michal Hocko
> SUSE Labs
--
Thanks
Yafang
On Fri 01-05-20 07:59:57, Yafang Shao wrote:
> On Thu, Apr 30, 2020 at 10:57 PM Michal Hocko <[email protected]> wrote:
> >
> > On Wed 29-04-20 12:56:27, Johannes Weiner wrote:
> > [...]
> > > I think to address this, we need a more comprehensive solution and
> > > introduce some form of serialization. I'm not sure yet how that would
> > > look.
> >
> > Yeah, that is what I've tried to express earlier and that is why I would
> > rather go with an uglier workaround for now and think about a more
> > robust effective values calculation on top.
> >
>
> Agreed.
> If there's a more robust effective values calculation on top, then we
> don't need to hack it here and there.
>
> > > I'm still not sure it's worth having a somewhat ugly workaround in
> > > mem_cgroup_protection() to protect against half of the bug. If you
> > > think so, the full problem should at least be documented and marked
> > > XXX or something.
> >
> > Yes, this makes sense to me. What about the following?
>
> Many thanks for the explanation on this workaround.
> With this explanation, I think the others will have a clear idea why
> we must add this ugly workaround here.
OK, this would be the patch with the full changelog. If both Chris and
Johannes are ok with this, I would suggest replacing the one Andrew took
already
From dfcdbfd336d2d23195ec9d90e6e58898f49f8998 Mon Sep 17 00:00:00 2001
From: Yafang Shao <[email protected]>
Date: Mon, 4 May 2020 09:10:03 +0200
Subject: [PATCH] mm, memcg: Avoid stale protection values when cgroup is above
protection
A cgroup can have both memory protection and a memory limit to isolate
it from its siblings in both directions - for example, to prevent it
from being shrunk below 2G under high pressure from outside, but also
from growing beyond 4G under low pressure.
Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
implemented proportional scan pressure so that multiple siblings in
excess of their protection settings don't get reclaimed equally but
instead in accordance to their unprotected portion.
During limit reclaim, this proportionality shouldn't apply of course:
there is no competition, all pressure is from within the cgroup and
should be applied as such. Reclaim should operate at full efficiency.
However, mem_cgroup_protected() never expected anybody to look at the
effective protection values when it indicated that the cgroup is above
its protection. As a result, a query during limit reclaim may return
stale protection values that were calculated by a previous reclaim cycle
in which the cgroup did have siblings.
When this happens, reclaim is unnecessarily hesitant and potentially
slow to meet the desired limit. In theory this could lead to premature
OOM kills, although it's not obvious this has occurred in practice.
Work around the problem by special-casing reclaim roots in
mem_cgroup_protection. These memcgs never participate in the
reclaim protection because the reclaim is internal.
We have to ignore effective protection values for reclaim roots because
mem_cgroup_protected might be called from racing reclaim contexts with
different roots. The calculation relies on root -> leaf tree traversal,
therefore top-down reclaim protection invariants should hold. The only
exception is the reclaim root which should have effective protection set
to 0 but that would be problematic for the following setup:
Let's have global and A's reclaim in parallel:
|
A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
|\
| C (low = 1G, usage = 2.5G)
B (low = 1G, usage = 0.5G)
for A reclaim we have
B.elow = B.low
C.elow = C.low
For the global reclaim
A.elow = A.low
B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
C.elow = min(C.usage, C.low)
With the effective values resetting we have A reclaim
A.elow = 0
B.elow = B.low
C.elow = C.low
and global reclaim could see the above and then
B.elow = C.elow = 0 because children_low_usage > A.elow
Which means that protected memcgs would get reclaimed.
In the future we would like to make mem_cgroup_protected more robust against
racing reclaim contexts, but that is likely a more complex solution than
this simple workaround.
[[email protected] - large part of the changelog]
[[email protected] - workaround explanation]
Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
Signed-off-by: Yafang Shao <[email protected]>
Signed-off-by: Michal Hocko <[email protected]>
---
include/linux/memcontrol.h | 36 ++++++++++++++++++++++++++++++++++++
mm/memcontrol.c | 8 ++++++++
2 files changed, 44 insertions(+)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1b4150ff64be..50ffbc17cdd8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -350,6 +350,42 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
if (mem_cgroup_disabled())
return 0;
+ /*
+ * There is no reclaim protection applied to a targeted reclaim.
+ * We are special casing this specific case here because
+ * mem_cgroup_protected calculation is not robust enough to keep
+ * the protection invariant for calculated effective values for
+ * parallel reclaimers with different reclaim targets. This is
+ * especially a problem for tail memcgs (as they have pages on LRU)
+ * which would want to have effective values 0 for targeted reclaim
+ * but a different value for external reclaim.
+ *
+ * Example
+ * Let's have global and A's reclaim in parallel:
+ * |
+ * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+ * |\
+ * | C (low = 1G, usage = 2.5G)
+ * B (low = 1G, usage = 0.5G)
+ *
+ * For the global reclaim
+ * A.elow = A.low
+ * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+ * C.elow = min(C.usage, C.low)
+ *
+ * With the effective values resetting we have A reclaim
+ * A.elow = 0
+ * B.elow = B.low
+ * C.elow = C.low
+ *
+ * If the global reclaim races with A's reclaim then
+ * B.elow = C.elow = 0 because children_low_usage > A.elow
+ * is possible and reclaiming B would be violating the protection.
+ *
+ */
+ if (memcg == root)
+ return 0;
+
if (in_low_reclaim)
return READ_ONCE(memcg->memory.emin);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 05b4ec2c6499..df88a22f09bc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6385,6 +6385,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
if (!root)
root = root_mem_cgroup;
+
+ /*
+ * Effective values of the reclaim targets are ignored so they
+ * can be stale. Have a look at mem_cgroup_protection for more
+ * details.
+ * TODO: calculation should be more robust so that we do not need
+ * that special casing.
+ */
if (memcg == root)
return MEMCG_PROT_NONE;
--
2.25.1
--
Michal Hocko
SUSE Labs
On Mon, May 04, 2020 at 09:23:42AM +0200, Michal Hocko wrote:
> On Fri 01-05-20 07:59:57, Yafang Shao wrote:
> > On Thu, Apr 30, 2020 at 10:57 PM Michal Hocko <[email protected]> wrote:
> > >
> > > On Wed 29-04-20 12:56:27, Johannes Weiner wrote:
> > > [...]
> > > > I think to address this, we need a more comprehensive solution and
> > > > introduce some form of serialization. I'm not sure yet how that would
> > > > look.
> > >
> > > Yeah, that is what I've tried to express earlier and that is why I would
> > > rather go with an uglier workaround for now and think about a more
> > > robust effective values calculation on top.
> > >
> >
> > Agreed.
> > If there's a more robust effective values calculation on top, then we
> > don't need to hack it here and there.
> >
> > > > I'm still not sure it's worth having a somewhat ugly workaround in
> > > > mem_cgroup_protection() to protect against half of the bug. If you
> > > > think so, the full problem should at least be documented and marked
> > > > XXX or something.
> > >
> > > Yes, this makes sense to me. What about the following?
> >
> > Many thanks for the explanation on this workaround.
> > With this explanation, I think the others will have a clear idea why
> > we must add this ugly workaround here.
>
> OK, this would be the patch with the full changelog. If both Chris and
> Johannes are ok with this, I would suggest replacing the one Andrew took
> already
>
>
> From dfcdbfd336d2d23195ec9d90e6e58898f49f8998 Mon Sep 17 00:00:00 2001
> From: Yafang Shao <[email protected]>
> Date: Mon, 4 May 2020 09:10:03 +0200
> Subject: [PATCH] mm, memcg: Avoid stale protection values when cgroup is above
> protection
>
> A cgroup can have both memory protection and a memory limit to isolate
> it from its siblings in both directions - for example, to prevent it
> from being shrunk below 2G under high pressure from outside, but also
> from growing beyond 4G under low pressure.
>
> Commit 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> implemented proportional scan pressure so that multiple siblings in
> excess of their protection settings don't get reclaimed equally but
> instead in accordance to their unprotected portion.
>
> During limit reclaim, this proportionality shouldn't apply of course:
> there is no competition, all pressure is from within the cgroup and
> should be applied as such. Reclaim should operate at full efficiency.
>
> However, mem_cgroup_protected() never expected anybody to look at the
> effective protection values when it indicated that the cgroup is above
> its protection. As a result, a query during limit reclaim may return
> stale protection values that were calculated by a previous reclaim cycle
> in which the cgroup did have siblings.
>
> When this happens, reclaim is unnecessarily hesitant and potentially
> slow to meet the desired limit. In theory this could lead to premature
> OOM kills, although it's not obvious this has occurred in practice.
>
> Work around the problem by special-casing reclaim roots in
> mem_cgroup_protection. These memcgs never participate in the
> reclaim protection because the reclaim is internal.
>
> We have to ignore effective protection values for reclaim roots because
> mem_cgroup_protected might be called from racing reclaim contexts with
> different roots. The calculation relies on root -> leaf tree traversal,
> therefore top-down reclaim protection invariants should hold. The only
> exception is the reclaim root which should have effective protection set
> to 0 but that would be problematic for the following setup:
> Let's have global and A's reclaim in parallel:
> |
> A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
> |\
> | C (low = 1G, usage = 2.5G)
> B (low = 1G, usage = 0.5G)
>
> for A reclaim we have
> B.elow = B.low
> C.elow = C.low
>
> For the global reclaim
> A.elow = A.low
> B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
> C.elow = min(C.usage, C.low)
>
> With the effective values resetting we have A reclaim
> A.elow = 0
> B.elow = B.low
> C.elow = C.low
>
> and global reclaim could see the above and then
> B.elow = C.elow = 0 because children_low_usage > A.elow
>
> Which means that protected memcgs would get reclaimed.
>
> In the future we would like to make mem_cgroup_protected more robust against
> racing reclaim contexts, but that is likely a more complex solution than
> this simple workaround.
>
> [[email protected] - large part of the changelog]
> [[email protected] - workaround explanation]
> Fixes: 9783aa9917f8 ("mm, memcg: proportional memory.{low,min} reclaim")
> Signed-off-by: Yafang Shao <[email protected]>
> Signed-off-by: Michal Hocko <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
> ---
> include/linux/memcontrol.h | 36 ++++++++++++++++++++++++++++++++++++
> mm/memcontrol.c | 8 ++++++++
> 2 files changed, 44 insertions(+)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 1b4150ff64be..50ffbc17cdd8 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -350,6 +350,42 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
> if (mem_cgroup_disabled())
> return 0;
>
> + /*
> + * There is no reclaim protection applied to a targeted reclaim.
> + * We are special casing this specific case here because
> + * mem_cgroup_protected calculation is not robust enough to keep
> + * the protection invariant for calculated effective values for
> + * parallel reclaimers with different reclaim targets. This is
> + * especially a problem for tail memcgs (as they have pages on LRU)
> + * which would want to have effective values 0 for targeted reclaim
> + * but a different value for external reclaim.
> + *
> + * Example
> + * Let's have global and A's reclaim in parallel:
> + * |
> + * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
> + * |\
> + * | C (low = 1G, usage = 2.5G)
> + * B (low = 1G, usage = 0.5G)
> + *
> + * For the global reclaim
> + * A.elow = A.low
> + * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
> + * C.elow = min(C.usage, C.low)
> + *
> + * With the effective values resetting we have A reclaim
> + * A.elow = 0
> + * B.elow = B.low
> + * C.elow = C.low
> + *
> + * If the global reclaim races with A's reclaim then
> + * B.elow = C.elow = 0 because children_low_usage > A.elow
> + * is possible and reclaiming B would be violating the protection.
> + *
> + */
> + if (memcg == root)
> + return 0;
> +
> if (in_low_reclaim)
> return READ_ONCE(memcg->memory.emin);
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 05b4ec2c6499..df88a22f09bc 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -6385,6 +6385,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
>
> if (!root)
> root = root_mem_cgroup;
> +
> + /*
> + * Effective values of the reclaim targets are ignored so they
> + * can be stale. Have a look at mem_cgroup_protection for more
> + * details.
> + * TODO: calculation should be more robust so that we do not need
> + * that special casing.
> + */
> if (memcg == root)
> return MEMCG_PROT_NONE;
>
> --
> 2.25.1
>
> --
> Michal Hocko
> SUSE Labs