2020-12-18 10:25:56

by Jacob Wen

Subject: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()

This patch reduces repetition of set_task_reclaim_state() around
do_try_to_free_pages().

Signed-off-by: Jacob Wen <[email protected]>
---
mm/vmscan.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 257cba79a96d..4bc244b23686 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3023,6 +3023,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
pg_data_t *last_pgdat;
struct zoneref *z;
struct zone *zone;
+ unsigned long ret;
+
+ set_task_reclaim_state(current, &sc->reclaim_state);
+
retry:
delayacct_freepages_start();

@@ -3069,12 +3073,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,

delayacct_freepages_end();

- if (sc->nr_reclaimed)
- return sc->nr_reclaimed;
+ if (sc->nr_reclaimed) {
+ ret = sc->nr_reclaimed;
+ goto out;
+ }

/* Aborted reclaim to try compaction? don't OOM, then */
- if (sc->compaction_ready)
- return 1;
+ if (sc->compaction_ready) {
+ ret = 1;
+ goto out;
+ }

/*
* We make inactive:active ratio decisions based on the node's
@@ -3101,7 +3109,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
goto retry;
}

- return 0;
+ ret = 0;
+out:
+ set_task_reclaim_state(current, NULL);
+ return ret;
}

static bool allow_direct_reclaim(pg_data_t *pgdat)
@@ -3269,13 +3280,11 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
return 1;

- set_task_reclaim_state(current, &sc.reclaim_state);
trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);

nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
- set_task_reclaim_state(current, NULL);

return nr_reclaimed;
}
@@ -3347,7 +3356,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
*/
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);

- set_task_reclaim_state(current, &sc.reclaim_state);
trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
noreclaim_flag = memalloc_noreclaim_save();

@@ -3355,7 +3363,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,

memalloc_noreclaim_restore(noreclaim_flag);
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
- set_task_reclaim_state(current, NULL);

return nr_reclaimed;
}
@@ -4023,11 +4030,9 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)

fs_reclaim_acquire(sc.gfp_mask);
noreclaim_flag = memalloc_noreclaim_save();
- set_task_reclaim_state(current, &sc.reclaim_state);

nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

- set_task_reclaim_state(current, NULL);
memalloc_noreclaim_restore(noreclaim_flag);
fs_reclaim_release(sc.gfp_mask);

--
2.25.1


2020-12-18 10:54:31

by Michal Hocko

Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()

On Fri 18-12-20 18:22:17, Jacob Wen wrote:
> This patch reduces repetition of set_task_reclaim_state() around
> do_try_to_free_pages().

The changelog really should explain why this is needed/useful.
From the above it is not clear whether you aimed at a cleanup or at
fixing some misbehavior. I assume the former, but this should be
clearly articulated.

> [...]

--
Michal Hocko
SUSE Labs

2020-12-18 12:26:28

by Matthew Wilcox

Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()

On Fri, Dec 18, 2020 at 06:22:17PM +0800, Jacob Wen wrote:
> This patch reduces repetition of set_task_reclaim_state() around
> do_try_to_free_pages().

What is a DRY cleanup?

2020-12-18 13:59:26

by Jacob Wen

Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()


On 12/18/20 6:51 PM, Michal Hocko wrote:
> On Fri 18-12-20 18:22:17, Jacob Wen wrote:
>> This patch reduces repetition of set_task_reclaim_state() around
>> do_try_to_free_pages().
> The changelog really should explain why this is needed/useful.
> From the above it is not clear whether you aimed at a cleanup or at
> fixing some misbehavior. I assume the former, but this should be
> clearly articulated.

How about this?

mm/vmscan: remove duplicate code around do_try_to_free_pages()

This patch moves set_task_reclaim_state() into do_try_to_free_pages()
to avoid unnecessary repetition. No functional change intended.
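
To illustrate, this is the calling pattern the patch removes, as seen
in try_to_free_pages() (simplified here; the tracing calls in between
are omitted):

/* Before: each high-level reclaim entry point wraps the call. */
set_task_reclaim_state(current, &sc.reclaim_state);
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
set_task_reclaim_state(current, NULL);

/* After: do_try_to_free_pages() sets and clears it internally. */
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);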

>> [...]

2020-12-18 14:00:55

by Jacob Wen

Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()


On 12/18/20 8:17 PM, Matthew Wilcox wrote:
> On Fri, Dec 18, 2020 at 06:22:17PM +0800, Jacob Wen wrote:
>> This patch reduces repetition of set_task_reclaim_state() around
>> do_try_to_free_pages().
> What is a DRY cleanup?
>
DRY is short for "Don't repeat yourself"[1].

[1] https://en.wikipedia.org/wiki/Don%27t_repeat_yourself


2020-12-18 14:30:44

by Michal Hocko

Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()

On Fri 18-12-20 21:51:48, Jacob Wen wrote:
>
> On 12/18/20 6:51 PM, Michal Hocko wrote:
> > On Fri 18-12-20 18:22:17, Jacob Wen wrote:
> > > This patch reduces repetition of set_task_reclaim_state() around
> > > do_try_to_free_pages().
> > The changelog really should explain why this is needed/useful.
> > From the above it is not clear whether you aimed at a cleanup or at
> > fixing some misbehavior. I assume the former, but this should be
> > clearly articulated.
>
> How about this?
>
> mm/vmscan: remove duplicate code around do_try_to_free_pages()
>
> This patch moves set_task_reclaim_state() into do_try_to_free_pages()
> to avoid unnecessary repetition. No functional change intended.

This is still more about what is changed than why it is changed. I
would go with something like the following:
"
reclaim_state has to be set for all reclaim paths because it acts as
storage to collect reclaim feedback. Currently set_task_reclaim_state is
called from each highlevel reclaim function. Simplify the code flow by
moving set_task_reclaim_state into the core direct reclaim function
(do_try_to_free_pages) for all direct reclaim paths.
"

To the patch itself: I am not opposed, but I do not see an urgent reason
to take it either. The net LOC increases slightly, and it makes
do_try_to_free_pages slightly more tricky due to the different early
return paths. Highlevel direct reclaim functions do not tend to change
a lot.

> > > [...]

--
Michal Hocko
SUSE Labs

2020-12-18 17:44:37

by Jacob Wen

Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()


On 12/18/20 10:27 PM, Michal Hocko wrote:
> On Fri 18-12-20 21:51:48, Jacob Wen wrote:
>> On 12/18/20 6:51 PM, Michal Hocko wrote:
>>> On Fri 18-12-20 18:22:17, Jacob Wen wrote:
>>>> This patch reduces repetition of set_task_reclaim_state() around
>>>> do_try_to_free_pages().
>>> The changelog really should explain why this is needed/useful.
>>> From the above it is not clear whether you aimed at a cleanup or at
>>> fixing some misbehavior. I assume the former, but this should be
>>> clearly articulated.
>> How about this?
>>
>> mm/vmscan: remove duplicate code around do_try_to_free_pages()
>>
>> This patch moves set_task_reclaim_state() into do_try_to_free_pages()
>> to avoid unnecessary repetition. No functional change intended.
> This is still more about what is changed than why it is changed. I
> would go with something like the following:
> "
> reclaim_state has to be set for all reclaim paths because it acts as
> storage to collect reclaim feedback. Currently set_task_reclaim_state is
> called from each highlevel reclaim function. Simplify the code flow by
> moving set_task_reclaim_state into the core direct reclaim function
> (do_try_to_free_pages) for all direct reclaim paths.
> "
>
> To the patch itself: I am not opposed, but I do not see an urgent reason
> to take it either. The net LOC increases slightly, and it makes
> do_try_to_free_pages slightly more tricky due to the different early
> return paths. Highlevel direct reclaim functions do not tend to change
> a lot.

set_task_reclaim_state() is a function with 3 lines of code, 2 of
which contain WARN_ON_ONCE.
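
For reference, the helper looks roughly like this in mm/vmscan.c as of
v5.10 (quoted here so readers don't have to look it up):

static void set_task_reclaim_state(struct task_struct *task,
				   struct reclaim_state *rs)
{
	/* Check for an overwrite */
	WARN_ON_ONCE(rs && task->reclaim_state);

	/* Check for the nulling of an already-nulled member */
	WARN_ON_ONCE(!rs && !task->reclaim_state);

	task->reclaim_state = rs;
}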

I am not comfortable with the current repetition.

>>>> [...]

2020-12-19 01:23:29

by Chris Down

Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()

Jacob Wen writes:
>set_task_reclaim_state() is a function with 3 lines of code, 2 of
>which contain WARN_ON_ONCE.
>
>I am not comfortable with the current repetition.

Ok, but could you please go into _why_ others should feel that way too? There
are also reasons to err on the side of leaving code as-is -- since we know it
already works, and this code generally has pretty high inertia -- and to
avoid mutating code without a concrete description of the benefits.

2020-12-19 03:22:32

by Jacob Wen

[permalink] [raw]
Subject: Re: [PATCH] mm/vmscan: DRY cleanup for do_try_to_free_pages()


On 12/19/20 9:21 AM, Chris Down wrote:
> Jacob Wen writes:
>> set_task_reclaim_state() is a function with 3 lines of code, 2 of
>> which contain WARN_ON_ONCE.
>>
>> I am not comfortable with the current repetition.
>
> Ok, but could you please go into _why_ others should feel that way
> too? There are also reasons to err on the side of leaving code as-is
> -- since we know it already works, and this code generally has pretty
> high inertia -- and to avoid mutating code without a concrete
> description of the benefits.

I don't get your point. The patch doesn't change the code of
set_task_reclaim_state(), so I am fine with the repeated WARN_ON_ONCE.

I mean that I prefer removing the duplicate code to avoid going down
the rabbit hole of set_task_reclaim_state() at every call site.

It's a fundamental principle to me to move repeated code into its own
function. I'd like to hear others' opinions.